From a694ad16dc01cfd4272b49df0be45fbf78b41fcd Mon Sep 17 00:00:00 2001 From: driazati Date: Thu, 1 Dec 2022 16:27:50 -0800 Subject: [PATCH 1/5] rebase --- .gitattributes | 1 + ci/jenkins/Makefile | 27 ---- ci/jenkins/data.py | 122 +++++++++++++++ ci/jenkins/generate.py | 146 +++++++++--------- ci/jenkins/requirements.txt | 1 - .../templates/arm_jenkinsfile.groovy.j2 | 2 + ci/jenkins/templates/utils/base.groovy.j2 | 108 +++++++++++++ ci/jenkins/{ => templates/utils}/macros.j2 | 105 +++---------- ci/scripts/jenkins/open_docker_update_pr.py | 50 +++--- docker/dev_common.sh | 18 +-- tests/lint/check_file_type.py | 2 + tests/lint/rat-excludes | 9 +- 12 files changed, 351 insertions(+), 240 deletions(-) delete mode 100644 ci/jenkins/Makefile create mode 100644 ci/jenkins/data.py delete mode 100644 ci/jenkins/requirements.txt create mode 100644 ci/jenkins/templates/arm_jenkinsfile.groovy.j2 create mode 100644 ci/jenkins/templates/utils/base.groovy.j2 rename ci/jenkins/{ => templates/utils}/macros.j2 (58%) diff --git a/.gitattributes b/.gitattributes index 1c7a460675f8..d82bd5436b21 100644 --- a/.gitattributes +++ b/.gitattributes @@ -1 +1,2 @@ Jenkinsfile linguist-generated=true +ci/jenkins/generated/* linguist-generated=true diff --git a/ci/jenkins/Makefile b/ci/jenkins/Makefile deleted file mode 100644 index 5c9e0ac54057..000000000000 --- a/ci/jenkins/Makefile +++ /dev/null @@ -1,27 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. - -_venv: requirements.txt - rm -rf _venv - python3 -mvenv _venv - _venv/bin/pip3 install -r requirements.txt - -all: _venv - _venv/bin/python3 generate.py - -.PHONY: all venv -.DEFAULT_GOAL=all diff --git a/ci/jenkins/data.py b/ci/jenkins/data.py new file mode 100644 index 000000000000..492608870e01 --- /dev/null +++ b/ci/jenkins/data.py @@ -0,0 +1,122 @@ +#!/usr/bin/env python3 +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. 
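+"""Single source of truth for the Docker images, artifact stash lists, and AWS
+constants used by TVM's CI.
+
+generate.py renders these values into the generated Jenkinsfiles, and
+docker/dev_common.sh runs this file directly (python3 ci/jenkins/data.py
+ci_gpu) to look up an image's tag.
+"""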
+import sys + + +files_to_stash = { + # Executables and build files needed to run c++ tests + "cpptest": ["build/cpptest", "build/build.ninja", "build/CMakeFiles/rules.ninja"], + # Executables and build files needed to c runtime tests + "crttest": ["build/crttest"], + # Folder for hexagon build + "hexagon_api": [ + "build/hexagon_api_output", + ], + # Folder for microtvm build + "microtvm_template_projects": [ + "build/microtvm_template_projects", + ], + # Folders and build files for c runtime + "standalone_crt": ["build/standalone_crt", "build/build.ninja"], + # This library is produced with HIDE_PRIVATE_SYMBOLS=ON + "tvm_allvisible": ["build/libtvm_allvisible.so"], + # runtime files + "tvm_runtime": ["build/libtvm_runtime.so", "build/config.cmake"], + # compiler files + "tvm_lib": ["build/libtvm.so", "build/libtvm_runtime.so", "build/config.cmake"], + # compiler files and fsim + "tvm_multilib": [ + "build/libtvm.so", + "build/libvta_fsim.so", + "build/libtvm_runtime.so", + "build/config.cmake", + ], + # compiler files, fsim, and tsim + "tvm_multilib_tsim": [ + "build/libvta_tsim.so", + "build/libtvm.so", + "build/libvta_fsim.so", + "build/libtvm_runtime.so", + "build/config.cmake", + ], +} + + +# AWS info +aws_default_region = "us-west-2" +aws_ecr_url = "dkr.ecr." + aws_default_region + ".amazonaws.com" + +# Docker Images +docker_images = { + "ci_arm": { + "tag": "tlcpack/ci-arm:20221013-060115-61c9742ea", + "platform": "ARM", + }, + "ci_cortexm": { + "tag": "tlcpack/ci-cortexm:20221013-060115-61c9742ea", + "platform": "CPU", + }, + "ci_cpu": { + "tag": "tlcpack/ci-cpu:20221013-060115-61c9742ea", + "platform": "CPU", + }, + "ci_gpu": { + "tag": "tlcpack/ci-gpu:20221019-060125-0b4836739", + "platform": "GPU", + }, + "ci_hexagon": { + "tag": "tlcpack/ci-hexagon:20221013-060115-61c9742ea", + "platform": "CPU", + }, + "ci_i386": { + "tag": "tlcpack/ci-i386:20221013-060115-61c9742ea", + "platform": "CPU", + }, + "ci_lint": { + "tag": "tlcpack/ci-lint:20221013-060115-61c9742ea", + "platform": "CPU", + }, + "ci_minimal": { + "tag": "tlcpack/ci-minimal:20221013-060115-61c9742ea", + "platform": "CPU", + }, + "ci_riscv": { + "tag": "tlcpack/ci-riscv:20221013-060115-61c9742ea", + "platform": "CPU", + }, + "ci_wasm": { + "tag": "tlcpack/ci-wasm:20221013-060115-61c9742ea", + "platform": "CPU", + }, +} + +data = { + "images": [{"name": k, "platform": v["platform"]} for k, v in docker_images.items()], + "aws_default_region": aws_default_region, + "aws_ecr_url": aws_ecr_url, + **{k: v["tag"] for k, v in docker_images.items()}, + **files_to_stash, +} + +if __name__ == "__main__": + # This is used in docker/dev_common.sh to look up image tags + name = sys.argv[1] + if name in docker_images: + print(docker_images[name]["tag"]) + else: + exit(1) diff --git a/ci/jenkins/generate.py b/ci/jenkins/generate.py index 07bf4b5a8dad..30c12be5f95d 100644 --- a/ci/jenkins/generate.py +++ b/ci/jenkins/generate.py @@ -23,12 +23,16 @@ import textwrap from pathlib import Path -from typing import List +from typing import List, Optional +from dataclasses import dataclass + +from data import data REPO_ROOT = Path(__file__).resolve().parent.parent.parent -JENKINSFILE_TEMPLATE = REPO_ROOT / "ci" / "jenkins" / "Jenkinsfile.j2" -JENKINSFILE = REPO_ROOT / "Jenkinsfile" +JENKINS_DIR = REPO_ROOT / "ci" / "jenkins" +TEMPLATES_DIR = JENKINS_DIR / "templates" +GENERATED_DIR = JENKINS_DIR / "generated" class Change: @@ -37,50 +41,12 @@ class Change: FULL = object() -data = { - "images": [ - { - "name": "ci_arm", - "platform": "ARM", - 
}, - { - "name": "ci_cortexm", - "platform": "CPU", - }, - { - "name": "ci_cpu", - "platform": "CPU", - }, - { - "name": "ci_gpu", - "platform": "CPU", - }, - { - "name": "ci_hexagon", - "platform": "CPU", - }, - { - "name": "ci_i386", - "platform": "CPU", - }, - { - "name": "ci_lint", - "platform": "CPU", - }, - { - "name": "ci_minimal", - "platform": "CPU", - }, - { - "name": "ci_riscv", - "platform": "CPU", - }, - { - "name": "ci_wasm", - "platform": "CPU", - }, - ] -} +@dataclass +class ChangeData: + diff: Optional[str] + content: str + destination: Path + source: Path def lines_without_generated_tag(content): @@ -133,36 +99,44 @@ def change_type(lines: List[str]) -> Change: return Change.FULL -if __name__ == "__main__": - help = "Regenerate Jenkinsfile from template" - parser = argparse.ArgumentParser(description=help) - parser.add_argument("--force", action="store_true", help="always overwrite timestamp") - parser.add_argument("--check", action="store_true", help="just verify the output didn't change") - args = parser.parse_args() - - with open(JENKINSFILE) as f: - content = f.read() +def update_jenkinsfile(source: Path) -> ChangeData: + destination = GENERATED_DIR / source.stem data["generated_time"] = datetime.datetime.now().isoformat() - timestamp_match = re.search(r"^// Generated at (.*)$", content, flags=re.MULTILINE) - if not timestamp_match: - raise RuntimeError("Could not find timestamp in Jenkinsfile") - original_timestamp = timestamp_match.groups()[0] + if destination.exists(): + with open(destination) as f: + old_generated_content = f.read() + + timestamp_match = re.search( + r"^// Generated at (.*)$", old_generated_content, flags=re.MULTILINE + ) + if not timestamp_match: + raise RuntimeError( + f"Could not find timestamp in Jenkinsfile: {destination.relative_to(TEMPLATES_DIR)}" + ) + original_timestamp = timestamp_match.groups()[0] environment = jinja2.Environment( - loader=jinja2.FileSystemLoader(REPO_ROOT), + loader=jinja2.FileSystemLoader(TEMPLATES_DIR), undefined=jinja2.StrictUndefined, lstrip_blocks=True, trim_blocks=True, keep_trailing_newline=True, ) - template = environment.get_template(str(JENKINSFILE_TEMPLATE.relative_to(REPO_ROOT))) + template = environment.get_template(str(source.relative_to(TEMPLATES_DIR))) new_content = template.render(**data) + if not destination.exists(): + # New file, create it from scratch + return ChangeData( + diff=new_content, content=new_content, source=source, destination=destination + ) + diff = [ line for line in difflib.unified_diff( - lines_without_generated_tag(content), lines_without_generated_tag(new_content) + lines_without_generated_tag(old_generated_content), + lines_without_generated_tag(new_content), ) ] change = change_type(diff) @@ -173,17 +147,30 @@ def change_type(lines: List[str]) -> Change: diff = "".join(diff) + return ChangeData(diff=diff, content=new_content, source=source, destination=destination) + + +if __name__ == "__main__": + help = "Regenerate Jenkinsfile from template" + parser = argparse.ArgumentParser(description=help) + parser.add_argument("--force", action="store_true", help="always overwrite timestamp") + parser.add_argument("--check", action="store_true", help="just verify the output didn't change") + args = parser.parse_args() + + sources = TEMPLATES_DIR.glob("*_jenkinsfile.groovy.j2") + changes = [update_jenkinsfile(source) for source in sources if source.name != "base.groovy.j2"] + if args.check: - if not diff: - print("Success, the newly generated Jenkinsfile matched the one on disk") + if 
all(not data.diff for data in changes):
+        print("Success, the newly generated Jenkinsfiles matched the ones on disk")
         exit(0)
     else:
         print(
             textwrap.dedent(
                 """
-            Newly generated Jenkinsfile did not match the one on disk! If you have made
-            edits to the Jenkinsfile, move them to 'jenkins/Jenkinsfile.j2' and
-            regenerate the Jenkinsfile from the template with
+            Newly generated Jenkinsfiles did not match the ones on disk! If you have made
+            edits to the Jenkinsfiles in generated/, move them to the corresponding source and
+            regenerate the Jenkinsfiles from the templates with
 
-                python3 -m pip install -r jenkins/requirements.txt
-                python3 jenkins/generate.py
+                python3 -m pip install Jinja2
+                python3 ci/jenkins/generate.py
 
@@ -192,13 +179,20 @@ def change_type(lines: List[str]) -> Change:
             """
             ).strip()
         )
-        print(diff)
+        for data in changes:
+            if data.diff:
+                source = data.source.relative_to(REPO_ROOT)
+                print(source)
+                print(data.diff)
+
         exit(1)
     else:
-        with open(JENKINSFILE, "w") as f:
-            f.write(new_content)
-        if not diff:
-            print(f"Wrote output to {JENKINSFILE.relative_to(REPO_ROOT)}, no changes made")
-        else:
-            print(f"Wrote output to {JENKINSFILE.relative_to(REPO_ROOT)}, changes:")
-            print(diff)
+        for data in changes:
+            with open(data.destination, "w") as f:
+                f.write(data.content)
+
+            if not data.diff:
+                print(f"Wrote output to {data.destination.relative_to(REPO_ROOT)}, no changes made")
+            else:
+                print(f"Wrote output to {data.destination.relative_to(REPO_ROOT)}, changes:")
+                print(data.diff)
diff --git a/ci/jenkins/requirements.txt b/ci/jenkins/requirements.txt
deleted file mode 100644
index d8086eca6e41..000000000000
--- a/ci/jenkins/requirements.txt
+++ /dev/null
@@ -1 +0,0 @@
-Jinja2>=3.0.0
diff --git a/ci/jenkins/templates/arm_jenkinsfile.groovy.j2 b/ci/jenkins/templates/arm_jenkinsfile.groovy.j2
new file mode 100644
index 000000000000..59d0ebc0e621
--- /dev/null
+++ b/ci/jenkins/templates/arm_jenkinsfile.groovy.j2
@@ -0,0 +1,2 @@
+{% include "utils/base.groovy.j2" with context %}
+{% import 'utils/macros.j2' as m with context -%}
diff --git a/ci/jenkins/templates/utils/base.groovy.j2 b/ci/jenkins/templates/utils/base.groovy.j2
new file mode 100644
index 000000000000..304b6c4b378c
--- /dev/null
+++ b/ci/jenkins/templates/utils/base.groovy.j2
@@ -0,0 +1,108 @@
+#!groovy
+// -*- mode: groovy -*-
+
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// Jenkins pipeline
+// See documents at https://jenkins.io/doc/book/pipeline/jenkinsfile/
+
+// Docker env used for testing
+// Different images may have different version tags
+// because some of them are more stable than others.
+//
+// Docker images are maintained by the PMC, cached on Docker Hub,
+// and remain relatively stable over time.
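+//
+// The concrete image tags live in ci/jenkins/data.py and are rendered into
+// this template by ci/jenkins/generate.py, so routine tag bumps only need
+// to touch that one file.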
+// Flow for upgrading the docker env (requires a committer):
+//
+// - Send PR to upgrade build script in the repo
+// - Build the new docker image
+// - Tag the docker image with a new version and push to a binary cache.
+// - Update the version in the Jenkinsfile, send a PR
+// - Fix any issues with regard to the new image version in the PR
+// - Merge the PR and now we are on the new version
+// - Tag the new version as the latest
+// - Periodically clean up the old versions on local workers
+//

+// ============================= IMPORTANT NOTE =============================
+// This file is generated by 'ci/jenkins/generate.py'. Do not edit generated
+// files directly! Make edits to the templates under 'ci/jenkins/templates/'
+// and regenerate them with 'python3 ci/jenkins/generate.py'.
+// Note: This timestamp is here to ensure that updates to the Jenkinsfile are
+// always rebased on main before merging:
+// Generated at {{ generated_time }}
+
+import org.jenkinsci.plugins.pipeline.modeldefinition.Utils
+{% import 'utils/macros.j2' as m with context -%}
+
+ci_lint = '{{ ci_lint }}'
+ci_gpu = '{{ ci_gpu }}'
+ci_cpu = '{{ ci_cpu }}'
+ci_minimal = '{{ ci_minimal }}'
+ci_wasm = '{{ ci_wasm }}'
+ci_i386 = '{{ ci_i386 }}'
+ci_cortexm = '{{ ci_cortexm }}'
+ci_arm = '{{ ci_arm }}'
+ci_hexagon = '{{ ci_hexagon }}'
+ci_riscv = '{{ ci_riscv }}'
+
+// Parameters to allow overriding (in the Jenkins UI) the images
+// to be used by a given build. When provided, they take precedence
+// over the default values above.
+properties([
+  parameters([
+    {% for image in images %}
+    string(name: '{{ image.name }}_param', defaultValue: ''),
+    {% endfor %}
+  ])
+])
+
+// Placeholders for newly built Docker image names (if rebuild_docker_images
+// is used)
+{% for image in images %}
+  built_{{ image.name }} = null;
+{% endfor %}
+
+// Global variable assigned during Sanity Check that holds the sha1 which should be
+// merged into the PR in all branches.
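+// init_git() sets this: on main it is simply HEAD, while PR branches fetch
+// origin/main and merge against it before building.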
+upstream_revision = null + +// command to start a docker container +docker_run = 'docker/bash.sh --env CI --env TVM_SHARD_INDEX --env TVM_NUM_SHARDS --env RUN_DISPLAY_URL --env PLATFORM --env SKIP_SLOW_TESTS --env TEST_STEP_NAME' +docker_build = 'docker/build.sh' +// timeout in minutes +max_time = 180 +rebuild_docker_images = false + +s3_bucket = 'tvm-jenkins-artifacts-prod' +s3_prefix = "tvm/${env.BRANCH_NAME}/${env.BUILD_NUMBER}" + +// Jenkins script root directory +jenkins_scripts_root = "ci/scripts/jenkins" + + +// General note: Jenkins has limits on the size of a method (or top level code) +// that are pretty strict, so most usage of groovy methods in these templates +// are purely to satisfy the JVM +{% include "ci/jenkins/Prepare.groovy.j2" %} +{% include "ci/jenkins/Build.groovy.j2" %} +{% include "ci/jenkins/Test.groovy.j2" %} + +cancel_previous_build() + +prepare() diff --git a/ci/jenkins/macros.j2 b/ci/jenkins/templates/utils/macros.j2 similarity index 58% rename from ci/jenkins/macros.j2 rename to ci/jenkins/templates/utils/macros.j2 index ff59a4046179..5c65318477da 100644 --- a/ci/jenkins/macros.j2 +++ b/ci/jenkins/templates/utils/macros.j2 @@ -26,13 +26,13 @@ sh( ) {% endmacro %} -{% macro sharded_test_step(name, num_shards, node, ws, docker_image, platform, test_method_names) %} +{% macro sharded_test_step(name, num_shards, node, ws, docker_image, platform, test_method_names, condition="!skip_ci && is_docs_only_build != 1") %} {% for shard_index in range(1, num_shards + 1) %} {% set method_name = "shard_run_" + name.replace(":", "").replace(" ", "-").replace("-", "_") + "_" + shard_index|string + "_of_" + num_shards|string %} {% set test_dir_name = name.replace(":", "").replace(" ", "-").replace("-", "_")|string %} def {{ method_name }}() { - if (!skip_ci && is_docs_only_build != 1) { + if ({{ condition }}) { node('{{ node }}') { ws({{ per_exec_ws(ws) }}) { try { @@ -67,30 +67,9 @@ def {{ method_name }}() { {% endfor %} {% endmacro %} -{% macro sharded_lint_step(name, num_shards, docker_image, node, ws) %} -{% for shard_index in range(1, num_shards + 1) %} - '{{ name }} {{ shard_index }} of {{ num_shards }}': { - node('{{ node }}') { - ws({{ per_exec_ws(ws) }}) { - init_git() - docker_init({{ docker_image }}) - timeout(time: max_time, unit: 'MINUTES') { - withEnv([ - 'TVM_NUM_SHARDS={{ num_shards }}', - 'TEST_STEP_NAME={{ name }}', - 'TVM_SHARD_INDEX={{ shard_index - 1 }}', - "SKIP_SLOW_TESTS=${skip_slow_tests}"], { - {{ caller() | trim | indent(width=6) }} - }) - } - } - } - }, -{% endfor %} -{% endmacro %} - -{% macro build_step(name, condition, node, docker_image, ws) %} - '{{ name }}': { +{% macro invoke_build(name, condition, node, docker_image, ws) %} +def build() { + stage('Build') { if ({{ condition }}) { node('{{ node }}') { ws({{ per_exec_ws(ws) }}) { @@ -104,67 +83,27 @@ def {{ method_name }}() { } else { Utils.markStageSkippedForConditional('{{ name }}') } - }, -{% endmacro %} - -{% macro test_step_body(name, node, ws, docker_image, platform) %} -{% set test_dir_name = name.replace(":", "").replace(" ", "-").replace("-", "_")|string %} - if (!skip_ci && is_docs_only_build != 1) { - node('{{ node }}') { - ws({{ per_exec_ws(ws) }}) { - timeout(time: max_time, unit: 'MINUTES') { - try { - init_git() - docker_init({{ docker_image }}) - withEnv(['PLATFORM={{ platform }}'], { - {{ caller() | indent(width=8) | trim }} - }) - } finally { - try { - {{ junit_to_s3(test_dir_name) | indent(width=0) }} - junit 'build/pytest-results/*.xml' - } catch (Exception e) { - echo 
'Exception during JUnit upload: ' + e.toString() - } - } - } - } - } - } else { - Utils.markStageSkippedForConditional('{{ name }}') } +} +build() {% endmacro %} -{% macro test_step(name, node, ws, docker_image, platform) %} -{% set test_dir_name = name.replace(":", "").replace(" ", "-").replace("-", "_")|string %} - '{{ name }}': { - if (!skip_ci && is_docs_only_build != 1) { - node('{{ node }}') { - ws({{ per_exec_ws(ws) }}) { - timeout(time: max_time, unit: 'MINUTES') { - try { - init_git() - docker_init({{ docker_image }}) - withEnv(['PLATFORM={{ platform }}', - 'TEST_STEP_NAME={{ name }}', - "SKIP_SLOW_TESTS=${skip_slow_tests}"], { - {{ caller() | indent(width=12) | trim }} - }) - } finally { - try { - {{ junit_to_s3(test_dir_name) | indent(width=4) }} - junit 'build/pytest-results/*.xml' - } catch (Exception e) { - echo 'Exception during JUnit upload: ' + e.toString() - } - } - } - } - } - } else { - Utils.markStageSkippedForConditional('{{ name }}') +{% macro invoke_tests(test_method_names) %} +def test() { + stage('Test') { + environment { + SKIP_SLOW_TESTS = "${skip_slow_tests}" } - }, + parallel( + {% for stage_name, method_name in test_method_names %} + '{{ stage_name }}': { + {{ method_name }}() + }, + {% endfor %} + ) + } +} +test() {% endmacro %} {% macro deploy_step(name, feature_flag, ws) %} diff --git a/ci/scripts/jenkins/open_docker_update_pr.py b/ci/scripts/jenkins/open_docker_update_pr.py index 9dcb241d5fd8..f11d00f765df 100755 --- a/ci/scripts/jenkins/open_docker_update_pr.py +++ b/ci/scripts/jenkins/open_docker_update_pr.py @@ -22,15 +22,16 @@ import os import json import re +import shlex from urllib import error from typing import List, Dict, Any, Optional, Callable from git_utils import git, parse_remote, GitHubRepo from cmd_utils import REPO_ROOT, init_log from should_rebuild_docker import docker_api -JENKINSFILE = REPO_ROOT / "ci" / "jenkins" / "Jenkinsfile.j2" -GENERATED_JENKINSFILE = REPO_ROOT / "Jenkinsfile" -GENERATE_SCRIPT = REPO_ROOT / "ci" / "jenkins" / "generate.py" +JENKINS_DIR = REPO_ROOT / "ci" / "jenkins" +IMAGES_FILE = JENKINS_DIR / "data.py" +GENERATE_SCRIPT = JENKINS_DIR / "generate.py" GITHUB_TOKEN = os.environ["GITHUB_TOKEN"] BRANCH = "nightly-docker-update" @@ -125,51 +126,41 @@ def latest_tlcpackstaging_image(source: str) -> Optional[str]: user, repo = parse_remote(remote) # Read the existing images from the Jenkinsfile - logging.info(f"Reading {JENKINSFILE}") - with open(JENKINSFILE) as f: + logging.info(f"Reading {IMAGES_FILE}") + with open(IMAGES_FILE) as f: content = f.readlines() # Build a new Jenkinsfile with the latest images from tlcpack or tlcpackstaging - new_content = [] replacements = {} + for line in content: - m = re.match(r"^(ci_[a-zA-Z0-9]+) = \'(.*)\'", line.strip()) + m = re.match(r"^(ci_[a-zA-Z0-9]+) = \"(.*)\"", line.strip()) if m is not None: logging.info(f"Found match on line {line.strip()}") groups = m.groups() new_image = latest_tlcpackstaging_image(groups[1]) if new_image is None: logging.info(f"No new image found") - new_content.append(line) else: logging.info(f"Using new image {new_image}") new_line = f"{groups[0]} = '{new_image}'\n" - new_content.append(new_line) replacements[line] = new_line - else: - new_content.append(line) - # Write out the new content - if args.dry_run: - logging.info(f"Dry run, would have written new content to {JENKINSFILE}") - else: - logging.info(f"Writing new content to {JENKINSFILE}") - with open(JENKINSFILE, "w") as f: - f.write("".join(new_content)) + # Re-generate the Jenkinsfiles + 
command = f"python3 {shlex.quote(str(GENERATE_SCRIPT))}"
 
-    # Re-generate the Jenkinsfile
-    logging.info(f"Editing {GENERATED_JENKINSFILE}")
-    with open(GENERATED_JENKINSFILE) as f:
-        generated_content = f.read()
+    # readlines() keeps each line's trailing newline, so join with "" (not
+    # "\n") to avoid inserting blank lines when writing the file back out
+    content = "".join(content)
+    for old_line, new_line in replacements.items():
+        content = content.replace(old_line, new_line)
 
-    for original_line, new_line in replacements.items():
-        generated_content = generated_content.replace(original_line, new_line)
+    print(f"Updated to:\n{content}")
 
     if args.dry_run:
-        print(f"Would have written:\n{generated_content}")
+        print(f"Would have run:\n{command}")
     else:
-        with open(GENERATED_JENKINSFILE, "w") as f:
-            f.write(generated_content)
+        with open(IMAGES_FILE, "w") as f:
+            f.write(content)
+        Sh().run(command)
 
     # Publish the PR
     title = "[ci][docker] Nightly Docker image update"
@@ -177,12 +168,11 @@ def latest_tlcpackstaging_image(source: str) -> Optional[str]:
     message = f"{title}\n\n\n{body}"
 
     if args.dry_run:
-        logging.info("Dry run, would have committed Jenkinsfile")
+        logging.info("Dry run, would have committed Jenkinsfiles")
     else:
         logging.info(f"Creating git commit")
         git(["checkout", "-B", BRANCH])
-        git(["add", str(JENKINSFILE.relative_to(REPO_ROOT))])
-        git(["add", str(GENERATED_JENKINSFILE.relative_to(REPO_ROOT))])
+        git(["add", str(JENKINS_DIR.relative_to(REPO_ROOT))])
         git(["config", "user.name", "tvm-bot"])
         git(["config", "user.email", "95660001+tvm-bot@users.noreply.github.com"])
         git(["commit", "-m", message])
diff --git a/docker/dev_common.sh b/docker/dev_common.sh
index 59ab8db395a1..016d49e9f792 100644
--- a/docker/dev_common.sh
+++ b/docker/dev_common.sh
@@ -28,24 +28,10 @@ INVOCATION_PWD="$(pwd)"
 
 GIT_TOPLEVEL=$(cd $(dirname ${BASH_SOURCE[0]}) && git rev-parse --show-toplevel)
 
-function filter_jenkinsfile() {
-    local echo_on=0;
-    while read line; do
-        if [ "${line}" == "// NOTE: these lines are scanned by docker/dev_common.sh. Please update the regex as needed. -->" ]; then
-            echo_on=1
-        elif [ "${line}" == "// <--- End of regex-scanned config."
]; then - break - elif [ ${echo_on} -eq 1 ]; then - echo "$line" - fi - done -} - - function lookup_image_spec() { - img_line=$(cat "${GIT_TOPLEVEL}/Jenkinsfile" | filter_jenkinsfile | grep -E "^${1} = ") + img_line=$(python3 "${GIT_TOPLEVEL}/ci/jenkins/data.py" "$1") if [ -n "${img_line}" ]; then - img_spec=$(echo "${img_line}" | sed -E "s/${1} = '([^\"]*)'/\1/") + img_spec=$(echo "${img_line}" | sed -E "s/${1} = \"([^\"]*)\"/\1/") has_similar_docker_image=1 docker inspect "${1}" &>/dev/null || has_similar_docker_image=0 if [ ${has_similar_docker_image} -ne 0 ]; then diff --git a/tests/lint/check_file_type.py b/tests/lint/check_file_type.py index 2b8b61c41361..f5d5a2f0a370 100644 --- a/tests/lint/check_file_type.py +++ b/tests/lint/check_file_type.py @@ -89,6 +89,8 @@ "ld", # Jinja2 templates "j2", + # Jenkinsfiles + "groovy", } # List of file names allowed diff --git a/tests/lint/rat-excludes b/tests/lint/rat-excludes index 1cdb78e31913..e6338dc81b3c 100644 --- a/tests/lint/rat-excludes +++ b/tests/lint/rat-excludes @@ -52,10 +52,5 @@ MANIFEST rat-excludes Cargo.lock -# Included template files -Build.groovy.j2 -Deploy.groovy.j2 -DockerBuild.groovy.j2 -Lint.groovy.j2 -Prepare.groovy.j2 -Test.groovy.j2 +# Template files for Jenkins +.*\.groovy\.j2 From 2a90431a86bb281e251f4742cd5e8c50f35cd748 Mon Sep 17 00:00:00 2001 From: driazati Date: Thu, 1 Dec 2022 17:23:35 -0800 Subject: [PATCH 2/5] Rebase --- ci/jenkins/Build.groovy.j2 | 236 --- ci/jenkins/DockerBuild.groovy.j2 | 118 -- ci/jenkins/Jenkinsfile.j2 | 138 -- ci/jenkins/Lint.groovy.j2 | 19 - ci/jenkins/Test.groovy.j2 | 344 ----- ci/jenkins/generated/arm_jenkinsfile.groovy | 929 ++++++++++++ .../generated/cortexm_jenkinsfile.groovy | 1109 ++++++++++++++ ci/jenkins/generated/cpu_jenkinsfile.groovy | 827 +++++++++++ .../generated/docker_jenkinsfile.groovy | 945 ++++++++++++ ci/jenkins/generated/gpu_jenkinsfile.groovy | 1279 +++++++++++++++++ .../generated/hexagon_jenkinsfile.groovy | 916 ++++++++++++ ci/jenkins/generated/i386_jenkinsfile.groovy | 678 +++++++++ ci/jenkins/generated/lint_jenkinsfile.groovy | 530 +++++++ .../generated/minimal_jenkinsfile.groovy | 573 ++++++++ ci/jenkins/generated/riscv_jenkinsfile.groovy | 579 ++++++++ ci/jenkins/generated/wasm_jenkinsfile.groovy | 515 +++++++ .../templates/arm_jenkinsfile.groovy.j2 | 81 ++ .../templates/cortexm_jenkinsfile.groovy.j2 | 48 + .../templates/cpu_jenkinsfile.groovy.j2 | 81 ++ .../docker_jenkinsfile.groovy.j2} | 177 ++- .../templates/gpu_jenkinsfile.groovy.j2 | 190 +++ .../templates/hexagon_jenkinsfile.groovy.j2 | 47 + .../templates/i386_jenkinsfile.groovy.j2 | 49 + .../templates/lint_jenkinsfile.groovy.j2 | 42 + .../templates/minimal_jenkinsfile.groovy.j2 | 38 + .../templates/riscv_jenkinsfile.groovy.j2 | 46 + ci/jenkins/templates/utils/Build.groovy.j2 | 57 + .../{ => templates/utils}/Prepare.groovy.j2 | 3 +- ci/jenkins/templates/utils/Test.groovy.j2 | 13 + ci/jenkins/templates/utils/base.groovy.j2 | 6 +- .../templates/wasm_jenkinsfile.groovy.j2 | 24 + 31 files changed, 9714 insertions(+), 923 deletions(-) delete mode 100644 ci/jenkins/Build.groovy.j2 delete mode 100644 ci/jenkins/DockerBuild.groovy.j2 delete mode 100644 ci/jenkins/Jenkinsfile.j2 delete mode 100644 ci/jenkins/Lint.groovy.j2 delete mode 100644 ci/jenkins/Test.groovy.j2 create mode 100644 ci/jenkins/generated/arm_jenkinsfile.groovy create mode 100644 ci/jenkins/generated/cortexm_jenkinsfile.groovy create mode 100644 ci/jenkins/generated/cpu_jenkinsfile.groovy create mode 100644 
ci/jenkins/generated/docker_jenkinsfile.groovy create mode 100644 ci/jenkins/generated/gpu_jenkinsfile.groovy create mode 100644 ci/jenkins/generated/hexagon_jenkinsfile.groovy create mode 100644 ci/jenkins/generated/i386_jenkinsfile.groovy create mode 100644 ci/jenkins/generated/lint_jenkinsfile.groovy create mode 100644 ci/jenkins/generated/minimal_jenkinsfile.groovy create mode 100644 ci/jenkins/generated/riscv_jenkinsfile.groovy create mode 100644 ci/jenkins/generated/wasm_jenkinsfile.groovy create mode 100644 ci/jenkins/templates/cortexm_jenkinsfile.groovy.j2 create mode 100644 ci/jenkins/templates/cpu_jenkinsfile.groovy.j2 rename ci/jenkins/{Deploy.groovy.j2 => templates/docker_jenkinsfile.groovy.j2} (51%) create mode 100644 ci/jenkins/templates/gpu_jenkinsfile.groovy.j2 create mode 100644 ci/jenkins/templates/hexagon_jenkinsfile.groovy.j2 create mode 100644 ci/jenkins/templates/i386_jenkinsfile.groovy.j2 create mode 100644 ci/jenkins/templates/lint_jenkinsfile.groovy.j2 create mode 100644 ci/jenkins/templates/minimal_jenkinsfile.groovy.j2 create mode 100644 ci/jenkins/templates/riscv_jenkinsfile.groovy.j2 create mode 100644 ci/jenkins/templates/utils/Build.groovy.j2 rename ci/jenkins/{ => templates/utils}/Prepare.groovy.j2 (98%) create mode 100644 ci/jenkins/templates/utils/Test.groovy.j2 create mode 100644 ci/jenkins/templates/wasm_jenkinsfile.groovy.j2 diff --git a/ci/jenkins/Build.groovy.j2 b/ci/jenkins/Build.groovy.j2 deleted file mode 100644 index 7592079ef8d1..000000000000 --- a/ci/jenkins/Build.groovy.j2 +++ /dev/null @@ -1,236 +0,0 @@ -def ci_setup(image) { - sh ( - script: "${docker_run} ${image} ./tests/scripts/task_clear_pytest.sh", - label: 'Clean up old workspace', - ) -} - -def python_unittest(image) { - sh ( - script: "${docker_run} ${image} ./tests/scripts/task_python_unittest.sh", - label: 'Run Python unit tests', - ) -} - -def fsim_test(image) { - sh ( - script: "${docker_run} ${image} ./tests/scripts/task_python_vta_fsim.sh", - label: 'Run VTA tests in FSIM', - ) -} - -def make_standalone_crt(image, build_dir) { - sh ( - script: """ - set -eux - ${docker_run} ${image} python3 ./tests/scripts/task_build.py \ - --sccache-bucket tvm-sccache-prod \ - --cmake-target standalone_crt \ - --build-dir build - ${docker_run} ${image} python3 ./tests/scripts/task_build.py \ - --sccache-bucket tvm-sccache-prod \ - --cmake-target crttest \ - --build-dir build - """, - label: 'Make standalone CRT', - ) -} - -def make_cpp_tests(image, build_dir) { - sh ( - script: """ - set -eux - ${docker_run} ${image} python3 ./tests/scripts/task_build.py \ - --sccache-bucket tvm-sccache-prod \ - --cmake-target cpptest \ - --build-dir ${build_dir} - """, - label: 'Make C++ tests', - ) -} - -def cmake_build(image, path, make_flag) { - sh ( - script: "${docker_run} --env CI_NUM_EXECUTORS ${image} ./tests/scripts/task_build.py --sccache-bucket tvm-sccache-prod", - label: 'Run cmake build', - ) -} - -def build() { -stage('Build') { - environment { - SKIP_SLOW_TESTS = "${skip_slow_tests}" - } - parallel( - - {% call m.build_step( - name='BUILD: GPU', - node='CPU-SMALL', - condition='!skip_ci', - ws='tvm/build-gpu', - docker_image='ci_gpu', - ) %} - sh "${docker_run} --no-gpu ${ci_gpu} ./tests/scripts/task_config_build_gpu.sh build" - cmake_build("${ci_gpu} --no-gpu", 'build', '-j2') - make_standalone_crt("${ci_gpu} --no-gpu", 'build') - {{ m.upload_artifacts(tag='gpu', filenames=tvm_multilib + tvm_allvisible + microtvm_template_projects + crttest + standalone_crt) }} - - // compiler test - sh "rm -rf 
build" - sh "${docker_run} --no-gpu ${ci_gpu} ./tests/scripts/task_config_build_gpu_other.sh build" - cmake_build("${ci_gpu} --no-gpu", 'build', '-j2') - make_standalone_crt("${ci_gpu} --no-gpu", 'build') - {{ m.upload_artifacts(tag='gpu2', filenames=tvm_lib + crttest + standalone_crt) }} - {% endcall %} - - {% call m.build_step( - name='BUILD: CPU', - node='CPU-SMALL', - condition='!skip_ci && is_docs_only_build != 1', - ws='tvm/build-cpu', - docker_image='ci_cpu', - ) %} - sh ( - script: "${docker_run} ${ci_cpu} ./tests/scripts/task_config_build_cpu.sh build", - label: 'Create CPU cmake config', - ) - cmake_build(ci_cpu, 'build', '-j2') - make_standalone_crt(ci_cpu, 'build') - make_cpp_tests(ci_cpu, 'build') - {{ m.upload_artifacts(tag='cpu', filenames=tvm_multilib_tsim + tvm_allvisible + crttest + cpptest + standalone_crt) }} - ci_setup(ci_cpu) - // sh "${docker_run} ${ci_cpu} ./tests/scripts/task_golang.sh" - // TODO(@jroesch): need to resolve CI issue will turn back on in follow up patch - sh (script: "${docker_run} ${ci_cpu} ./tests/scripts/task_rust.sh", label: 'Rust build and test') - {% endcall %} - - {% call m.build_step( - name='BUILD: CPU MINIMAL', - node='CPU-SMALL', - condition='!skip_ci && is_docs_only_build != 1', - ws='tvm/build-cpu-minimal', - docker_image='ci_minimal', - ) %} - sh ( - script: "${docker_run} ${ci_minimal} ./tests/scripts/task_config_build_minimal.sh build", - label: 'Create CPU minimal cmake config', - ) - cmake_build(ci_minimal, 'build', '-j2') - make_standalone_crt(ci_minimal, 'build') - make_cpp_tests(ci_minimal, 'build') - {{ m.upload_artifacts(tag='cpu-minimal', filenames=tvm_lib + tvm_allvisible + crttest + cpptest + standalone_crt) }} - {% endcall %} - - {% call m.build_step( - name='BUILD: WASM', - node='CPU-SMALL', - condition='!skip_ci && is_docs_only_build != 1', - ws='tvm/build-wasm', - docker_image='ci_wasm', - ) %} - sh ( - script: "${docker_run} ${ci_wasm} ./tests/scripts/task_config_build_wasm.sh build", - label: 'Create WASM cmake config', - ) - cmake_build(ci_wasm, 'build', '-j2') - make_standalone_crt(ci_wasm, 'build') - make_cpp_tests(ci_wasm, 'build') - cpp_unittest(ci_wasm) - ci_setup(ci_wasm) - sh ( - script: "${docker_run} ${ci_wasm} ./tests/scripts/task_web_wasm.sh", - label: 'Run WASM lint and tests', - ) - {% endcall %} - - {% call m.build_step( - name='BUILD: i386', - node='CPU-SMALL', - condition='!skip_ci && is_docs_only_build != 1', - ws='tvm/build-i386', - docker_image='ci_i386', - ) %} - sh ( - script: "${docker_run} ${ci_i386} ./tests/scripts/task_config_build_i386.sh build", - label: 'Create i386 cmake config', - ) - cmake_build(ci_i386, 'build', '-j2') - make_standalone_crt(ci_i386, 'build') - make_cpp_tests(ci_i386, 'build') - {{ m.upload_artifacts(tag='i386', filenames=tvm_multilib_tsim + standalone_crt + crttest + cpptest) }} - {% endcall %} - - {% call m.build_step( - name='BUILD: arm', - node='ARM-SMALL', - condition='!skip_ci && is_docs_only_build != 1', - ws='tvm/build-arm', - docker_image='ci_arm', - ) %} - sh ( - script: "${docker_run} ${ci_arm} ./tests/scripts/task_config_build_arm.sh build", - label: 'Create ARM cmake config', - ) - cmake_build(ci_arm, 'build', '-j4') - make_standalone_crt(ci_arm, 'build') - make_cpp_tests(ci_arm, 'build') - {{ m.upload_artifacts(tag='arm', filenames=tvm_multilib + cpptest + crttest + standalone_crt) }} - {% endcall %} - - {% call m.build_step( - name='BUILD: Cortex-M', - node='CPU-SMALL', - condition='!skip_ci && is_docs_only_build != 1', - ws='tvm/build-cortexm', - 
docker_image='ci_cortexm', - ) %} - sh ( - script: "${docker_run} ${ci_cortexm} ./tests/scripts/task_config_build_cortexm.sh build", - label: 'Create Cortex-M cmake config', - ) - cmake_build(ci_cortexm, 'build', '-j2') - make_standalone_crt(ci_cortexm, 'build') - make_cpp_tests(ci_cortexm, 'build') - {{ m.upload_artifacts(tag='cortexm', filenames=tvm_lib + tvm_allvisible + crttest + standalone_crt + cpptest + microtvm_template_projects) }} - {% endcall %} - - {% call m.build_step( - name='BUILD: Hexagon', - node='CPU-SMALL', - condition='!skip_ci && is_docs_only_build != 1', - ws='tvm/build-hexagon', - docker_image='ci_hexagon', - ) %} - sh ( - script: "${docker_run} ${ci_hexagon} ./tests/scripts/task_config_build_hexagon.sh build", - label: 'Create Hexagon cmake config', - ) - cmake_build(ci_hexagon, 'build', '-j2') - make_cpp_tests(ci_hexagon, 'build') - sh ( - script: "${docker_run} ${ci_hexagon} ./tests/scripts/task_build_hexagon_api.sh", - label: 'Build Hexagon API', - ) - {{ m.upload_artifacts(tag='hexagon', filenames=tvm_lib + cpptest + hexagon_api) }} - {% endcall %} - - {% call m.build_step( - name='BUILD: RISC-V', - node='CPU-SMALL', - condition='!skip_ci && is_docs_only_build != 1', - ws='tvm/build-riscv', - docker_image='ci_riscv', - ) %} - sh ( - script: "${docker_run} ${ci_riscv} ./tests/scripts/task_config_build_riscv.sh build", - label: 'Create RISC-V cmake config', - ) - cmake_build(ci_riscv, 'build', '-j2') - make_standalone_crt(ci_riscv, 'build') - make_cpp_tests(ci_riscv, 'build') - {{ m.upload_artifacts(tag='riscv', filenames=tvm_lib + tvm_allvisible + standalone_crt + crttest + cpptest + microtvm_template_projects) }} - {% endcall %} - - ) -} -} diff --git a/ci/jenkins/DockerBuild.groovy.j2 b/ci/jenkins/DockerBuild.groovy.j2 deleted file mode 100644 index 69e0db4f9e4f..000000000000 --- a/ci/jenkins/DockerBuild.groovy.j2 +++ /dev/null @@ -1,118 +0,0 @@ -def ecr_push(full_name) { - aws_account_id = sh( - returnStdout: true, - script: 'aws sts get-caller-identity | grep Account | cut -f4 -d\\"', - label: 'Get AWS ID' - ).trim() - - def ecr_name = "${aws_account_id}.{{ aws_ecr_url }}/${full_name}" - try { - withEnv([ - "AWS_ACCOUNT_ID=${aws_account_id}", - 'AWS_DEFAULT_REGION={{ aws_default_region }}', - "AWS_ECR_REPO=${aws_account_id}.{{ aws_ecr_url }}"]) { - sh( - script: ''' - set -eux - aws ecr get-login-password --region $AWS_DEFAULT_REGION | docker login --username AWS --password-stdin $AWS_ECR_REPO - ''', - label: 'Log in to ECR' - ) - sh( - script: """ - set -x - . 
${jenkins_scripts_root}/retry.sh - docker tag ${full_name} \$AWS_ECR_REPO/${full_name} - retry 5 docker push \$AWS_ECR_REPO/${full_name} - """, - label: 'Upload image to ECR' - ) - } - } finally { - withEnv([ - "AWS_ACCOUNT_ID=${aws_account_id}", - 'AWS_DEFAULT_REGION={{ aws_default_region }}', - "AWS_ECR_REPO=${aws_account_id}.{{ aws_ecr_url }}"]) { - sh( - script: 'docker logout $AWS_ECR_REPO', - label: 'Clean up login credentials' - ) - } - } - return ecr_name -} - -def ecr_pull(full_name) { - aws_account_id = sh( - returnStdout: true, - script: 'aws sts get-caller-identity | grep Account | cut -f4 -d\\"', - label: 'Get AWS ID' - ).trim() - - try { - withEnv([ - "AWS_ACCOUNT_ID=${aws_account_id}", - 'AWS_DEFAULT_REGION={{ aws_default_region }}', - "AWS_ECR_REPO=${aws_account_id}.{{ aws_ecr_url }}"]) { - sh( - script: ''' - set -eux - aws ecr get-login-password --region $AWS_DEFAULT_REGION | docker login --username AWS --password-stdin $AWS_ECR_REPO - ''', - label: 'Log in to ECR' - ) - sh( - script: """ - set -eux - . ${jenkins_scripts_root}/retry.sh - retry 5 docker pull ${full_name} - """, - label: 'Pull image from ECR' - ) - } - } finally { - withEnv([ - "AWS_ACCOUNT_ID=${aws_account_id}", - 'AWS_DEFAULT_REGION={{ aws_default_region }}', - "AWS_ECR_REPO=${aws_account_id}.{{ aws_ecr_url }}"]) { - sh( - script: 'docker logout $AWS_ECR_REPO', - label: 'Clean up login credentials' - ) - } - } -} - -def build_image(image_name) { - hash = sh( - returnStdout: true, - script: 'git log -1 --format=\'%h\'' - ).trim() - def full_name = "${image_name}:${env.BRANCH_NAME}-${hash}-${env.BUILD_NUMBER}" - sh( - script: "${docker_build} ${image_name} --spec ${full_name}", - label: 'Build docker image' - ) - return ecr_push(full_name) -} - - -def build_docker_images() { - stage('Docker Image Build') { - parallel( - {% for image in images %} - '{{ image.name }}': { - node('{{ image.platform }}') { - timeout(time: max_time, unit: 'MINUTES') { - init_git() - // We're purposefully not setting the built image here since they - // are not yet being uploaded to tlcpack - // {{ image.name }} = build_image('{{ image.name }}') - built_{{ image.name }} = build_image('{{ image.name }}'); - } - } - }, - {% endfor %} - ) - } -} diff --git a/ci/jenkins/Jenkinsfile.j2 b/ci/jenkins/Jenkinsfile.j2 deleted file mode 100644 index 3aa44294966e..000000000000 --- a/ci/jenkins/Jenkinsfile.j2 +++ /dev/null @@ -1,138 +0,0 @@ -#!groovy -// -*- mode: groovy -*- - -// Licensed to the Apache Software Foundation (ASF) under one -// or more contributor license agreements. See the NOTICE file -// distributed with this work for additional information -// regarding copyright ownership. The ASF licenses this file -// to you under the Apache License, Version 2.0 (the -// "License"); you may not use this file except in compliance -// with the License. You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. - -// Jenkins pipeline -// See documents at https://jenkins.io/doc/book/pipeline/jenkinsfile/ - -// Docker env used for testing -// Different image may have different version tag -// because some of them are more stable than anoter. 
-// -// Docker images are maintained by PMC, cached in dockerhub -// and remains relatively stable over the time. -// Flow for upgrading docker env(need commiter) -// -// - Send PR to upgrade build script in the repo -// - Build the new docker image -// - Tag the docker image with a new version and push to a binary cache. -// - Update the version in the Jenkinsfile, send a PR -// - Fix any issues wrt to the new image version in the PR -// - Merge the PR and now we are in new version -// - Tag the new version as the lates -// - Periodically cleanup the old versions on local workers -// - -// ============================= IMPORTANT NOTE ============================= -// This file is generated by 'jenkins/generate.py'. Do not edit this file directly! -// Make edits to 'jenkins/Jenkinsfile.j2' and regenerate this with -// 'python3 jenkins/generate.py' -// Note: This timestamp is here to ensure that updates to the Jenkinsfile are -// always rebased on main before merging: -// Generated at {{ generated_time }} - -import org.jenkinsci.plugins.pipeline.modeldefinition.Utils -{% import 'ci/jenkins/macros.j2' as m with context -%} - -// NOTE: these lines are scanned by docker/dev_common.sh. Please update the regex as needed. --> -ci_lint = 'tlcpack/ci-lint:20221128-070141-ae4fd7df7' -ci_gpu = 'tlcpack/ci-gpu:20221128-070141-ae4fd7df7' -ci_cpu = 'tlcpack/ci-cpu:20221128-070141-ae4fd7df7' -ci_minimal = 'tlcpack/ci-minimal:20221128-070141-ae4fd7df7' -ci_wasm = 'tlcpack/ci-wasm:20221128-070141-ae4fd7df7' -ci_i386 = 'tlcpack/ci-i386:20221128-070141-ae4fd7df7' -ci_cortexm = 'tlcpack/ci-cortexm:20221128-070141-ae4fd7df7' -ci_arm = 'tlcpack/ci-arm:20221128-070141-ae4fd7df7' -ci_hexagon = 'tlcpack/ci-hexagon:20221025-182121-e41d0ed6e' -ci_riscv = 'tlcpack/ci-riscv:20221128-070141-ae4fd7df7' -// <--- End of regex-scanned config. - -// Parameters to allow overriding (in Jenkins UI), the images -// to be used by a given build. When provided, they take precedence -// over default values above. -properties([ - parameters([ - {% for image in images %} - string(name: '{{ image.name }}_param', defaultValue: ''), - {% endfor %} - ]) -]) - -// Placeholders for newly built Docker image names (if rebuild_docker_images -// is used) -{% for image in images %} - built_{{ image.name }} = null; -{% endfor %} - -// Global variable assigned during Sanity Check that holds the sha1 which should be -// merged into the PR in all branches. 
-upstream_revision = null - -// command to start a docker container -docker_run = 'docker/bash.sh --env CI --env TVM_SHARD_INDEX --env TVM_NUM_SHARDS --env RUN_DISPLAY_URL --env PLATFORM --env SKIP_SLOW_TESTS --env TEST_STEP_NAME' -docker_build = 'docker/build.sh' -// timeout in minutes -max_time = 180 -rebuild_docker_images = false - -// Filenames for stashing between build and test steps -{% set tvm_runtime = ['build/libtvm_runtime.so', 'build/config.cmake'] %} -{% set crttest = ['build/crttest'] %} -{% set tvm_allvisible = ['build/libtvm_allvisible.so'] %} -{% set cpptest = ['build/cpptest', 'build/build.ninja', 'build/CMakeFiles/rules.ninja'] %} -{% set tvm_lib = ['build/libtvm.so'] + tvm_runtime %} -{% set tvm_multilib = ['build/libtvm.so', 'build/libvta_fsim.so'] + tvm_runtime %} -{% set tvm_multilib_tsim = ['build/libvta_tsim.so'] + tvm_multilib %} -{% set microtvm_template_projects = ['build/microtvm_template_projects',] %} -{% set hexagon_api = ['build/hexagon_api_output',] %} -{% set standalone_crt = ['build/standalone_crt', 'build/build.ninja'] %} -s3_bucket = 'tvm-jenkins-artifacts-prod' -s3_prefix = "tvm/${env.BRANCH_NAME}/${env.BUILD_NUMBER}" - -// Jenkins script root directory -jenkins_scripts_root = "ci/scripts/jenkins" -{% set aws_default_region = "us-west-2" %} -{% set aws_ecr_url = "dkr.ecr." + aws_default_region + ".amazonaws.com" %} - -// General note: Jenkins has limits on the size of a method (or top level code) -// that are pretty strict, so most usage of groovy methods in these templates -// are purely to satisfy the JVM -{% include "ci/jenkins/Prepare.groovy.j2" %} -{% include "ci/jenkins/DockerBuild.groovy.j2" %} -{% include "ci/jenkins/Lint.groovy.j2" %} -{% include "ci/jenkins/Build.groovy.j2" %} -{% include "ci/jenkins/Test.groovy.j2" %} -{% include "ci/jenkins/Deploy.groovy.j2" %} - - -cancel_previous_build() - -prepare() - -if (rebuild_docker_images) { - build_docker_images() -} - -lint() - -build() - -test() - -deploy() diff --git a/ci/jenkins/Lint.groovy.j2 b/ci/jenkins/Lint.groovy.j2 deleted file mode 100644 index 3ede64301c93..000000000000 --- a/ci/jenkins/Lint.groovy.j2 +++ /dev/null @@ -1,19 +0,0 @@ -def lint() { - stage('Lint') { - parallel( - {% call m.sharded_lint_step( - name='Lint', - num_shards=2, - node='CPU-SMALL', - ws='tvm/lint', - docker_image='ci_lint', - ) - %} - sh ( - script: "${docker_run} ${ci_lint} ./tests/scripts/task_lint.sh", - label: 'Run lint', - ) - {% endcall %} - ) - } -} diff --git a/ci/jenkins/Test.groovy.j2 b/ci/jenkins/Test.groovy.j2 deleted file mode 100644 index 274a3e2dce6c..000000000000 --- a/ci/jenkins/Test.groovy.j2 +++ /dev/null @@ -1,344 +0,0 @@ -{% set test_method_names = [] %} - -def cpp_unittest(image) { - sh ( - script: "${docker_run} --env CI_NUM_EXECUTORS ${image} ./tests/scripts/task_cpp_unittest.sh", - label: 'Run C++ tests', - ) -} - -def micro_cpp_unittest(image) { - sh ( - script: "${docker_run} --env CI_NUM_EXECUTORS ${image} ./tests/scripts/task_microtvm_cpp_tests.sh build", - label: 'Run microTVM C++ tests', - ) -} - -// We have to do this whacky split of the code from where it's used since the -// JVM limits method length to 64k and we easily exceed that with all this -// autogenerated code. This makes it so each test step is in its own method so -// that each individual method isn't too big. 
-{% call(shard_index, num_shards) m.sharded_test_step( - name="unittest: GPU", - num_shards=3, - node="GPU", - ws="tvm/ut-python-gpu", - platform="gpu", - docker_image="ci_gpu", - test_method_names=test_method_names, -) %} - {% if shard_index == 1 %} - {{ m.download_artifacts(tag='gpu2') }} - sh "${docker_run} --no-gpu ${ci_gpu} ./tests/scripts/task_config_build_gpu_other.sh build" - // These require a GPU to finish the build (i.e. CUDA needs to be load-able) - make_standalone_crt(ci_gpu, 'build') - // make_cpp_tests(ci_gpu, 'build') - // cpp_unittest(ci_gpu) - - sh "rm -rf build" - {{ m.download_artifacts(tag='gpu') }} - ci_setup(ci_gpu) - sh "${docker_run} --no-gpu ${ci_gpu} ./tests/scripts/task_config_build_gpu.sh build" - make_standalone_crt(ci_gpu, 'build') - make_cpp_tests(ci_gpu, 'build') - cpp_unittest(ci_gpu) - micro_cpp_unittest(ci_gpu) - {% else %} - {{ m.download_artifacts(tag='gpu') }} - ci_setup(ci_gpu) - {% endif %} - {% if shard_index == 2 or num_shards < 2 %} - sh ( - script: "${docker_run} ${ci_gpu} ./tests/scripts/task_java_unittest.sh", - label: 'Run Java unit tests', - ) - {% endif %} - sh ( - script: "${docker_run} ${ci_gpu} ./tests/scripts/task_python_unittest_gpuonly.sh", - label: 'Run Python GPU unit tests', - ) - sh ( - script: "${docker_run} ${ci_gpu} ./tests/scripts/task_python_integration_gpuonly.sh", - label: 'Run Python GPU integration tests', - ) -{% endcall %} -{% call(shard_index, num_shards) m.sharded_test_step( - name="integration: CPU", - node="CPU-SMALL", - num_shards=4, - ws="tvm/integration-python-cpu", - platform="cpu", - docker_image="ci_cpu", - test_method_names=test_method_names, -) %} - {{ m.download_artifacts(tag='cpu') }} - ci_setup(ci_cpu) - sh ( - script: "${docker_run} ${ci_cpu} ./tests/scripts/task_python_integration.sh", - label: 'Run CPU integration tests', - ) -{% endcall %} -{% call(shard_index, num_shards) m.sharded_test_step( - name="python: i386", - node="CPU-SMALL", - num_shards=3, - ws="tvm/integration-python-i386", - platform="i386", - docker_image="ci_i386", - test_method_names=test_method_names, -) %} - {{ m.download_artifacts(tag='i386') }} - ci_setup(ci_i386) - {% if shard_index == 1 %} - cpp_unittest(ci_i386) - micro_cpp_unittest(ci_i386) - {% endif %} - python_unittest(ci_i386) - sh ( - script: "${docker_run} ${ci_i386} ./tests/scripts/task_python_integration_i386only.sh", - label: 'Run i386 integration tests', - ) - {% if shard_index == 2 or num_shards < 2 %} - fsim_test(ci_i386) - {% endif %} -{% endcall %} -{% call(shard_index, num_shards) m.sharded_test_step( - name="test: Hexagon", - node="CPU-SMALL", - ws="tvm/test-hexagon", - platform="hexagon", - docker_image="ci_hexagon", - test_method_names=test_method_names, - num_shards=8, -) %} - {{ m.download_artifacts(tag='hexagon') }} - ci_setup(ci_hexagon) - {% if shard_index == 1 %} - cpp_unittest(ci_hexagon) - {% endif %} - sh ( - script: "${docker_run} ${ci_hexagon} ./tests/scripts/task_python_hexagon.sh", - label: 'Run Hexagon tests', - ) -{% endcall %} -{% call(shard_index, num_shards) m.sharded_test_step( - name="integration: aarch64", - num_shards=4, - node="ARM-SMALL", - ws="tvm/ut-python-arm", - platform="arm", - docker_image="ci_arm", - test_method_names=test_method_names, -) %} - {{ m.download_artifacts(tag='arm') }} - ci_setup(ci_arm) - python_unittest(ci_arm) - sh ( - script: "${docker_run} ${ci_arm} ./tests/scripts/task_python_integration.sh", - label: 'Run CPU integration tests', - ) -{% endcall %} -{% call(shard_index, num_shards) m.sharded_test_step( - 
name="topi: GPU", - node="GPU", - num_shards=3, - ws="tvm/topi-python-gpu", - platform="gpu", - docker_image="ci_gpu", - test_method_names=test_method_names, -) %} - {{ m.download_artifacts(tag='gpu') }} - ci_setup(ci_gpu) - sh ( - script: "${docker_run} ${ci_gpu} ./tests/scripts/task_python_topi.sh", - label: 'Run TOPI tests', - ) -{% endcall %} -{% call(shard_index, num_shards) m.sharded_test_step( - name="frontend: GPU", - node="GPU", - num_shards=6, - ws="tvm/frontend-python-gpu", - platform="gpu", - docker_image="ci_gpu", - test_method_names=test_method_names, -) %} - {{ m.download_artifacts(tag='gpu') }} - ci_setup(ci_gpu) - sh ( - script: "${docker_run} ${ci_gpu} ./tests/scripts/task_python_frontend.sh", - label: 'Run Python frontend tests', - ) -{% endcall %} -{% call(shard_index, num_shards) m.sharded_test_step( - name="topi: aarch64", - node="ARM-SMALL", - ws="tvm/ut-python-arm", - platform="arm", - docker_image="ci_arm", - num_shards=2, - test_method_names=test_method_names, -) %} - {{ m.download_artifacts(tag='arm') }} - ci_setup(ci_arm) - {% if shard_index == 1 %} - cpp_unittest(ci_arm) - micro_cpp_unittest(ci_arm) - {% endif %} - sh ( - script: "${docker_run} ${ci_arm} ./tests/scripts/task_python_arm_compute_library.sh", - label: 'Run test_arm_compute_lib test', - ) - sh ( - script: "${docker_run} ${ci_arm} ./tests/scripts/task_python_topi.sh", - label: 'Run TOPI tests', - ) -{% endcall %} -{% call(shard_index, num_shards) m.sharded_test_step( - name="frontend: aarch64", - node="ARM-SMALL", - ws="tvm/frontend-python-arm", - platform="arm", - docker_image="ci_arm", - num_shards=2, - test_method_names=test_method_names, -) %} - {{ m.download_artifacts(tag='arm') }} - ci_setup(ci_arm) - sh ( - script: "${docker_run} ${ci_arm} ./tests/scripts/task_python_frontend_cpu.sh", - label: 'Run Python frontend tests', - ) -{% endcall %} -{% call(shard_index, num_shards) m.sharded_test_step( - name="test: Cortex-M", - node="CPU-SMALL", - ws="tvm/test-cortexm", - platform="cortexm", - docker_image="ci_cortexm", - num_shards=12, - test_method_names=test_method_names, -) %} - {{ m.download_artifacts(tag='cortexm') }} - ci_setup(ci_cortexm) - {% if shard_index == 1%} - cpp_unittest(ci_cortexm) - micro_cpp_unittest(ci_cortexm) - sh ( - script: "${docker_run} ${ci_cortexm} ./tests/scripts/task_demo_microtvm.sh", - label: 'Run microTVM demos', - ) - {% endif %} - sh ( - script: "${docker_run} ${ci_cortexm} ./tests/scripts/task_python_microtvm.sh", - label: 'Run microTVM tests', - ) -{% endcall %} -{% call(shard_index, num_shards) m.sharded_test_step( - name="test: RISC-V", - node="CPU-SMALL", - ws="tvm/test-riscv", - platform="riscv", - docker_image="ci_riscv", - num_shards=1, - test_method_names=test_method_names, -) %} - {{ m.download_artifacts(tag='riscv') }} - ci_setup(ci_riscv) - {% if shard_index == 1%} - cpp_unittest(ci_cortexm) - micro_cpp_unittest(ci_cortexm) - {% endif %} - sh ( - script: "${docker_run} ${ci_riscv} ./tests/scripts/task_riscv_microtvm.sh", - label: 'Run microTVM tests', - ) -{% endcall %} - -def run_unittest_minimal() { - {% call m.test_step_body( - name="unittest: CPU MINIMAL", - node="CPU-SMALL", - ws="tvm/ut-python-cpu-minimal", - platform="minimal", - docker_image="ci_minimal", - ) %} - {{ m.download_artifacts(tag='cpu-minimal') }} - cpp_unittest(ci_minimal) - micro_cpp_unittest(ci_minimal) - python_unittest(ci_minimal) - {% endcall %} -} - -def test() { -stage('Test') { - environment { - SKIP_SLOW_TESTS = "${skip_slow_tests}" - } - parallel( - {% for stage_name, 
method_name in test_method_names %} - '{{ stage_name }}': { - {{ method_name }}() - }, - {% endfor %} - 'unittest: CPU MINIMAL': { - run_unittest_minimal() - }, - {% call m.test_step( - name="unittest: CPU", - node="CPU-SMALL", - ws="tvm/ut-python-cpu", - platform="cpu", - docker_image="ci_cpu", - ) %} - {{ m.download_artifacts(tag='cpu') }} - ci_setup(ci_cpu) - cpp_unittest(ci_cpu) - micro_cpp_unittest(ci_cpu) - python_unittest(ci_cpu) - fsim_test(ci_cpu) - sh ( - script: "${docker_run} ${ci_cpu} ./tests/scripts/task_python_vta_tsim.sh", - label: 'Run VTA tests in TSIM', - ) - {% endcall %} - {% call m.test_step( - name="frontend: CPU", - node="CPU-SMALL", - ws="tvm/frontend-python-cpu", - platform="cpu", - docker_image="ci_cpu", -) %} - {{ m.download_artifacts(tag='cpu') }} - ci_setup(ci_cpu) - sh ( - script: "${docker_run} ${ci_cpu} ./tests/scripts/task_python_frontend_cpu.sh", - label: 'Run Python frontend tests', - ) - {% endcall %} - 'docs: GPU': { - if (!skip_ci) { - node('GPU') { - ws({{ m.per_exec_ws('tvm/docs-python-gpu') }}) { - init_git() - docker_init(ci_gpu) - {{ m.download_artifacts(tag='gpu') }} - timeout(time: 180, unit: 'MINUTES') { - ci_setup(ci_gpu) - sh ( - script: "${docker_run} ${ci_gpu} ./tests/scripts/task_python_docs.sh", - label: 'Build docs', - ) - } - {{ m.upload_artifacts(tag='docs', filenames=["docs.tgz"]) }} - sh( - script: "aws s3 cp --no-progress _docs s3://${s3_bucket}/${s3_prefix}/docs --recursive", - label: 'Upload docs to S3', - ) - } - } - } - }, - ) -} -} diff --git a/ci/jenkins/generated/arm_jenkinsfile.groovy b/ci/jenkins/generated/arm_jenkinsfile.groovy new file mode 100644 index 000000000000..cd6df5b208a9 --- /dev/null +++ b/ci/jenkins/generated/arm_jenkinsfile.groovy @@ -0,0 +1,929 @@ +#!groovy +// -*- mode: groovy -*- + +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Jenkins pipeline +// See documents at https://jenkins.io/doc/book/pipeline/jenkinsfile/ + +// Docker env used for testing +// Different image may have different version tag +// because some of them are more stable than anoter. +// +// Docker images are maintained by PMC, cached in dockerhub +// and remains relatively stable over the time. +// Flow for upgrading docker env(need commiter) +// +// - Send PR to upgrade build script in the repo +// - Build the new docker image +// - Tag the docker image with a new version and push to a binary cache. 
+// - Update the version in the Jenkinsfile, send a PR
+// - Fix any issues with respect to the new image version in the PR
+// - Merge the PR; we are now on the new version
+// - Tag the new version as the latest
+// - Periodically clean up the old versions on local workers
+//
+
+// ============================= IMPORTANT NOTE =============================
+// This file is generated by 'jenkins/generate.py'. Do not edit this file directly!
+// Make edits to 'jenkins/Jenkinsfile.j2' and regenerate this with
+// 'python3 jenkins/generate.py'
+// Note: This timestamp is here to ensure that updates to the Jenkinsfile are
+// always rebased on main before merging:
+// Generated at 2022-12-01T16:52:32.888364
+
+import org.jenkinsci.plugins.pipeline.modeldefinition.Utils
+ci_lint = 'tlcpack/ci-lint:20221013-060115-61c9742ea'
+ci_gpu = 'tlcpack/ci-gpu:20221019-060125-0b4836739'
+ci_cpu = 'tlcpack/ci-cpu:20221013-060115-61c9742ea'
+ci_minimal = 'tlcpack/ci-minimal:20221013-060115-61c9742ea'
+ci_wasm = 'tlcpack/ci-wasm:20221013-060115-61c9742ea'
+ci_i386 = 'tlcpack/ci-i386:20221013-060115-61c9742ea'
+ci_cortexm = 'tlcpack/ci-cortexm:20221013-060115-61c9742ea'
+ci_arm = 'tlcpack/ci-arm:20221013-060115-61c9742ea'
+ci_hexagon = 'tlcpack/ci-hexagon:20221013-060115-61c9742ea'
+ci_riscv = 'tlcpack/ci-riscv:20221013-060115-61c9742ea'
+
+// Parameters to allow overriding (in Jenkins UI), the images
+// to be used by a given build. When provided, they take precedence
+// over default values above.
+properties([
+  parameters([
+    string(name: 'ci_arm_param', defaultValue: ''),
+    string(name: 'ci_cortexm_param', defaultValue: ''),
+    string(name: 'ci_cpu_param', defaultValue: ''),
+    string(name: 'ci_gpu_param', defaultValue: ''),
+    string(name: 'ci_hexagon_param', defaultValue: ''),
+    string(name: 'ci_i386_param', defaultValue: ''),
+    string(name: 'ci_lint_param', defaultValue: ''),
+    string(name: 'ci_minimal_param', defaultValue: ''),
+    string(name: 'ci_riscv_param', defaultValue: ''),
+    string(name: 'ci_wasm_param', defaultValue: ''),
+  ])
+])
+
+// Placeholders for newly built Docker image names (if rebuild_docker_images
+// is used)
+  built_ci_arm = null;
+  built_ci_cortexm = null;
+  built_ci_cpu = null;
+  built_ci_gpu = null;
+  built_ci_hexagon = null;
+  built_ci_i386 = null;
+  built_ci_lint = null;
+  built_ci_minimal = null;
+  built_ci_riscv = null;
+  built_ci_wasm = null;
+
+// Global variable assigned during Sanity Check that holds the sha1 which should be
+// merged into the PR in all branches.
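+// (HEAD when building main, or the tip of origin/main fetched by
+// merge_with_main() when building a PR branch).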
+upstream_revision = null + +// command to start a docker container +docker_run = 'docker/bash.sh --env CI --env TVM_SHARD_INDEX --env TVM_NUM_SHARDS --env RUN_DISPLAY_URL --env PLATFORM --env SKIP_SLOW_TESTS --env TEST_STEP_NAME' +docker_build = 'docker/build.sh' +// timeout in minutes +max_time = 180 +rebuild_docker_images = false + +s3_bucket = 'tvm-jenkins-artifacts-prod' +s3_prefix = "tvm/${env.BRANCH_NAME}/${env.BUILD_NUMBER}" + +// Jenkins script root directory +jenkins_scripts_root = "ci/scripts/jenkins" + + +// General note: Jenkins has limits on the size of a method (or top level code) +// that are pretty strict, so most usage of groovy methods in these templates +// are purely to satisfy the JVM +def per_exec_ws(folder) { + return "workspace/exec_${env.EXECUTOR_NUMBER}/" + folder +} + +// initialize source codes +def init_git() { + retry(5) { + checkout scm + } + + // Add more info about job node + sh ( + script: './tests/scripts/task_show_node_info.sh', + label: 'Show executor node info', + ) + + // Determine merge commit to use for all stages + if (env.BRANCH_NAME == 'main') { + // Only set upstream_revision to HEAD and skip merging to avoid a race with another commit merged to main. + update_upstream_revision("HEAD") + } else { + // This is PR branch so merge with latest main. + merge_with_main() + } + + sh( + script: """ + set -eux + . ${jenkins_scripts_root}/retry.sh + retry 3 timeout 5m git submodule update --init -f --jobs 0 + """, + label: 'Update git submodules', + ) + checkout_trusted_files() +} + +def update_upstream_revision(git_ref) { + if (upstream_revision == null) { + upstream_revision = sh( + script: "git log -1 ${git_ref} --format=\'%H\'", + label: 'Determine upstream revision', + returnStdout: true, + ).trim() + } +} + +def merge_with_main() { + sh ( + script: 'git fetch origin main', + label: 'Fetch upstream', + ) + update_upstream_revision("FETCH_HEAD") + sh ( + script: "git -c user.name=TVM-Jenkins -c user.email=jenkins@tvm.apache.org merge ${upstream_revision}", + label: 'Merge to origin/main' + ) +} + +def docker_init(image) { + // Clear out all Docker images that aren't going to be used + sh( + script: """ + set -eux + docker image ls --all + IMAGES=\$(docker image ls --all --format '{{.Repository}}:{{.Tag}} {{.ID}}') + + echo -e "Found images:\\n\$IMAGES" + echo "\$IMAGES" | { grep -vE '${image}' || test \$? = 1; } | { xargs docker rmi || test \$? = 123; } + + docker image ls --all + """, + label: 'Clean old Docker images', + ) + + if (image.contains("amazonaws.com")) { + // If this string is in the image name it's from ECR and needs to be pulled + // with the right credentials + // ecr_pull(image) + sh "echo Pulling from AWS is not implemented && exit 1" + } else { + sh( + script: """ + set -eux + . ${jenkins_scripts_root}/retry.sh + retry 5 docker pull ${image} + """, + label: 'Pull docker image', + ) + } +} + +def should_skip_slow_tests(pr_number) { + withCredentials([string( + credentialsId: 'tvm-bot-jenkins-reader', + variable: 'GITHUB_TOKEN', + )]) { + // Exit code of 1 means run slow tests, exit code of 0 means skip slow tests + result = sh ( + returnStatus: true, + script: "./${jenkins_scripts_root}/should_run_slow_tests.py --pr '${pr_number}'", + label: 'Check if CI should run slow tests', + ) + } + return result == 0 +} + +def cancel_previous_build() { + // cancel previous build if it is not on main. 
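+  // builds on main always run to completion so that every merged commit
+  // gets a full CI run.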
+  if (env.BRANCH_NAME != 'main') {
+    def buildNumber = env.BUILD_NUMBER as int
+    // Milestone API allows us to cancel previous build
+    // with the same milestone number
+    if (buildNumber > 1) milestone(buildNumber - 1)
+    milestone(buildNumber)
+  }
+}
+
+def checkout_trusted_files() {
+  // trust everything from branch builds
+  if (env.BRANCH_NAME == null || !env.BRANCH_NAME.startsWith('PR-')) {
+    return;
+  }
+
+  // trust people listed in CONTRIBUTORS.md
+  grep_code = sh(
+    returnStatus: true,
+    script: "git show '${upstream_revision}:CONTRIBUTORS.md' | grep '@${env.CHANGE_AUTHOR}'",
+    label: 'Check if change is from a contributor',
+  )
+
+  if (grep_code == 1) {
+    // Any scripts that run on the bare host and not inside a Docker container
+    // (especially those that access secrets) should be checked out here so
+    // only trusted versions are used in CI
+    sh(
+      script: "git checkout ${upstream_revision} ${jenkins_scripts_root}/.",
+      label: 'Check out trusted files',
+    )
+  }
+}
+
+def should_skip_ci(pr_number) {
+  if (env.BRANCH_NAME == null || !env.BRANCH_NAME.startsWith('PR-')) {
+    // never skip CI on build sourced from a branch
+    return false
+  }
+  glob_skip_ci_code = sh (
+    returnStatus: true,
+    script: "./${jenkins_scripts_root}/git_skip_ci_globs.py",
+    label: 'Check if CI should be skipped due to changed files',
+  )
+  if (glob_skip_ci_code == 0) {
+    return true
+  }
+  withCredentials([string(
+    credentialsId: 'tvm-bot-jenkins-reader',
+    variable: 'GITHUB_TOKEN',
+  )]) {
+    // Exit code of 1 means run full CI (or the script had an error, so run
+    // full CI just in case). Exit code of 0 means skip CI.
+    git_skip_ci_code = sh (
+      returnStatus: true,
+      script: "./${jenkins_scripts_root}/git_skip_ci.py --pr '${pr_number}'",
+      label: 'Check if CI should be skipped',
+    )
+  }
+  return git_skip_ci_code == 0
+}
+
+def check_pr(pr_number) {
+  if (env.BRANCH_NAME == null || !env.BRANCH_NAME.startsWith('PR-')) {
+    // never skip CI on build sourced from a branch
+    return false
+  }
+  withCredentials([string(
+    credentialsId: 'tvm-bot-jenkins-reader',
+    variable: 'GITHUB_TOKEN',
+  )]) {
+    sh (
+      script: "python3 ${jenkins_scripts_root}/check_pr.py --pr ${pr_number}",
+      label: 'Check PR title and body',
+    )
+  }
+
+}
+
+def prepare() {
+  stage('Prepare') {
+    node('CPU-SMALL') {
+      ws("workspace/exec_${env.EXECUTOR_NUMBER}/tvm/prepare") {
+        init_git()
+
+        check_pr(env.CHANGE_ID)
+
+        if (env.DETERMINE_DOCKER_IMAGES == 'yes') {
+          sh(
+            script: "./${jenkins_scripts_root}/determine_docker_images.py ci_arm=${ci_arm} ci_cortexm=${ci_cortexm} ci_cpu=${ci_cpu} ci_gpu=${ci_gpu} ci_hexagon=${ci_hexagon} ci_i386=${ci_i386} ci_lint=${ci_lint} ci_minimal=${ci_minimal} ci_riscv=${ci_riscv} ci_wasm=${ci_wasm} ",
+            label: 'Decide whether to use tlcpack or tlcpackstaging for Docker images',
+          )
+          // Pull image names from the results of should_rebuild_docker.py
+          ci_arm = sh(
+            script: "cat .docker-image-names/ci_arm",
+            label: "Find docker image name for ci_arm",
+            returnStdout: true,
+          ).trim()
+          ci_cortexm = sh(
+            script: "cat .docker-image-names/ci_cortexm",
+            label: "Find docker image name for ci_cortexm",
+            returnStdout: true,
+          ).trim()
+          ci_cpu = sh(
+            script: "cat .docker-image-names/ci_cpu",
+            label: "Find docker image name for ci_cpu",
+            returnStdout: true,
+          ).trim()
+          ci_gpu = sh(
+            script: "cat .docker-image-names/ci_gpu",
+            label: "Find docker image name for ci_gpu",
+            returnStdout: true,
+          ).trim()
+          ci_hexagon = sh(
+            script: "cat .docker-image-names/ci_hexagon",
+            label: "Find docker image name for
ci_hexagon", + returnStdout: true, + ).trim() + ci_i386 = sh( + script: "cat .docker-image-names/ci_i386", + label: "Find docker image name for ci_i386", + returnStdout: true, + ).trim() + ci_lint = sh( + script: "cat .docker-image-names/ci_lint", + label: "Find docker image name for ci_lint", + returnStdout: true, + ).trim() + ci_minimal = sh( + script: "cat .docker-image-names/ci_minimal", + label: "Find docker image name for ci_minimal", + returnStdout: true, + ).trim() + ci_riscv = sh( + script: "cat .docker-image-names/ci_riscv", + label: "Find docker image name for ci_riscv", + returnStdout: true, + ).trim() + ci_wasm = sh( + script: "cat .docker-image-names/ci_wasm", + label: "Find docker image name for ci_wasm", + returnStdout: true, + ).trim() + } + + ci_arm = params.ci_arm_param ?: ci_arm + ci_cortexm = params.ci_cortexm_param ?: ci_cortexm + ci_cpu = params.ci_cpu_param ?: ci_cpu + ci_gpu = params.ci_gpu_param ?: ci_gpu + ci_hexagon = params.ci_hexagon_param ?: ci_hexagon + ci_i386 = params.ci_i386_param ?: ci_i386 + ci_lint = params.ci_lint_param ?: ci_lint + ci_minimal = params.ci_minimal_param ?: ci_minimal + ci_riscv = params.ci_riscv_param ?: ci_riscv + ci_wasm = params.ci_wasm_param ?: ci_wasm + + sh (script: """ + echo "Docker images being used in this build:" + echo " ci_arm = ${ci_arm}" + echo " ci_cortexm = ${ci_cortexm}" + echo " ci_cpu = ${ci_cpu}" + echo " ci_gpu = ${ci_gpu}" + echo " ci_hexagon = ${ci_hexagon}" + echo " ci_i386 = ${ci_i386}" + echo " ci_lint = ${ci_lint}" + echo " ci_minimal = ${ci_minimal}" + echo " ci_riscv = ${ci_riscv}" + echo " ci_wasm = ${ci_wasm}" + """, label: 'Docker image names') + + is_docs_only_build = sh ( + returnStatus: true, + script: "./${jenkins_scripts_root}/git_change_docs.sh", + label: 'Check for docs only changes', + ) + skip_ci = should_skip_ci(env.CHANGE_ID) + skip_slow_tests = should_skip_slow_tests(env.CHANGE_ID) + rebuild_docker_images = sh ( + returnStatus: true, + script: "./${jenkins_scripts_root}/git_change_docker.sh", + label: 'Check for any docker changes', + ) + + if (skip_ci) { + // Don't rebuild when skipping CI + rebuild_docker_images = false + } + } + } + } +} +def ci_setup(image) { + sh ( + script: "${docker_run} ${image} ./tests/scripts/task_clear_pytest.sh", + label: 'Clean up old workspace', + ) +} + +def python_unittest(image) { + sh ( + script: "${docker_run} ${image} ./tests/scripts/task_python_unittest.sh", + label: 'Run Python unit tests', + ) +} + +def fsim_test(image) { + sh ( + script: "${docker_run} ${image} ./tests/scripts/task_python_vta_fsim.sh", + label: 'Run VTA tests in FSIM', + ) +} + +def make_standalone_crt(image, build_dir) { + sh ( + script: """ + set -eux + ${docker_run} ${image} python3 ./tests/scripts/task_build.py \ + --sccache-bucket tvm-sccache-prod \ + --cmake-target standalone_crt \ + --build-dir build + ${docker_run} ${image} python3 ./tests/scripts/task_build.py \ + --sccache-bucket tvm-sccache-prod \ + --cmake-target crttest \ + --build-dir build + """, + label: 'Make standalone CRT', + ) +} + +def make_cpp_tests(image, build_dir) { + sh ( + script: """ + set -eux + ${docker_run} ${image} python3 ./tests/scripts/task_build.py \ + --sccache-bucket tvm-sccache-prod \ + --cmake-target cpptest \ + --build-dir ${build_dir} + """, + label: 'Make C++ tests', + ) +} + +def cmake_build(image, path, make_flag) { + sh ( + script: "${docker_run} --env CI_NUM_EXECUTORS ${image} ./tests/scripts/task_build.py --sccache-bucket tvm-sccache-prod", + label: 'Run cmake build', + ) +} +def 
cpp_unittest(image) { + sh ( + script: "${docker_run} --env CI_NUM_EXECUTORS ${image} ./tests/scripts/task_cpp_unittest.sh", + label: 'Run C++ tests', + ) +} + +def micro_cpp_unittest(image) { + sh ( + script: "${docker_run} --env CI_NUM_EXECUTORS ${image} ./tests/scripts/task_microtvm_cpp_tests.sh build", + label: 'Run microTVM C++ tests', + ) +} + +cancel_previous_build() + +prepare() +def build() { + stage('Build') { + if (!skip_ci && is_docs_only_build != 1) { + node('ARM-SMALL') { + ws("workspace/exec_${env.EXECUTOR_NUMBER}/tvm/build-arm") { + init_git() + docker_init(ci_arm) + timeout(time: max_time, unit: 'MINUTES') { + sh ( + script: "${docker_run} ${ci_arm} ./tests/scripts/task_config_build_arm.sh build", + label: 'Create ARM cmake config', + ) + cmake_build(ci_arm, 'build', '-j4') + make_standalone_crt(ci_arm, 'build') + make_cpp_tests(ci_arm, 'build') + sh( + script: "./${jenkins_scripts_root}/s3.py --action upload --bucket ${s3_bucket} --prefix ${s3_prefix}/arm --items build/libtvm.so build/libvta_fsim.so build/libtvm_runtime.so build/config.cmake build/cpptest build/build.ninja build/CMakeFiles/rules.ninja build/crttest build/standalone_crt build/build.ninja", + label: 'Upload artifacts to S3', + ) + } + } + } + } else { + Utils.markStageSkippedForConditional('BUILD: arm') + } + } +} +build() + + + +def shard_run_integration_aarch64_1_of_4() { + if (!skip_ci && is_docs_only_build != 1) { + node('ARM-SMALL') { + ws("workspace/exec_${env.EXECUTOR_NUMBER}/tvm/ut-python-arm") { + try { + init_git() + docker_init(ci_arm) + timeout(time: max_time, unit: 'MINUTES') { + withEnv([ + 'PLATFORM=arm', + 'TEST_STEP_NAME=integration: aarch64', + 'TVM_NUM_SHARDS=4', + 'TVM_SHARD_INDEX=0', + "SKIP_SLOW_TESTS=${skip_slow_tests}"], { + sh( + script: "./${jenkins_scripts_root}/s3.py --action download --bucket ${s3_bucket} --prefix ${s3_prefix}/arm", + label: 'Download artifacts from S3', + ) + + ci_setup(ci_arm) + python_unittest(ci_arm) + sh ( + script: "${docker_run} ${ci_arm} ./tests/scripts/task_python_integration.sh", + label: 'Run CPU integration tests', + ) + }) + } + } finally { + try { + sh( + script: "./${jenkins_scripts_root}/s3.py --action upload --bucket ${s3_bucket} --prefix ${s3_prefix}/pytest-results/integration_aarch64 --items build/pytest-results", + label: 'Upload JUnits to S3', + ) + + junit 'build/pytest-results/*.xml' + } catch (Exception e) { + echo 'Exception during JUnit upload: ' + e.toString() + } + } + } + } + } else { + Utils.markStageSkippedForConditional('integration: aarch64 1 of 4') + } +} + +def shard_run_integration_aarch64_2_of_4() { + if (!skip_ci && is_docs_only_build != 1) { + node('ARM-SMALL') { + ws("workspace/exec_${env.EXECUTOR_NUMBER}/tvm/ut-python-arm") { + try { + init_git() + docker_init(ci_arm) + timeout(time: max_time, unit: 'MINUTES') { + withEnv([ + 'PLATFORM=arm', + 'TEST_STEP_NAME=integration: aarch64', + 'TVM_NUM_SHARDS=4', + 'TVM_SHARD_INDEX=1', + "SKIP_SLOW_TESTS=${skip_slow_tests}"], { + sh( + script: "./${jenkins_scripts_root}/s3.py --action download --bucket ${s3_bucket} --prefix ${s3_prefix}/arm", + label: 'Download artifacts from S3', + ) + + ci_setup(ci_arm) + python_unittest(ci_arm) + sh ( + script: "${docker_run} ${ci_arm} ./tests/scripts/task_python_integration.sh", + label: 'Run CPU integration tests', + ) + }) + } + } finally { + try { + sh( + script: "./${jenkins_scripts_root}/s3.py --action upload --bucket ${s3_bucket} --prefix ${s3_prefix}/pytest-results/integration_aarch64 --items build/pytest-results", + label: 'Upload 
JUnits to S3', + ) + + junit 'build/pytest-results/*.xml' + } catch (Exception e) { + echo 'Exception during JUnit upload: ' + e.toString() + } + } + } + } + } else { + Utils.markStageSkippedForConditional('integration: aarch64 2 of 4') + } +} + +def shard_run_integration_aarch64_3_of_4() { + if (!skip_ci && is_docs_only_build != 1) { + node('ARM-SMALL') { + ws("workspace/exec_${env.EXECUTOR_NUMBER}/tvm/ut-python-arm") { + try { + init_git() + docker_init(ci_arm) + timeout(time: max_time, unit: 'MINUTES') { + withEnv([ + 'PLATFORM=arm', + 'TEST_STEP_NAME=integration: aarch64', + 'TVM_NUM_SHARDS=4', + 'TVM_SHARD_INDEX=2', + "SKIP_SLOW_TESTS=${skip_slow_tests}"], { + sh( + script: "./${jenkins_scripts_root}/s3.py --action download --bucket ${s3_bucket} --prefix ${s3_prefix}/arm", + label: 'Download artifacts from S3', + ) + + ci_setup(ci_arm) + python_unittest(ci_arm) + sh ( + script: "${docker_run} ${ci_arm} ./tests/scripts/task_python_integration.sh", + label: 'Run CPU integration tests', + ) + }) + } + } finally { + try { + sh( + script: "./${jenkins_scripts_root}/s3.py --action upload --bucket ${s3_bucket} --prefix ${s3_prefix}/pytest-results/integration_aarch64 --items build/pytest-results", + label: 'Upload JUnits to S3', + ) + + junit 'build/pytest-results/*.xml' + } catch (Exception e) { + echo 'Exception during JUnit upload: ' + e.toString() + } + } + } + } + } else { + Utils.markStageSkippedForConditional('integration: aarch64 3 of 4') + } +} + +def shard_run_integration_aarch64_4_of_4() { + if (!skip_ci && is_docs_only_build != 1) { + node('ARM-SMALL') { + ws("workspace/exec_${env.EXECUTOR_NUMBER}/tvm/ut-python-arm") { + try { + init_git() + docker_init(ci_arm) + timeout(time: max_time, unit: 'MINUTES') { + withEnv([ + 'PLATFORM=arm', + 'TEST_STEP_NAME=integration: aarch64', + 'TVM_NUM_SHARDS=4', + 'TVM_SHARD_INDEX=3', + "SKIP_SLOW_TESTS=${skip_slow_tests}"], { + sh( + script: "./${jenkins_scripts_root}/s3.py --action download --bucket ${s3_bucket} --prefix ${s3_prefix}/arm", + label: 'Download artifacts from S3', + ) + + ci_setup(ci_arm) + python_unittest(ci_arm) + sh ( + script: "${docker_run} ${ci_arm} ./tests/scripts/task_python_integration.sh", + label: 'Run CPU integration tests', + ) + }) + } + } finally { + try { + sh( + script: "./${jenkins_scripts_root}/s3.py --action upload --bucket ${s3_bucket} --prefix ${s3_prefix}/pytest-results/integration_aarch64 --items build/pytest-results", + label: 'Upload JUnits to S3', + ) + + junit 'build/pytest-results/*.xml' + } catch (Exception e) { + echo 'Exception during JUnit upload: ' + e.toString() + } + } + } + } + } else { + Utils.markStageSkippedForConditional('integration: aarch64 4 of 4') + } +} + + + +def shard_run_topi_aarch64_1_of_2() { + if (!skip_ci && is_docs_only_build != 1) { + node('ARM-SMALL') { + ws("workspace/exec_${env.EXECUTOR_NUMBER}/tvm/ut-python-arm") { + try { + init_git() + docker_init(ci_arm) + timeout(time: max_time, unit: 'MINUTES') { + withEnv([ + 'PLATFORM=arm', + 'TEST_STEP_NAME=topi: aarch64', + 'TVM_NUM_SHARDS=2', + 'TVM_SHARD_INDEX=0', + "SKIP_SLOW_TESTS=${skip_slow_tests}"], { + sh( + script: "./${jenkins_scripts_root}/s3.py --action download --bucket ${s3_bucket} --prefix ${s3_prefix}/arm", + label: 'Download artifacts from S3', + ) + + ci_setup(ci_arm) + cpp_unittest(ci_arm) + micro_cpp_unittest(ci_arm) + sh ( + script: "${docker_run} ${ci_arm} ./tests/scripts/task_python_arm_compute_library.sh", + label: 'Run test_arm_compute_lib test', + ) + sh ( + script: "${docker_run} ${ci_arm} 
./tests/scripts/task_python_topi.sh", + label: 'Run TOPI tests', + ) + }) + } + } finally { + try { + sh( + script: "./${jenkins_scripts_root}/s3.py --action upload --bucket ${s3_bucket} --prefix ${s3_prefix}/pytest-results/topi_aarch64 --items build/pytest-results", + label: 'Upload JUnits to S3', + ) + + junit 'build/pytest-results/*.xml' + } catch (Exception e) { + echo 'Exception during JUnit upload: ' + e.toString() + } + } + } + } + } else { + Utils.markStageSkippedForConditional('topi: aarch64 1 of 2') + } +} + +def shard_run_topi_aarch64_2_of_2() { + if (!skip_ci && is_docs_only_build != 1) { + node('ARM-SMALL') { + ws("workspace/exec_${env.EXECUTOR_NUMBER}/tvm/ut-python-arm") { + try { + init_git() + docker_init(ci_arm) + timeout(time: max_time, unit: 'MINUTES') { + withEnv([ + 'PLATFORM=arm', + 'TEST_STEP_NAME=topi: aarch64', + 'TVM_NUM_SHARDS=2', + 'TVM_SHARD_INDEX=1', + "SKIP_SLOW_TESTS=${skip_slow_tests}"], { + sh( + script: "./${jenkins_scripts_root}/s3.py --action download --bucket ${s3_bucket} --prefix ${s3_prefix}/arm", + label: 'Download artifacts from S3', + ) + + ci_setup(ci_arm) + sh ( + script: "${docker_run} ${ci_arm} ./tests/scripts/task_python_arm_compute_library.sh", + label: 'Run test_arm_compute_lib test', + ) + sh ( + script: "${docker_run} ${ci_arm} ./tests/scripts/task_python_topi.sh", + label: 'Run TOPI tests', + ) + }) + } + } finally { + try { + sh( + script: "./${jenkins_scripts_root}/s3.py --action upload --bucket ${s3_bucket} --prefix ${s3_prefix}/pytest-results/topi_aarch64 --items build/pytest-results", + label: 'Upload JUnits to S3', + ) + + junit 'build/pytest-results/*.xml' + } catch (Exception e) { + echo 'Exception during JUnit upload: ' + e.toString() + } + } + } + } + } else { + Utils.markStageSkippedForConditional('topi: aarch64 2 of 2') + } +} + + + +def shard_run_frontend_aarch64_1_of_2() { + if (!skip_ci && is_docs_only_build != 1) { + node('ARM-SMALL') { + ws("workspace/exec_${env.EXECUTOR_NUMBER}/tvm/frontend-python-arm") { + try { + init_git() + docker_init(ci_arm) + timeout(time: max_time, unit: 'MINUTES') { + withEnv([ + 'PLATFORM=arm', + 'TEST_STEP_NAME=frontend: aarch64', + 'TVM_NUM_SHARDS=2', + 'TVM_SHARD_INDEX=0', + "SKIP_SLOW_TESTS=${skip_slow_tests}"], { + sh( + script: "./${jenkins_scripts_root}/s3.py --action download --bucket ${s3_bucket} --prefix ${s3_prefix}/arm", + label: 'Download artifacts from S3', + ) + + ci_setup(ci_arm) + sh ( + script: "${docker_run} ${ci_arm} ./tests/scripts/task_python_frontend_cpu.sh", + label: 'Run Python frontend tests', + ) + }) + } + } finally { + try { + sh( + script: "./${jenkins_scripts_root}/s3.py --action upload --bucket ${s3_bucket} --prefix ${s3_prefix}/pytest-results/frontend_aarch64 --items build/pytest-results", + label: 'Upload JUnits to S3', + ) + + junit 'build/pytest-results/*.xml' + } catch (Exception e) { + echo 'Exception during JUnit upload: ' + e.toString() + } + } + } + } + } else { + Utils.markStageSkippedForConditional('frontend: aarch64 1 of 2') + } +} + +def shard_run_frontend_aarch64_2_of_2() { + if (!skip_ci && is_docs_only_build != 1) { + node('ARM-SMALL') { + ws("workspace/exec_${env.EXECUTOR_NUMBER}/tvm/frontend-python-arm") { + try { + init_git() + docker_init(ci_arm) + timeout(time: max_time, unit: 'MINUTES') { + withEnv([ + 'PLATFORM=arm', + 'TEST_STEP_NAME=frontend: aarch64', + 'TVM_NUM_SHARDS=2', + 'TVM_SHARD_INDEX=1', + "SKIP_SLOW_TESTS=${skip_slow_tests}"], { + sh( + script: "./${jenkins_scripts_root}/s3.py --action download --bucket ${s3_bucket} --prefix 
${s3_prefix}/arm", + label: 'Download artifacts from S3', + ) + + ci_setup(ci_arm) + sh ( + script: "${docker_run} ${ci_arm} ./tests/scripts/task_python_frontend_cpu.sh", + label: 'Run Python frontend tests', + ) + }) + } + } finally { + try { + sh( + script: "./${jenkins_scripts_root}/s3.py --action upload --bucket ${s3_bucket} --prefix ${s3_prefix}/pytest-results/frontend_aarch64 --items build/pytest-results", + label: 'Upload JUnits to S3', + ) + + junit 'build/pytest-results/*.xml' + } catch (Exception e) { + echo 'Exception during JUnit upload: ' + e.toString() + } + } + } + } + } else { + Utils.markStageSkippedForConditional('frontend: aarch64 2 of 2') + } +} + + +def test() { + stage('Test') { + environment { + SKIP_SLOW_TESTS = "${skip_slow_tests}" + } + parallel( + 'integration: aarch64 1 of 4': { + shard_run_integration_aarch64_1_of_4() + }, + 'integration: aarch64 2 of 4': { + shard_run_integration_aarch64_2_of_4() + }, + 'integration: aarch64 3 of 4': { + shard_run_integration_aarch64_3_of_4() + }, + 'integration: aarch64 4 of 4': { + shard_run_integration_aarch64_4_of_4() + }, + 'topi: aarch64 1 of 2': { + shard_run_topi_aarch64_1_of_2() + }, + 'topi: aarch64 2 of 2': { + shard_run_topi_aarch64_2_of_2() + }, + 'frontend: aarch64 1 of 2': { + shard_run_frontend_aarch64_1_of_2() + }, + 'frontend: aarch64 2 of 2': { + shard_run_frontend_aarch64_2_of_2() + }, + ) + } +} +test() diff --git a/ci/jenkins/generated/cortexm_jenkinsfile.groovy b/ci/jenkins/generated/cortexm_jenkinsfile.groovy new file mode 100644 index 000000000000..36ec0bd38b45 --- /dev/null +++ b/ci/jenkins/generated/cortexm_jenkinsfile.groovy @@ -0,0 +1,1109 @@ +#!groovy +// -*- mode: groovy -*- + +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Jenkins pipeline +// See documents at https://jenkins.io/doc/book/pipeline/jenkinsfile/ + +// Docker env used for testing +// Different image may have different version tag +// because some of them are more stable than anoter. +// +// Docker images are maintained by PMC, cached in dockerhub +// and remains relatively stable over the time. +// Flow for upgrading docker env(need commiter) +// +// - Send PR to upgrade build script in the repo +// - Build the new docker image +// - Tag the docker image with a new version and push to a binary cache. +// - Update the version in the Jenkinsfile, send a PR +// - Fix any issues wrt to the new image version in the PR +// - Merge the PR and now we are in new version +// - Tag the new version as the lates +// - Periodically cleanup the old versions on local workers +// + +// ============================= IMPORTANT NOTE ============================= +// This file is generated by 'jenkins/generate.py'. Do not edit this file directly! 
+// Make edits to 'jenkins/Jenkinsfile.j2' and regenerate this with +// 'python3 jenkins/generate.py' +// Note: This timestamp is here to ensure that updates to the Jenkinsfile are +// always rebased on main before merging: +// Generated at 2022-12-01T17:04:06.689168 + +import org.jenkinsci.plugins.pipeline.modeldefinition.Utils +ci_lint = 'tlcpack/ci-lint:20221013-060115-61c9742ea' +ci_gpu = 'tlcpack/ci-gpu:20221019-060125-0b4836739' +ci_cpu = 'tlcpack/ci-cpu:20221013-060115-61c9742ea' +ci_minimal = 'tlcpack/ci-minimal:20221013-060115-61c9742ea' +ci_wasm = 'tlcpack/ci-wasm:20221013-060115-61c9742ea' +ci_i386 = 'tlcpack/ci-i386:20221013-060115-61c9742ea' +ci_cortexm = 'tlcpack/ci-cortexm:20221013-060115-61c9742ea' +ci_arm = 'tlcpack/ci-arm:20221013-060115-61c9742ea' +ci_hexagon = 'tlcpack/ci-hexagon:20221013-060115-61c9742ea' +ci_riscv = 'tlcpack/ci-riscv:20221013-060115-61c9742ea' + +// Parameters to allow overriding (in Jenkins UI), the images +// to be used by a given build. When provided, they take precedence +// over default values above. +properties([ + parameters([ + string(name: 'ci_arm_param', defaultValue: ''), + string(name: 'ci_cortexm_param', defaultValue: ''), + string(name: 'ci_cpu_param', defaultValue: ''), + string(name: 'ci_gpu_param', defaultValue: ''), + string(name: 'ci_hexagon_param', defaultValue: ''), + string(name: 'ci_i386_param', defaultValue: ''), + string(name: 'ci_lint_param', defaultValue: ''), + string(name: 'ci_minimal_param', defaultValue: ''), + string(name: 'ci_riscv_param', defaultValue: ''), + string(name: 'ci_wasm_param', defaultValue: ''), + ]) +]) + +// Placeholders for newly built Docker image names (if rebuild_docker_images +// is used) + built_ci_arm = null; + built_ci_cortexm = null; + built_ci_cpu = null; + built_ci_gpu = null; + built_ci_hexagon = null; + built_ci_i386 = null; + built_ci_lint = null; + built_ci_minimal = null; + built_ci_riscv = null; + built_ci_wasm = null; + +// Global variable assigned during Sanity Check that holds the sha1 which should be +// merged into the PR in all branches. +upstream_revision = null + +// command to start a docker container +docker_run = 'docker/bash.sh --env CI --env TVM_SHARD_INDEX --env TVM_NUM_SHARDS --env RUN_DISPLAY_URL --env PLATFORM --env SKIP_SLOW_TESTS --env TEST_STEP_NAME' +docker_build = 'docker/build.sh' +// timeout in minutes +max_time = 180 +rebuild_docker_images = false + +s3_bucket = 'tvm-jenkins-artifacts-prod' +s3_prefix = "tvm/${env.BRANCH_NAME}/${env.BUILD_NUMBER}" + +// Jenkins script root directory +jenkins_scripts_root = "ci/scripts/jenkins" + + +// General note: Jenkins has limits on the size of a method (or top level code) +// that are pretty strict, so most usage of groovy methods in these templates +// are purely to satisfy the JVM +def per_exec_ws(folder) { + return "workspace/exec_${env.EXECUTOR_NUMBER}/" + folder +} + +// initialize source codes +def init_git() { + retry(5) { + checkout scm + } + + // Add more info about job node + sh ( + script: './tests/scripts/task_show_node_info.sh', + label: 'Show executor node info', + ) + + // Determine merge commit to use for all stages + if (env.BRANCH_NAME == 'main') { + // Only set upstream_revision to HEAD and skip merging to avoid a race with another commit merged to main. + update_upstream_revision("HEAD") + } else { + // This is PR branch so merge with latest main. + merge_with_main() + } + + sh( + script: """ + set -eux + . 
${jenkins_scripts_root}/retry.sh + retry 3 timeout 5m git submodule update --init -f --jobs 0 + """, + label: 'Update git submodules', + ) + checkout_trusted_files() +} + +def update_upstream_revision(git_ref) { + if (upstream_revision == null) { + upstream_revision = sh( + script: "git log -1 ${git_ref} --format=\'%H\'", + label: 'Determine upstream revision', + returnStdout: true, + ).trim() + } +} + +def merge_with_main() { + sh ( + script: 'git fetch origin main', + label: 'Fetch upstream', + ) + update_upstream_revision("FETCH_HEAD") + sh ( + script: "git -c user.name=TVM-Jenkins -c user.email=jenkins@tvm.apache.org merge ${upstream_revision}", + label: 'Merge to origin/main' + ) +} + +def docker_init(image) { + // Clear out all Docker images that aren't going to be used + sh( + script: """ + set -eux + docker image ls --all + IMAGES=\$(docker image ls --all --format '{{.Repository}}:{{.Tag}} {{.ID}}') + + echo -e "Found images:\\n\$IMAGES" + echo "\$IMAGES" | { grep -vE '${image}' || test \$? = 1; } | { xargs docker rmi || test \$? = 123; } + + docker image ls --all + """, + label: 'Clean old Docker images', + ) + + if (image.contains("amazonaws.com")) { + // If this string is in the image name it's from ECR and needs to be pulled + // with the right credentials + // ecr_pull(image) + sh "echo Pulling from AWS is not implemented && exit 1" + } else { + sh( + script: """ + set -eux + . ${jenkins_scripts_root}/retry.sh + retry 5 docker pull ${image} + """, + label: 'Pull docker image', + ) + } +} + +def should_skip_slow_tests(pr_number) { + withCredentials([string( + credentialsId: 'tvm-bot-jenkins-reader', + variable: 'GITHUB_TOKEN', + )]) { + // Exit code of 1 means run slow tests, exit code of 0 means skip slow tests + result = sh ( + returnStatus: true, + script: "./${jenkins_scripts_root}/should_run_slow_tests.py --pr '${pr_number}'", + label: 'Check if CI should run slow tests', + ) + } + return result == 0 +} + +def cancel_previous_build() { + // cancel previous build if it is not on main. 
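+  // builds on main always run to completion so that every merged commit
+  // gets a full CI run.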
+  if (env.BRANCH_NAME != 'main') {
+    def buildNumber = env.BUILD_NUMBER as int
+    // Milestone API allows us to cancel previous build
+    // with the same milestone number
+    if (buildNumber > 1) milestone(buildNumber - 1)
+    milestone(buildNumber)
+  }
+}
+
+def checkout_trusted_files() {
+  // trust everything from branch builds
+  if (env.BRANCH_NAME == null || !env.BRANCH_NAME.startsWith('PR-')) {
+    return;
+  }
+
+  // trust people listed in CONTRIBUTORS.md
+  grep_code = sh(
+    returnStatus: true,
+    script: "git show '${upstream_revision}:CONTRIBUTORS.md' | grep '@${env.CHANGE_AUTHOR}'",
+    label: 'Check if change is from a contributor',
+  )
+
+  if (grep_code == 1) {
+    // Any scripts that run on the bare host and not inside a Docker container
+    // (especially those that access secrets) should be checked out here so
+    // only trusted versions are used in CI
+    sh(
+      script: "git checkout ${upstream_revision} ${jenkins_scripts_root}/.",
+      label: 'Check out trusted files',
+    )
+  }
+}
+
+def should_skip_ci(pr_number) {
+  if (env.BRANCH_NAME == null || !env.BRANCH_NAME.startsWith('PR-')) {
+    // never skip CI on build sourced from a branch
+    return false
+  }
+  glob_skip_ci_code = sh (
+    returnStatus: true,
+    script: "./${jenkins_scripts_root}/git_skip_ci_globs.py",
+    label: 'Check if CI should be skipped due to changed files',
+  )
+  if (glob_skip_ci_code == 0) {
+    return true
+  }
+  withCredentials([string(
+    credentialsId: 'tvm-bot-jenkins-reader',
+    variable: 'GITHUB_TOKEN',
+  )]) {
+    // Exit code of 1 means run full CI (or the script had an error, so run
+    // full CI just in case). Exit code of 0 means skip CI.
+    git_skip_ci_code = sh (
+      returnStatus: true,
+      script: "./${jenkins_scripts_root}/git_skip_ci.py --pr '${pr_number}'",
+      label: 'Check if CI should be skipped',
+    )
+  }
+  return git_skip_ci_code == 0
+}
+
+def check_pr(pr_number) {
+  if (env.BRANCH_NAME == null || !env.BRANCH_NAME.startsWith('PR-')) {
+    // never skip CI on build sourced from a branch
+    return false
+  }
+  withCredentials([string(
+    credentialsId: 'tvm-bot-jenkins-reader',
+    variable: 'GITHUB_TOKEN',
+  )]) {
+    sh (
+      script: "python3 ${jenkins_scripts_root}/check_pr.py --pr ${pr_number}",
+      label: 'Check PR title and body',
+    )
+  }
+
+}
+
+def prepare() {
+  stage('Prepare') {
+    node('CPU-SMALL') {
+      ws("workspace/exec_${env.EXECUTOR_NUMBER}/tvm/prepare") {
+        init_git()
+
+        check_pr(env.CHANGE_ID)
+
+        if (env.DETERMINE_DOCKER_IMAGES == 'yes') {
+          sh(
+            script: "./${jenkins_scripts_root}/determine_docker_images.py ci_arm=${ci_arm} ci_cortexm=${ci_cortexm} ci_cpu=${ci_cpu} ci_gpu=${ci_gpu} ci_hexagon=${ci_hexagon} ci_i386=${ci_i386} ci_lint=${ci_lint} ci_minimal=${ci_minimal} ci_riscv=${ci_riscv} ci_wasm=${ci_wasm} ",
+            label: 'Decide whether to use tlcpack or tlcpackstaging for Docker images',
+          )
+          // Pull image names from the results of should_rebuild_docker.py
+          ci_arm = sh(
+            script: "cat .docker-image-names/ci_arm",
+            label: "Find docker image name for ci_arm",
+            returnStdout: true,
+          ).trim()
+          ci_cortexm = sh(
+            script: "cat .docker-image-names/ci_cortexm",
+            label: "Find docker image name for ci_cortexm",
+            returnStdout: true,
+          ).trim()
+          ci_cpu = sh(
+            script: "cat .docker-image-names/ci_cpu",
+            label: "Find docker image name for ci_cpu",
+            returnStdout: true,
+          ).trim()
+          ci_gpu = sh(
+            script: "cat .docker-image-names/ci_gpu",
+            label: "Find docker image name for ci_gpu",
+            returnStdout: true,
+          ).trim()
+          ci_hexagon = sh(
+            script: "cat .docker-image-names/ci_hexagon",
+            label: "Find docker image name for
ci_hexagon", + returnStdout: true, + ).trim() + ci_i386 = sh( + script: "cat .docker-image-names/ci_i386", + label: "Find docker image name for ci_i386", + returnStdout: true, + ).trim() + ci_lint = sh( + script: "cat .docker-image-names/ci_lint", + label: "Find docker image name for ci_lint", + returnStdout: true, + ).trim() + ci_minimal = sh( + script: "cat .docker-image-names/ci_minimal", + label: "Find docker image name for ci_minimal", + returnStdout: true, + ).trim() + ci_riscv = sh( + script: "cat .docker-image-names/ci_riscv", + label: "Find docker image name for ci_riscv", + returnStdout: true, + ).trim() + ci_wasm = sh( + script: "cat .docker-image-names/ci_wasm", + label: "Find docker image name for ci_wasm", + returnStdout: true, + ).trim() + } + + ci_arm = params.ci_arm_param ?: ci_arm + ci_cortexm = params.ci_cortexm_param ?: ci_cortexm + ci_cpu = params.ci_cpu_param ?: ci_cpu + ci_gpu = params.ci_gpu_param ?: ci_gpu + ci_hexagon = params.ci_hexagon_param ?: ci_hexagon + ci_i386 = params.ci_i386_param ?: ci_i386 + ci_lint = params.ci_lint_param ?: ci_lint + ci_minimal = params.ci_minimal_param ?: ci_minimal + ci_riscv = params.ci_riscv_param ?: ci_riscv + ci_wasm = params.ci_wasm_param ?: ci_wasm + + sh (script: """ + echo "Docker images being used in this build:" + echo " ci_arm = ${ci_arm}" + echo " ci_cortexm = ${ci_cortexm}" + echo " ci_cpu = ${ci_cpu}" + echo " ci_gpu = ${ci_gpu}" + echo " ci_hexagon = ${ci_hexagon}" + echo " ci_i386 = ${ci_i386}" + echo " ci_lint = ${ci_lint}" + echo " ci_minimal = ${ci_minimal}" + echo " ci_riscv = ${ci_riscv}" + echo " ci_wasm = ${ci_wasm}" + """, label: 'Docker image names') + + is_docs_only_build = sh ( + returnStatus: true, + script: "./${jenkins_scripts_root}/git_change_docs.sh", + label: 'Check for docs only changes', + ) + skip_ci = should_skip_ci(env.CHANGE_ID) + skip_slow_tests = should_skip_slow_tests(env.CHANGE_ID) + rebuild_docker_images = sh ( + returnStatus: true, + script: "./${jenkins_scripts_root}/git_change_docker.sh", + label: 'Check for any docker changes', + ) + + if (skip_ci) { + // Don't rebuild when skipping CI + rebuild_docker_images = false + } + } + } + } +} +def ci_setup(image) { + sh ( + script: "${docker_run} ${image} ./tests/scripts/task_clear_pytest.sh", + label: 'Clean up old workspace', + ) +} + +def python_unittest(image) { + sh ( + script: "${docker_run} ${image} ./tests/scripts/task_python_unittest.sh", + label: 'Run Python unit tests', + ) +} + +def fsim_test(image) { + sh ( + script: "${docker_run} ${image} ./tests/scripts/task_python_vta_fsim.sh", + label: 'Run VTA tests in FSIM', + ) +} + +def make_standalone_crt(image, build_dir) { + sh ( + script: """ + set -eux + ${docker_run} ${image} python3 ./tests/scripts/task_build.py \ + --sccache-bucket tvm-sccache-prod \ + --cmake-target standalone_crt \ + --build-dir build + ${docker_run} ${image} python3 ./tests/scripts/task_build.py \ + --sccache-bucket tvm-sccache-prod \ + --cmake-target crttest \ + --build-dir build + """, + label: 'Make standalone CRT', + ) +} + +def make_cpp_tests(image, build_dir) { + sh ( + script: """ + set -eux + ${docker_run} ${image} python3 ./tests/scripts/task_build.py \ + --sccache-bucket tvm-sccache-prod \ + --cmake-target cpptest \ + --build-dir ${build_dir} + """, + label: 'Make C++ tests', + ) +} + +def cmake_build(image, path, make_flag) { + sh ( + script: "${docker_run} --env CI_NUM_EXECUTORS ${image} ./tests/scripts/task_build.py --sccache-bucket tvm-sccache-prod", + label: 'Run cmake build', + ) +} +def 
cpp_unittest(image) { + sh ( + script: "${docker_run} --env CI_NUM_EXECUTORS ${image} ./tests/scripts/task_cpp_unittest.sh", + label: 'Run C++ tests', + ) +} + +def micro_cpp_unittest(image) { + sh ( + script: "${docker_run} --env CI_NUM_EXECUTORS ${image} ./tests/scripts/task_microtvm_cpp_tests.sh build", + label: 'Run microTVM C++ tests', + ) +} + +cancel_previous_build() + +prepare() +def build() { + stage('Build') { + if (!skip_ci && is_docs_only_build != 1) { + node('CPU-SMALL') { + ws("workspace/exec_${env.EXECUTOR_NUMBER}/tvm/build-cortexm") { + init_git() + docker_init(ci_cortexm) + timeout(time: max_time, unit: 'MINUTES') { + sh ( + script: "${docker_run} ${ci_cortexm} ./tests/scripts/task_config_build_cortexm.sh build", + label: 'Create Cortex-M cmake config', + ) + cmake_build(ci_cortexm, 'build', '-j2') + make_standalone_crt(ci_cortexm, 'build') + make_cpp_tests(ci_cortexm, 'build') + sh( + script: "./${jenkins_scripts_root}/s3.py --action upload --bucket ${s3_bucket} --prefix ${s3_prefix}/cortexm --items build/libtvm.so build/libtvm_runtime.so build/config.cmake build/libtvm_allvisible.so build/crttest build/standalone_crt build/build.ninja build/cpptest build/build.ninja build/CMakeFiles/rules.ninja build/microtvm_template_projects", + label: 'Upload artifacts to S3', + ) + } + } + } + } else { + Utils.markStageSkippedForConditional('BUILD: Cortex-M') + } + } +} +build() + + + +def shard_run_test_Cortex_M_1_of_12() { + if (!skip_ci && is_docs_only_build != 1) { + node('CPU-SMALL') { + ws("workspace/exec_${env.EXECUTOR_NUMBER}/tvm/test-cortexm") { + try { + init_git() + docker_init(ci_cortexm) + timeout(time: max_time, unit: 'MINUTES') { + withEnv([ + 'PLATFORM=cortexm', + 'TEST_STEP_NAME=test: Cortex-M', + 'TVM_NUM_SHARDS=12', + 'TVM_SHARD_INDEX=0', + "SKIP_SLOW_TESTS=${skip_slow_tests}"], { + sh( + script: "./${jenkins_scripts_root}/s3.py --action download --bucket ${s3_bucket} --prefix ${s3_prefix}/cortexm", + label: 'Download artifacts from S3', + ) + + ci_setup(ci_cortexm) + cpp_unittest(ci_cortexm) + micro_cpp_unittest(ci_cortexm) + sh ( + script: "${docker_run} ${ci_cortexm} ./tests/scripts/task_demo_microtvm.sh", + label: 'Run microTVM demos', + ) + sh ( + script: "${docker_run} ${ci_cortexm} ./tests/scripts/task_python_microtvm.sh", + label: 'Run microTVM tests', + ) + }) + } + } finally { + try { + sh( + script: "./${jenkins_scripts_root}/s3.py --action upload --bucket ${s3_bucket} --prefix ${s3_prefix}/pytest-results/test_Cortex_M --items build/pytest-results", + label: 'Upload JUnits to S3', + ) + + junit 'build/pytest-results/*.xml' + } catch (Exception e) { + echo 'Exception during JUnit upload: ' + e.toString() + } + } + } + } + } else { + Utils.markStageSkippedForConditional('test: Cortex-M 1 of 12') + } +} + +def shard_run_test_Cortex_M_2_of_12() { + if (!skip_ci && is_docs_only_build != 1) { + node('CPU-SMALL') { + ws("workspace/exec_${env.EXECUTOR_NUMBER}/tvm/test-cortexm") { + try { + init_git() + docker_init(ci_cortexm) + timeout(time: max_time, unit: 'MINUTES') { + withEnv([ + 'PLATFORM=cortexm', + 'TEST_STEP_NAME=test: Cortex-M', + 'TVM_NUM_SHARDS=12', + 'TVM_SHARD_INDEX=1', + "SKIP_SLOW_TESTS=${skip_slow_tests}"], { + sh( + script: "./${jenkins_scripts_root}/s3.py --action download --bucket ${s3_bucket} --prefix ${s3_prefix}/cortexm", + label: 'Download artifacts from S3', + ) + + ci_setup(ci_cortexm) + sh ( + script: "${docker_run} ${ci_cortexm} ./tests/scripts/task_python_microtvm.sh", + label: 'Run microTVM tests', + ) + }) + } + } finally { + try { 
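+          // This block runs even when the tests above fail, so JUnit
+          // results are still uploaded for failed shards.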
+ sh( + script: "./${jenkins_scripts_root}/s3.py --action upload --bucket ${s3_bucket} --prefix ${s3_prefix}/pytest-results/test_Cortex_M --items build/pytest-results", + label: 'Upload JUnits to S3', + ) + + junit 'build/pytest-results/*.xml' + } catch (Exception e) { + echo 'Exception during JUnit upload: ' + e.toString() + } + } + } + } + } else { + Utils.markStageSkippedForConditional('test: Cortex-M 2 of 12') + } +} + +def shard_run_test_Cortex_M_3_of_12() { + if (!skip_ci && is_docs_only_build != 1) { + node('CPU-SMALL') { + ws("workspace/exec_${env.EXECUTOR_NUMBER}/tvm/test-cortexm") { + try { + init_git() + docker_init(ci_cortexm) + timeout(time: max_time, unit: 'MINUTES') { + withEnv([ + 'PLATFORM=cortexm', + 'TEST_STEP_NAME=test: Cortex-M', + 'TVM_NUM_SHARDS=12', + 'TVM_SHARD_INDEX=2', + "SKIP_SLOW_TESTS=${skip_slow_tests}"], { + sh( + script: "./${jenkins_scripts_root}/s3.py --action download --bucket ${s3_bucket} --prefix ${s3_prefix}/cortexm", + label: 'Download artifacts from S3', + ) + + ci_setup(ci_cortexm) + sh ( + script: "${docker_run} ${ci_cortexm} ./tests/scripts/task_python_microtvm.sh", + label: 'Run microTVM tests', + ) + }) + } + } finally { + try { + sh( + script: "./${jenkins_scripts_root}/s3.py --action upload --bucket ${s3_bucket} --prefix ${s3_prefix}/pytest-results/test_Cortex_M --items build/pytest-results", + label: 'Upload JUnits to S3', + ) + + junit 'build/pytest-results/*.xml' + } catch (Exception e) { + echo 'Exception during JUnit upload: ' + e.toString() + } + } + } + } + } else { + Utils.markStageSkippedForConditional('test: Cortex-M 3 of 12') + } +} + +def shard_run_test_Cortex_M_4_of_12() { + if (!skip_ci && is_docs_only_build != 1) { + node('CPU-SMALL') { + ws("workspace/exec_${env.EXECUTOR_NUMBER}/tvm/test-cortexm") { + try { + init_git() + docker_init(ci_cortexm) + timeout(time: max_time, unit: 'MINUTES') { + withEnv([ + 'PLATFORM=cortexm', + 'TEST_STEP_NAME=test: Cortex-M', + 'TVM_NUM_SHARDS=12', + 'TVM_SHARD_INDEX=3', + "SKIP_SLOW_TESTS=${skip_slow_tests}"], { + sh( + script: "./${jenkins_scripts_root}/s3.py --action download --bucket ${s3_bucket} --prefix ${s3_prefix}/cortexm", + label: 'Download artifacts from S3', + ) + + ci_setup(ci_cortexm) + sh ( + script: "${docker_run} ${ci_cortexm} ./tests/scripts/task_python_microtvm.sh", + label: 'Run microTVM tests', + ) + }) + } + } finally { + try { + sh( + script: "./${jenkins_scripts_root}/s3.py --action upload --bucket ${s3_bucket} --prefix ${s3_prefix}/pytest-results/test_Cortex_M --items build/pytest-results", + label: 'Upload JUnits to S3', + ) + + junit 'build/pytest-results/*.xml' + } catch (Exception e) { + echo 'Exception during JUnit upload: ' + e.toString() + } + } + } + } + } else { + Utils.markStageSkippedForConditional('test: Cortex-M 4 of 12') + } +} + +def shard_run_test_Cortex_M_5_of_12() { + if (!skip_ci && is_docs_only_build != 1) { + node('CPU-SMALL') { + ws("workspace/exec_${env.EXECUTOR_NUMBER}/tvm/test-cortexm") { + try { + init_git() + docker_init(ci_cortexm) + timeout(time: max_time, unit: 'MINUTES') { + withEnv([ + 'PLATFORM=cortexm', + 'TEST_STEP_NAME=test: Cortex-M', + 'TVM_NUM_SHARDS=12', + 'TVM_SHARD_INDEX=4', + "SKIP_SLOW_TESTS=${skip_slow_tests}"], { + sh( + script: "./${jenkins_scripts_root}/s3.py --action download --bucket ${s3_bucket} --prefix ${s3_prefix}/cortexm", + label: 'Download artifacts from S3', + ) + + ci_setup(ci_cortexm) + sh ( + script: "${docker_run} ${ci_cortexm} ./tests/scripts/task_python_microtvm.sh", + label: 'Run microTVM tests', + ) + }) + 
} + } finally { + try { + sh( + script: "./${jenkins_scripts_root}/s3.py --action upload --bucket ${s3_bucket} --prefix ${s3_prefix}/pytest-results/test_Cortex_M --items build/pytest-results", + label: 'Upload JUnits to S3', + ) + + junit 'build/pytest-results/*.xml' + } catch (Exception e) { + echo 'Exception during JUnit upload: ' + e.toString() + } + } + } + } + } else { + Utils.markStageSkippedForConditional('test: Cortex-M 5 of 12') + } +} + +def shard_run_test_Cortex_M_6_of_12() { + if (!skip_ci && is_docs_only_build != 1) { + node('CPU-SMALL') { + ws("workspace/exec_${env.EXECUTOR_NUMBER}/tvm/test-cortexm") { + try { + init_git() + docker_init(ci_cortexm) + timeout(time: max_time, unit: 'MINUTES') { + withEnv([ + 'PLATFORM=cortexm', + 'TEST_STEP_NAME=test: Cortex-M', + 'TVM_NUM_SHARDS=12', + 'TVM_SHARD_INDEX=5', + "SKIP_SLOW_TESTS=${skip_slow_tests}"], { + sh( + script: "./${jenkins_scripts_root}/s3.py --action download --bucket ${s3_bucket} --prefix ${s3_prefix}/cortexm", + label: 'Download artifacts from S3', + ) + + ci_setup(ci_cortexm) + sh ( + script: "${docker_run} ${ci_cortexm} ./tests/scripts/task_python_microtvm.sh", + label: 'Run microTVM tests', + ) + }) + } + } finally { + try { + sh( + script: "./${jenkins_scripts_root}/s3.py --action upload --bucket ${s3_bucket} --prefix ${s3_prefix}/pytest-results/test_Cortex_M --items build/pytest-results", + label: 'Upload JUnits to S3', + ) + + junit 'build/pytest-results/*.xml' + } catch (Exception e) { + echo 'Exception during JUnit upload: ' + e.toString() + } + } + } + } + } else { + Utils.markStageSkippedForConditional('test: Cortex-M 6 of 12') + } +} + +def shard_run_test_Cortex_M_7_of_12() { + if (!skip_ci && is_docs_only_build != 1) { + node('CPU-SMALL') { + ws("workspace/exec_${env.EXECUTOR_NUMBER}/tvm/test-cortexm") { + try { + init_git() + docker_init(ci_cortexm) + timeout(time: max_time, unit: 'MINUTES') { + withEnv([ + 'PLATFORM=cortexm', + 'TEST_STEP_NAME=test: Cortex-M', + 'TVM_NUM_SHARDS=12', + 'TVM_SHARD_INDEX=6', + "SKIP_SLOW_TESTS=${skip_slow_tests}"], { + sh( + script: "./${jenkins_scripts_root}/s3.py --action download --bucket ${s3_bucket} --prefix ${s3_prefix}/cortexm", + label: 'Download artifacts from S3', + ) + + ci_setup(ci_cortexm) + sh ( + script: "${docker_run} ${ci_cortexm} ./tests/scripts/task_python_microtvm.sh", + label: 'Run microTVM tests', + ) + }) + } + } finally { + try { + sh( + script: "./${jenkins_scripts_root}/s3.py --action upload --bucket ${s3_bucket} --prefix ${s3_prefix}/pytest-results/test_Cortex_M --items build/pytest-results", + label: 'Upload JUnits to S3', + ) + + junit 'build/pytest-results/*.xml' + } catch (Exception e) { + echo 'Exception during JUnit upload: ' + e.toString() + } + } + } + } + } else { + Utils.markStageSkippedForConditional('test: Cortex-M 7 of 12') + } +} + +def shard_run_test_Cortex_M_8_of_12() { + if (!skip_ci && is_docs_only_build != 1) { + node('CPU-SMALL') { + ws("workspace/exec_${env.EXECUTOR_NUMBER}/tvm/test-cortexm") { + try { + init_git() + docker_init(ci_cortexm) + timeout(time: max_time, unit: 'MINUTES') { + withEnv([ + 'PLATFORM=cortexm', + 'TEST_STEP_NAME=test: Cortex-M', + 'TVM_NUM_SHARDS=12', + 'TVM_SHARD_INDEX=7', + "SKIP_SLOW_TESTS=${skip_slow_tests}"], { + sh( + script: "./${jenkins_scripts_root}/s3.py --action download --bucket ${s3_bucket} --prefix ${s3_prefix}/cortexm", + label: 'Download artifacts from S3', + ) + + ci_setup(ci_cortexm) + sh ( + script: "${docker_run} ${ci_cortexm} ./tests/scripts/task_python_microtvm.sh", + label: 'Run 
microTVM tests', + ) + }) + } + } finally { + try { + sh( + script: "./${jenkins_scripts_root}/s3.py --action upload --bucket ${s3_bucket} --prefix ${s3_prefix}/pytest-results/test_Cortex_M --items build/pytest-results", + label: 'Upload JUnits to S3', + ) + + junit 'build/pytest-results/*.xml' + } catch (Exception e) { + echo 'Exception during JUnit upload: ' + e.toString() + } + } + } + } + } else { + Utils.markStageSkippedForConditional('test: Cortex-M 8 of 12') + } +} + +def shard_run_test_Cortex_M_9_of_12() { + if (!skip_ci && is_docs_only_build != 1) { + node('CPU-SMALL') { + ws("workspace/exec_${env.EXECUTOR_NUMBER}/tvm/test-cortexm") { + try { + init_git() + docker_init(ci_cortexm) + timeout(time: max_time, unit: 'MINUTES') { + withEnv([ + 'PLATFORM=cortexm', + 'TEST_STEP_NAME=test: Cortex-M', + 'TVM_NUM_SHARDS=12', + 'TVM_SHARD_INDEX=8', + "SKIP_SLOW_TESTS=${skip_slow_tests}"], { + sh( + script: "./${jenkins_scripts_root}/s3.py --action download --bucket ${s3_bucket} --prefix ${s3_prefix}/cortexm", + label: 'Download artifacts from S3', + ) + + ci_setup(ci_cortexm) + sh ( + script: "${docker_run} ${ci_cortexm} ./tests/scripts/task_python_microtvm.sh", + label: 'Run microTVM tests', + ) + }) + } + } finally { + try { + sh( + script: "./${jenkins_scripts_root}/s3.py --action upload --bucket ${s3_bucket} --prefix ${s3_prefix}/pytest-results/test_Cortex_M --items build/pytest-results", + label: 'Upload JUnits to S3', + ) + + junit 'build/pytest-results/*.xml' + } catch (Exception e) { + echo 'Exception during JUnit upload: ' + e.toString() + } + } + } + } + } else { + Utils.markStageSkippedForConditional('test: Cortex-M 9 of 12') + } +} + +def shard_run_test_Cortex_M_10_of_12() { + if (!skip_ci && is_docs_only_build != 1) { + node('CPU-SMALL') { + ws("workspace/exec_${env.EXECUTOR_NUMBER}/tvm/test-cortexm") { + try { + init_git() + docker_init(ci_cortexm) + timeout(time: max_time, unit: 'MINUTES') { + withEnv([ + 'PLATFORM=cortexm', + 'TEST_STEP_NAME=test: Cortex-M', + 'TVM_NUM_SHARDS=12', + 'TVM_SHARD_INDEX=9', + "SKIP_SLOW_TESTS=${skip_slow_tests}"], { + sh( + script: "./${jenkins_scripts_root}/s3.py --action download --bucket ${s3_bucket} --prefix ${s3_prefix}/cortexm", + label: 'Download artifacts from S3', + ) + + ci_setup(ci_cortexm) + sh ( + script: "${docker_run} ${ci_cortexm} ./tests/scripts/task_python_microtvm.sh", + label: 'Run microTVM tests', + ) + }) + } + } finally { + try { + sh( + script: "./${jenkins_scripts_root}/s3.py --action upload --bucket ${s3_bucket} --prefix ${s3_prefix}/pytest-results/test_Cortex_M --items build/pytest-results", + label: 'Upload JUnits to S3', + ) + + junit 'build/pytest-results/*.xml' + } catch (Exception e) { + echo 'Exception during JUnit upload: ' + e.toString() + } + } + } + } + } else { + Utils.markStageSkippedForConditional('test: Cortex-M 10 of 12') + } +} + +def shard_run_test_Cortex_M_11_of_12() { + if (!skip_ci && is_docs_only_build != 1) { + node('CPU-SMALL') { + ws("workspace/exec_${env.EXECUTOR_NUMBER}/tvm/test-cortexm") { + try { + init_git() + docker_init(ci_cortexm) + timeout(time: max_time, unit: 'MINUTES') { + withEnv([ + 'PLATFORM=cortexm', + 'TEST_STEP_NAME=test: Cortex-M', + 'TVM_NUM_SHARDS=12', + 'TVM_SHARD_INDEX=10', + "SKIP_SLOW_TESTS=${skip_slow_tests}"], { + sh( + script: "./${jenkins_scripts_root}/s3.py --action download --bucket ${s3_bucket} --prefix ${s3_prefix}/cortexm", + label: 'Download artifacts from S3', + ) + + ci_setup(ci_cortexm) + sh ( + script: "${docker_run} ${ci_cortexm} 
./tests/scripts/task_python_microtvm.sh", + label: 'Run microTVM tests', + ) + }) + } + } finally { + try { + sh( + script: "./${jenkins_scripts_root}/s3.py --action upload --bucket ${s3_bucket} --prefix ${s3_prefix}/pytest-results/test_Cortex_M --items build/pytest-results", + label: 'Upload JUnits to S3', + ) + + junit 'build/pytest-results/*.xml' + } catch (Exception e) { + echo 'Exception during JUnit upload: ' + e.toString() + } + } + } + } + } else { + Utils.markStageSkippedForConditional('test: Cortex-M 11 of 12') + } +} + +def shard_run_test_Cortex_M_12_of_12() { + if (!skip_ci && is_docs_only_build != 1) { + node('CPU-SMALL') { + ws("workspace/exec_${env.EXECUTOR_NUMBER}/tvm/test-cortexm") { + try { + init_git() + docker_init(ci_cortexm) + timeout(time: max_time, unit: 'MINUTES') { + withEnv([ + 'PLATFORM=cortexm', + 'TEST_STEP_NAME=test: Cortex-M', + 'TVM_NUM_SHARDS=12', + 'TVM_SHARD_INDEX=11', + "SKIP_SLOW_TESTS=${skip_slow_tests}"], { + sh( + script: "./${jenkins_scripts_root}/s3.py --action download --bucket ${s3_bucket} --prefix ${s3_prefix}/cortexm", + label: 'Download artifacts from S3', + ) + + ci_setup(ci_cortexm) + sh ( + script: "${docker_run} ${ci_cortexm} ./tests/scripts/task_python_microtvm.sh", + label: 'Run microTVM tests', + ) + }) + } + } finally { + try { + sh( + script: "./${jenkins_scripts_root}/s3.py --action upload --bucket ${s3_bucket} --prefix ${s3_prefix}/pytest-results/test_Cortex_M --items build/pytest-results", + label: 'Upload JUnits to S3', + ) + + junit 'build/pytest-results/*.xml' + } catch (Exception e) { + echo 'Exception during JUnit upload: ' + e.toString() + } + } + } + } + } else { + Utils.markStageSkippedForConditional('test: Cortex-M 12 of 12') + } +} + + +def test() { + stage('Test') { + environment { + SKIP_SLOW_TESTS = "${skip_slow_tests}" + } + parallel( + 'test: Cortex-M 1 of 12': { + shard_run_test_Cortex_M_1_of_12() + }, + 'test: Cortex-M 2 of 12': { + shard_run_test_Cortex_M_2_of_12() + }, + 'test: Cortex-M 3 of 12': { + shard_run_test_Cortex_M_3_of_12() + }, + 'test: Cortex-M 4 of 12': { + shard_run_test_Cortex_M_4_of_12() + }, + 'test: Cortex-M 5 of 12': { + shard_run_test_Cortex_M_5_of_12() + }, + 'test: Cortex-M 6 of 12': { + shard_run_test_Cortex_M_6_of_12() + }, + 'test: Cortex-M 7 of 12': { + shard_run_test_Cortex_M_7_of_12() + }, + 'test: Cortex-M 8 of 12': { + shard_run_test_Cortex_M_8_of_12() + }, + 'test: Cortex-M 9 of 12': { + shard_run_test_Cortex_M_9_of_12() + }, + 'test: Cortex-M 10 of 12': { + shard_run_test_Cortex_M_10_of_12() + }, + 'test: Cortex-M 11 of 12': { + shard_run_test_Cortex_M_11_of_12() + }, + 'test: Cortex-M 12 of 12': { + shard_run_test_Cortex_M_12_of_12() + }, + ) + } +} +test() diff --git a/ci/jenkins/generated/cpu_jenkinsfile.groovy b/ci/jenkins/generated/cpu_jenkinsfile.groovy new file mode 100644 index 000000000000..8aee3c26ab11 --- /dev/null +++ b/ci/jenkins/generated/cpu_jenkinsfile.groovy @@ -0,0 +1,827 @@ +#!groovy +// -*- mode: groovy -*- + +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. 
You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// Jenkins pipeline
+// See documents at https://jenkins.io/doc/book/pipeline/jenkinsfile/
+
+// Docker env used for testing
+// Different images may have different version tags
+// because some of them are more stable than others.
+//
+// Docker images are maintained by the PMC, cached in dockerhub,
+// and remain relatively stable over time.
+// Flow for upgrading the docker env (needs a committer):
+//
+// - Send PR to upgrade build script in the repo
+// - Build the new docker image
+// - Tag the docker image with a new version and push to a binary cache.
+// - Update the version in the Jenkinsfile, send a PR
+// - Fix any issues with the new image version in the PR
+// - Merge the PR; now we are on the new version
+// - Tag the new version as the latest
+// - Periodically clean up the old versions on local workers
+//
+
+// ============================= IMPORTANT NOTE =============================
+// This file is generated by 'jenkins/generate.py'. Do not edit this file directly!
+// Make edits to 'jenkins/Jenkinsfile.j2' and regenerate this with
+// 'python3 jenkins/generate.py'
+// Note: This timestamp is here to ensure that updates to the Jenkinsfile are
+// always rebased on main before merging:
+// Generated at 2022-12-01T17:05:22.995625
+
+import org.jenkinsci.plugins.pipeline.modeldefinition.Utils
+ci_lint = 'tlcpack/ci-lint:20221013-060115-61c9742ea'
+ci_gpu = 'tlcpack/ci-gpu:20221019-060125-0b4836739'
+ci_cpu = 'tlcpack/ci-cpu:20221013-060115-61c9742ea'
+ci_minimal = 'tlcpack/ci-minimal:20221013-060115-61c9742ea'
+ci_wasm = 'tlcpack/ci-wasm:20221013-060115-61c9742ea'
+ci_i386 = 'tlcpack/ci-i386:20221013-060115-61c9742ea'
+ci_cortexm = 'tlcpack/ci-cortexm:20221013-060115-61c9742ea'
+ci_arm = 'tlcpack/ci-arm:20221013-060115-61c9742ea'
+ci_hexagon = 'tlcpack/ci-hexagon:20221013-060115-61c9742ea'
+ci_riscv = 'tlcpack/ci-riscv:20221013-060115-61c9742ea'
+
+// Parameters that allow overriding (in the Jenkins UI) the images
+// to be used by a given build. When provided, they take precedence
+// over the default values above.
+properties([
+  parameters([
+    string(name: 'ci_arm_param', defaultValue: ''),
+    string(name: 'ci_cortexm_param', defaultValue: ''),
+    string(name: 'ci_cpu_param', defaultValue: ''),
+    string(name: 'ci_gpu_param', defaultValue: ''),
+    string(name: 'ci_hexagon_param', defaultValue: ''),
+    string(name: 'ci_i386_param', defaultValue: ''),
+    string(name: 'ci_lint_param', defaultValue: ''),
+    string(name: 'ci_minimal_param', defaultValue: ''),
+    string(name: 'ci_riscv_param', defaultValue: ''),
+    string(name: 'ci_wasm_param', defaultValue: ''),
+  ])
+])
+
+// Placeholders for newly built Docker image names (if rebuild_docker_images
+// is used)
+  built_ci_arm = null;
+  built_ci_cortexm = null;
+  built_ci_cpu = null;
+  built_ci_gpu = null;
+  built_ci_hexagon = null;
+  built_ci_i386 = null;
+  built_ci_lint = null;
+  built_ci_minimal = null;
+  built_ci_riscv = null;
+  built_ci_wasm = null;
+
+// Global variable assigned during Sanity Check that holds the sha1 which should be
+// merged into the PR in all branches.
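+// The value is resolved exactly once (see update_upstream_revision below) and
+// then reused, for example by checkout_trusted_files to pin the scripts under
+// ci/scripts/jenkins to a trusted revision.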
+upstream_revision = null + +// command to start a docker container +docker_run = 'docker/bash.sh --env CI --env TVM_SHARD_INDEX --env TVM_NUM_SHARDS --env RUN_DISPLAY_URL --env PLATFORM --env SKIP_SLOW_TESTS --env TEST_STEP_NAME' +docker_build = 'docker/build.sh' +// timeout in minutes +max_time = 180 +rebuild_docker_images = false + +s3_bucket = 'tvm-jenkins-artifacts-prod' +s3_prefix = "tvm/${env.BRANCH_NAME}/${env.BUILD_NUMBER}" + +// Jenkins script root directory +jenkins_scripts_root = "ci/scripts/jenkins" + + +// General note: Jenkins has limits on the size of a method (or top level code) +// that are pretty strict, so most usage of groovy methods in these templates +// are purely to satisfy the JVM +def per_exec_ws(folder) { + return "workspace/exec_${env.EXECUTOR_NUMBER}/" + folder +} + +// initialize source codes +def init_git() { + retry(5) { + checkout scm + } + + // Add more info about job node + sh ( + script: './tests/scripts/task_show_node_info.sh', + label: 'Show executor node info', + ) + + // Determine merge commit to use for all stages + if (env.BRANCH_NAME == 'main') { + // Only set upstream_revision to HEAD and skip merging to avoid a race with another commit merged to main. + update_upstream_revision("HEAD") + } else { + // This is PR branch so merge with latest main. + merge_with_main() + } + + sh( + script: """ + set -eux + . ${jenkins_scripts_root}/retry.sh + retry 3 timeout 5m git submodule update --init -f --jobs 0 + """, + label: 'Update git submodules', + ) + checkout_trusted_files() +} + +def update_upstream_revision(git_ref) { + if (upstream_revision == null) { + upstream_revision = sh( + script: "git log -1 ${git_ref} --format=\'%H\'", + label: 'Determine upstream revision', + returnStdout: true, + ).trim() + } +} + +def merge_with_main() { + sh ( + script: 'git fetch origin main', + label: 'Fetch upstream', + ) + update_upstream_revision("FETCH_HEAD") + sh ( + script: "git -c user.name=TVM-Jenkins -c user.email=jenkins@tvm.apache.org merge ${upstream_revision}", + label: 'Merge to origin/main' + ) +} + +def docker_init(image) { + // Clear out all Docker images that aren't going to be used + sh( + script: """ + set -eux + docker image ls --all + IMAGES=\$(docker image ls --all --format '{{.Repository}}:{{.Tag}} {{.ID}}') + + echo -e "Found images:\\n\$IMAGES" + echo "\$IMAGES" | { grep -vE '${image}' || test \$? = 1; } | { xargs docker rmi || test \$? = 123; } + + docker image ls --all + """, + label: 'Clean old Docker images', + ) + + if (image.contains("amazonaws.com")) { + // If this string is in the image name it's from ECR and needs to be pulled + // with the right credentials + // ecr_pull(image) + sh "echo Pulling from AWS is not implemented && exit 1" + } else { + sh( + script: """ + set -eux + . ${jenkins_scripts_root}/retry.sh + retry 5 docker pull ${image} + """, + label: 'Pull docker image', + ) + } +} + +def should_skip_slow_tests(pr_number) { + withCredentials([string( + credentialsId: 'tvm-bot-jenkins-reader', + variable: 'GITHUB_TOKEN', + )]) { + // Exit code of 1 means run slow tests, exit code of 0 means skip slow tests + result = sh ( + returnStatus: true, + script: "./${jenkins_scripts_root}/should_run_slow_tests.py --pr '${pr_number}'", + label: 'Check if CI should run slow tests', + ) + } + return result == 0 +} + +def cancel_previous_build() { + // cancel previous build if it is not on main. 
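+  // Note on the milestone idiom below: when a newer build passes milestone N,
+  // Jenkins aborts older builds that passed milestone N-1 but never reach N,
+  // so the (buildNumber - 1, buildNumber) pair lets each new build cancel a
+  // still-running predecessor.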
+  if (env.BRANCH_NAME != 'main') {
+    def buildNumber = env.BUILD_NUMBER as int
+    // Milestone API allows us to cancel previous build
+    // with the same milestone number
+    if (buildNumber > 1) milestone(buildNumber - 1)
+    milestone(buildNumber)
+  }
+}
+
+def checkout_trusted_files() {
+  // trust everything from branch builds
+  if (env.BRANCH_NAME == null || !env.BRANCH_NAME.startsWith('PR-')) {
+    return;
+  }
+
+  // trust people listed in CONTRIBUTORS.md
+  grep_code = sh(
+    returnStatus: true,
+    script: "git show '${upstream_revision}:CONTRIBUTORS.md' | grep '@${env.CHANGE_AUTHOR}'",
+    label: 'Check if change is from a contributor',
+  )
+
+  if (grep_code == 1) {
+    // Any scripts that run on the bare host and not inside a Docker container
+    // (especially those that access secrets) should be checked out here so
+    // only trusted versions are used in CI
+    sh(
+      script: "git checkout ${upstream_revision} ${jenkins_scripts_root}/.",
+      label: 'Check out trusted files',
+    )
+  }
+}
+
+def should_skip_ci(pr_number) {
+  if (env.BRANCH_NAME == null || !env.BRANCH_NAME.startsWith('PR-')) {
+    // never skip CI on builds sourced from a branch
+    return false
+  }
+  glob_skip_ci_code = sh (
+    returnStatus: true,
+    script: "./${jenkins_scripts_root}/git_skip_ci_globs.py",
+    label: 'Check if CI should be skipped due to changed files',
+  )
+  if (glob_skip_ci_code == 0) {
+    return true
+  }
+  withCredentials([string(
+    credentialsId: 'tvm-bot-jenkins-reader',
+    variable: 'GITHUB_TOKEN',
+  )]) {
+    // Exit code of 1 means run full CI (or the script had an error, so run
+    // full CI just in case). Exit code of 0 means skip CI.
+    git_skip_ci_code = sh (
+      returnStatus: true,
+      script: "./${jenkins_scripts_root}/git_skip_ci.py --pr '${pr_number}'",
+      label: 'Check if CI should be skipped',
+    )
+  }
+  return git_skip_ci_code == 0
+}
+
+def check_pr(pr_number) {
+  if (env.BRANCH_NAME == null || !env.BRANCH_NAME.startsWith('PR-')) {
+    // PR checks only apply to PR builds, not branch builds
+    return false
+  }
+  withCredentials([string(
+    credentialsId: 'tvm-bot-jenkins-reader',
+    variable: 'GITHUB_TOKEN',
+  )]) {
+    sh (
+      script: "python3 ${jenkins_scripts_root}/check_pr.py --pr ${pr_number}",
+      label: 'Check PR title and body',
+    )
+  }
+}
+
+def prepare() {
+  stage('Prepare') {
+    node('CPU-SMALL') {
+      ws("workspace/exec_${env.EXECUTOR_NUMBER}/tvm/prepare") {
+        init_git()
+
+        check_pr(env.CHANGE_ID)
+
+        if (env.DETERMINE_DOCKER_IMAGES == 'yes') {
+          sh(
+            script: "./${jenkins_scripts_root}/determine_docker_images.py ci_arm=${ci_arm} ci_cortexm=${ci_cortexm} ci_cpu=${ci_cpu} ci_gpu=${ci_gpu} ci_hexagon=${ci_hexagon} ci_i386=${ci_i386} ci_lint=${ci_lint} ci_minimal=${ci_minimal} ci_riscv=${ci_riscv} ci_wasm=${ci_wasm} ",
+            label: 'Decide whether to use tlcpack or tlcpackstaging for Docker images',
+          )
+          // Pull image names from the results of determine_docker_images.py
+          ci_arm = sh(
+            script: "cat .docker-image-names/ci_arm",
+            label: "Find docker image name for ci_arm",
+            returnStdout: true,
+          ).trim()
+          ci_cortexm = sh(
+            script: "cat .docker-image-names/ci_cortexm",
+            label: "Find docker image name for ci_cortexm",
+            returnStdout: true,
+          ).trim()
+          ci_cpu = sh(
+            script: "cat .docker-image-names/ci_cpu",
+            label: "Find docker image name for ci_cpu",
+            returnStdout: true,
+          ).trim()
+          ci_gpu = sh(
+            script: "cat .docker-image-names/ci_gpu",
+            label: "Find docker image name for ci_gpu",
+            returnStdout: true,
+          ).trim()
+          ci_hexagon = sh(
+            script: "cat .docker-image-names/ci_hexagon",
+            label: "Find docker image name for 
ci_hexagon", + returnStdout: true, + ).trim() + ci_i386 = sh( + script: "cat .docker-image-names/ci_i386", + label: "Find docker image name for ci_i386", + returnStdout: true, + ).trim() + ci_lint = sh( + script: "cat .docker-image-names/ci_lint", + label: "Find docker image name for ci_lint", + returnStdout: true, + ).trim() + ci_minimal = sh( + script: "cat .docker-image-names/ci_minimal", + label: "Find docker image name for ci_minimal", + returnStdout: true, + ).trim() + ci_riscv = sh( + script: "cat .docker-image-names/ci_riscv", + label: "Find docker image name for ci_riscv", + returnStdout: true, + ).trim() + ci_wasm = sh( + script: "cat .docker-image-names/ci_wasm", + label: "Find docker image name for ci_wasm", + returnStdout: true, + ).trim() + } + + ci_arm = params.ci_arm_param ?: ci_arm + ci_cortexm = params.ci_cortexm_param ?: ci_cortexm + ci_cpu = params.ci_cpu_param ?: ci_cpu + ci_gpu = params.ci_gpu_param ?: ci_gpu + ci_hexagon = params.ci_hexagon_param ?: ci_hexagon + ci_i386 = params.ci_i386_param ?: ci_i386 + ci_lint = params.ci_lint_param ?: ci_lint + ci_minimal = params.ci_minimal_param ?: ci_minimal + ci_riscv = params.ci_riscv_param ?: ci_riscv + ci_wasm = params.ci_wasm_param ?: ci_wasm + + sh (script: """ + echo "Docker images being used in this build:" + echo " ci_arm = ${ci_arm}" + echo " ci_cortexm = ${ci_cortexm}" + echo " ci_cpu = ${ci_cpu}" + echo " ci_gpu = ${ci_gpu}" + echo " ci_hexagon = ${ci_hexagon}" + echo " ci_i386 = ${ci_i386}" + echo " ci_lint = ${ci_lint}" + echo " ci_minimal = ${ci_minimal}" + echo " ci_riscv = ${ci_riscv}" + echo " ci_wasm = ${ci_wasm}" + """, label: 'Docker image names') + + is_docs_only_build = sh ( + returnStatus: true, + script: "./${jenkins_scripts_root}/git_change_docs.sh", + label: 'Check for docs only changes', + ) + skip_ci = should_skip_ci(env.CHANGE_ID) + skip_slow_tests = should_skip_slow_tests(env.CHANGE_ID) + rebuild_docker_images = sh ( + returnStatus: true, + script: "./${jenkins_scripts_root}/git_change_docker.sh", + label: 'Check for any docker changes', + ) + + if (skip_ci) { + // Don't rebuild when skipping CI + rebuild_docker_images = false + } + } + } + } +} +def ci_setup(image) { + sh ( + script: "${docker_run} ${image} ./tests/scripts/task_clear_pytest.sh", + label: 'Clean up old workspace', + ) +} + +def python_unittest(image) { + sh ( + script: "${docker_run} ${image} ./tests/scripts/task_python_unittest.sh", + label: 'Run Python unit tests', + ) +} + +def fsim_test(image) { + sh ( + script: "${docker_run} ${image} ./tests/scripts/task_python_vta_fsim.sh", + label: 'Run VTA tests in FSIM', + ) +} + +def make_standalone_crt(image, build_dir) { + sh ( + script: """ + set -eux + ${docker_run} ${image} python3 ./tests/scripts/task_build.py \ + --sccache-bucket tvm-sccache-prod \ + --cmake-target standalone_crt \ + --build-dir build + ${docker_run} ${image} python3 ./tests/scripts/task_build.py \ + --sccache-bucket tvm-sccache-prod \ + --cmake-target crttest \ + --build-dir build + """, + label: 'Make standalone CRT', + ) +} + +def make_cpp_tests(image, build_dir) { + sh ( + script: """ + set -eux + ${docker_run} ${image} python3 ./tests/scripts/task_build.py \ + --sccache-bucket tvm-sccache-prod \ + --cmake-target cpptest \ + --build-dir ${build_dir} + """, + label: 'Make C++ tests', + ) +} + +def cmake_build(image, path, make_flag) { + sh ( + script: "${docker_run} --env CI_NUM_EXECUTORS ${image} ./tests/scripts/task_build.py --sccache-bucket tvm-sccache-prod", + label: 'Run cmake build', + ) +} +def 
cpp_unittest(image) { + sh ( + script: "${docker_run} --env CI_NUM_EXECUTORS ${image} ./tests/scripts/task_cpp_unittest.sh", + label: 'Run C++ tests', + ) +} + +def micro_cpp_unittest(image) { + sh ( + script: "${docker_run} --env CI_NUM_EXECUTORS ${image} ./tests/scripts/task_microtvm_cpp_tests.sh build", + label: 'Run microTVM C++ tests', + ) +} + +cancel_previous_build() + +prepare() +def build() { + stage('Build') { + if (!skip_ci && is_docs_only_build != 1) { + node('CPU-SMALL') { + ws("workspace/exec_${env.EXECUTOR_NUMBER}/tvm/build-cpu") { + init_git() + docker_init(ci_cpu) + timeout(time: max_time, unit: 'MINUTES') { + sh ( + script: "${docker_run} ${ci_cpu} ./tests/scripts/task_config_build_cpu.sh build", + label: 'Create CPU cmake config', + ) + cmake_build(ci_cpu, 'build', '-j2') + make_standalone_crt(ci_cpu, 'build') + make_cpp_tests(ci_cpu, 'build') + sh( + script: "./${jenkins_scripts_root}/s3.py --action upload --bucket ${s3_bucket} --prefix ${s3_prefix}/cpu --items build/libvta_tsim.so build/libtvm.so build/libvta_fsim.so build/libtvm_runtime.so build/config.cmake build/libtvm_allvisible.so build/crttest build/cpptest build/build.ninja build/CMakeFiles/rules.ninja build/standalone_crt build/build.ninja", + label: 'Upload artifacts to S3', + ) + + ci_setup(ci_cpu) + // sh "${docker_run} ${ci_cpu} ./tests/scripts/task_golang.sh" + // TODO(@jroesch): need to resolve CI issue will turn back on in follow up patch + sh (script: "${docker_run} ${ci_cpu} ./tests/scripts/task_rust.sh", label: 'Rust build and test') + } + } + } + } else { + Utils.markStageSkippedForConditional('BUILD: CPU') + } + } +} +build() + + + +def shard_run_integration_CPU_1_of_4() { + if (!skip_ci && is_docs_only_build != 1) { + node('CPU-SMALL') { + ws("workspace/exec_${env.EXECUTOR_NUMBER}/tvm/integration-python-cpu") { + try { + init_git() + docker_init(ci_cpu) + timeout(time: max_time, unit: 'MINUTES') { + withEnv([ + 'PLATFORM=cpu', + 'TEST_STEP_NAME=integration: CPU', + 'TVM_NUM_SHARDS=4', + 'TVM_SHARD_INDEX=0', + "SKIP_SLOW_TESTS=${skip_slow_tests}"], { + sh( + script: "./${jenkins_scripts_root}/s3.py --action download --bucket ${s3_bucket} --prefix ${s3_prefix}/cpu", + label: 'Download artifacts from S3', + ) + + ci_setup(ci_cpu) + sh ( + script: "${docker_run} ${ci_cpu} ./tests/scripts/task_python_integration.sh", + label: 'Run CPU integration tests', + ) + }) + } + } finally { + try { + sh( + script: "./${jenkins_scripts_root}/s3.py --action upload --bucket ${s3_bucket} --prefix ${s3_prefix}/pytest-results/integration_CPU --items build/pytest-results", + label: 'Upload JUnits to S3', + ) + + junit 'build/pytest-results/*.xml' + } catch (Exception e) { + echo 'Exception during JUnit upload: ' + e.toString() + } + } + } + } + } else { + Utils.markStageSkippedForConditional('integration: CPU 1 of 4') + } +} + +def shard_run_integration_CPU_2_of_4() { + if (!skip_ci && is_docs_only_build != 1) { + node('CPU-SMALL') { + ws("workspace/exec_${env.EXECUTOR_NUMBER}/tvm/integration-python-cpu") { + try { + init_git() + docker_init(ci_cpu) + timeout(time: max_time, unit: 'MINUTES') { + withEnv([ + 'PLATFORM=cpu', + 'TEST_STEP_NAME=integration: CPU', + 'TVM_NUM_SHARDS=4', + 'TVM_SHARD_INDEX=1', + "SKIP_SLOW_TESTS=${skip_slow_tests}"], { + sh( + script: "./${jenkins_scripts_root}/s3.py --action download --bucket ${s3_bucket} --prefix ${s3_prefix}/cpu", + label: 'Download artifacts from S3', + ) + + ci_setup(ci_cpu) + sh ( + script: "${docker_run} ${ci_cpu} ./tests/scripts/task_python_integration.sh", + label: 
'Run CPU integration tests', + ) + }) + } + } finally { + try { + sh( + script: "./${jenkins_scripts_root}/s3.py --action upload --bucket ${s3_bucket} --prefix ${s3_prefix}/pytest-results/integration_CPU --items build/pytest-results", + label: 'Upload JUnits to S3', + ) + + junit 'build/pytest-results/*.xml' + } catch (Exception e) { + echo 'Exception during JUnit upload: ' + e.toString() + } + } + } + } + } else { + Utils.markStageSkippedForConditional('integration: CPU 2 of 4') + } +} + +def shard_run_integration_CPU_3_of_4() { + if (!skip_ci && is_docs_only_build != 1) { + node('CPU-SMALL') { + ws("workspace/exec_${env.EXECUTOR_NUMBER}/tvm/integration-python-cpu") { + try { + init_git() + docker_init(ci_cpu) + timeout(time: max_time, unit: 'MINUTES') { + withEnv([ + 'PLATFORM=cpu', + 'TEST_STEP_NAME=integration: CPU', + 'TVM_NUM_SHARDS=4', + 'TVM_SHARD_INDEX=2', + "SKIP_SLOW_TESTS=${skip_slow_tests}"], { + sh( + script: "./${jenkins_scripts_root}/s3.py --action download --bucket ${s3_bucket} --prefix ${s3_prefix}/cpu", + label: 'Download artifacts from S3', + ) + + ci_setup(ci_cpu) + sh ( + script: "${docker_run} ${ci_cpu} ./tests/scripts/task_python_integration.sh", + label: 'Run CPU integration tests', + ) + }) + } + } finally { + try { + sh( + script: "./${jenkins_scripts_root}/s3.py --action upload --bucket ${s3_bucket} --prefix ${s3_prefix}/pytest-results/integration_CPU --items build/pytest-results", + label: 'Upload JUnits to S3', + ) + + junit 'build/pytest-results/*.xml' + } catch (Exception e) { + echo 'Exception during JUnit upload: ' + e.toString() + } + } + } + } + } else { + Utils.markStageSkippedForConditional('integration: CPU 3 of 4') + } +} + +def shard_run_integration_CPU_4_of_4() { + if (!skip_ci && is_docs_only_build != 1) { + node('CPU-SMALL') { + ws("workspace/exec_${env.EXECUTOR_NUMBER}/tvm/integration-python-cpu") { + try { + init_git() + docker_init(ci_cpu) + timeout(time: max_time, unit: 'MINUTES') { + withEnv([ + 'PLATFORM=cpu', + 'TEST_STEP_NAME=integration: CPU', + 'TVM_NUM_SHARDS=4', + 'TVM_SHARD_INDEX=3', + "SKIP_SLOW_TESTS=${skip_slow_tests}"], { + sh( + script: "./${jenkins_scripts_root}/s3.py --action download --bucket ${s3_bucket} --prefix ${s3_prefix}/cpu", + label: 'Download artifacts from S3', + ) + + ci_setup(ci_cpu) + sh ( + script: "${docker_run} ${ci_cpu} ./tests/scripts/task_python_integration.sh", + label: 'Run CPU integration tests', + ) + }) + } + } finally { + try { + sh( + script: "./${jenkins_scripts_root}/s3.py --action upload --bucket ${s3_bucket} --prefix ${s3_prefix}/pytest-results/integration_CPU --items build/pytest-results", + label: 'Upload JUnits to S3', + ) + + junit 'build/pytest-results/*.xml' + } catch (Exception e) { + echo 'Exception during JUnit upload: ' + e.toString() + } + } + } + } + } else { + Utils.markStageSkippedForConditional('integration: CPU 4 of 4') + } +} + + + +def shard_run_unittest_CPU_1_of_1() { + if (!skip_ci && is_docs_only_build != 1) { + node('CPU-SMALL') { + ws("workspace/exec_${env.EXECUTOR_NUMBER}/tvm/ut-python-cpu") { + try { + init_git() + docker_init(ci_cpu) + timeout(time: max_time, unit: 'MINUTES') { + withEnv([ + 'PLATFORM=cpu', + 'TEST_STEP_NAME=unittest: CPU', + 'TVM_NUM_SHARDS=1', + 'TVM_SHARD_INDEX=0', + "SKIP_SLOW_TESTS=${skip_slow_tests}"], { + sh( + script: "./${jenkins_scripts_root}/s3.py --action download --bucket ${s3_bucket} --prefix ${s3_prefix}/cpu", + label: 'Download artifacts from S3', + ) + + ci_setup(ci_cpu) + cpp_unittest(ci_cpu) + micro_cpp_unittest(ci_cpu) + 
python_unittest(ci_cpu) + fsim_test(ci_cpu) + sh ( + script: "${docker_run} ${ci_cpu} ./tests/scripts/task_python_vta_tsim.sh", + label: 'Run VTA tests in TSIM', + ) + }) + } + } finally { + try { + sh( + script: "./${jenkins_scripts_root}/s3.py --action upload --bucket ${s3_bucket} --prefix ${s3_prefix}/pytest-results/unittest_CPU --items build/pytest-results", + label: 'Upload JUnits to S3', + ) + + junit 'build/pytest-results/*.xml' + } catch (Exception e) { + echo 'Exception during JUnit upload: ' + e.toString() + } + } + } + } + } else { + Utils.markStageSkippedForConditional('unittest: CPU 1 of 1') + } +} + + +def shard_run_frontend_CPU_1_of_1() { + if (!skip_ci && is_docs_only_build != 1) { + node('CPU-SMALL') { + ws("workspace/exec_${env.EXECUTOR_NUMBER}/tvm/frontend-python-cpu") { + try { + init_git() + docker_init(ci_cpu) + timeout(time: max_time, unit: 'MINUTES') { + withEnv([ + 'PLATFORM=cpu', + 'TEST_STEP_NAME=frontend: CPU', + 'TVM_NUM_SHARDS=1', + 'TVM_SHARD_INDEX=0', + "SKIP_SLOW_TESTS=${skip_slow_tests}"], { + sh( + script: "./${jenkins_scripts_root}/s3.py --action download --bucket ${s3_bucket} --prefix ${s3_prefix}/cpu", + label: 'Download artifacts from S3', + ) + + ci_setup(ci_cpu) + sh ( + script: "${docker_run} ${ci_cpu} ./tests/scripts/task_python_frontend_cpu.sh", + label: 'Run Python frontend tests', + ) + }) + } + } finally { + try { + sh( + script: "./${jenkins_scripts_root}/s3.py --action upload --bucket ${s3_bucket} --prefix ${s3_prefix}/pytest-results/frontend_CPU --items build/pytest-results", + label: 'Upload JUnits to S3', + ) + + junit 'build/pytest-results/*.xml' + } catch (Exception e) { + echo 'Exception during JUnit upload: ' + e.toString() + } + } + } + } + } else { + Utils.markStageSkippedForConditional('frontend: CPU 1 of 1') + } +} + + +def test() { + stage('Test') { + environment { + SKIP_SLOW_TESTS = "${skip_slow_tests}" + } + parallel( + 'integration: CPU 1 of 4': { + shard_run_integration_CPU_1_of_4() + }, + 'integration: CPU 2 of 4': { + shard_run_integration_CPU_2_of_4() + }, + 'integration: CPU 3 of 4': { + shard_run_integration_CPU_3_of_4() + }, + 'integration: CPU 4 of 4': { + shard_run_integration_CPU_4_of_4() + }, + 'unittest: CPU 1 of 1': { + shard_run_unittest_CPU_1_of_1() + }, + 'frontend: CPU 1 of 1': { + shard_run_frontend_CPU_1_of_1() + }, + ) + } +} +test() diff --git a/ci/jenkins/generated/docker_jenkinsfile.groovy b/ci/jenkins/generated/docker_jenkinsfile.groovy new file mode 100644 index 000000000000..593639163cc6 --- /dev/null +++ b/ci/jenkins/generated/docker_jenkinsfile.groovy @@ -0,0 +1,945 @@ +#!groovy +// -*- mode: groovy -*- + +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
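+
+// This generated pipeline rebuilds the CI Docker images and, from main, pushes
+// them to the tlcpackstaging Docker Hub account, optionally retagging staging
+// images to tlcpack. For illustration only, with hypothetical values
+// BRANCH_NAME=PR-123, short hash abc1234, and BUILD_NUMBER=7, build_image('ci_cpu')
+// below would produce ci_cpu:PR-123-abc1234-7, which ecr_push then uploads to ECR.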
+
+// Jenkins pipeline
+// See documents at https://jenkins.io/doc/book/pipeline/jenkinsfile/
+
+// Docker env used for testing
+// Different images may have different version tags
+// because some of them are more stable than others.
+//
+// Docker images are maintained by the PMC, cached in dockerhub,
+// and remain relatively stable over time.
+// Flow for upgrading the docker env (needs a committer):
+//
+// - Send PR to upgrade build script in the repo
+// - Build the new docker image
+// - Tag the docker image with a new version and push to a binary cache.
+// - Update the version in the Jenkinsfile, send a PR
+// - Fix any issues with the new image version in the PR
+// - Merge the PR; now we are on the new version
+// - Tag the new version as the latest
+// - Periodically clean up the old versions on local workers
+//
+
+// ============================= IMPORTANT NOTE =============================
+// This file is generated by 'jenkins/generate.py'. Do not edit this file directly!
+// Make edits to 'jenkins/Jenkinsfile.j2' and regenerate this with
+// 'python3 jenkins/generate.py'
+// Note: This timestamp is here to ensure that updates to the Jenkinsfile are
+// always rebased on main before merging:
+// Generated at 2022-12-01T17:07:24.754092
+
+import org.jenkinsci.plugins.pipeline.modeldefinition.Utils
+ci_lint = 'tlcpack/ci-lint:20221013-060115-61c9742ea'
+ci_gpu = 'tlcpack/ci-gpu:20221019-060125-0b4836739'
+ci_cpu = 'tlcpack/ci-cpu:20221013-060115-61c9742ea'
+ci_minimal = 'tlcpack/ci-minimal:20221013-060115-61c9742ea'
+ci_wasm = 'tlcpack/ci-wasm:20221013-060115-61c9742ea'
+ci_i386 = 'tlcpack/ci-i386:20221013-060115-61c9742ea'
+ci_cortexm = 'tlcpack/ci-cortexm:20221013-060115-61c9742ea'
+ci_arm = 'tlcpack/ci-arm:20221013-060115-61c9742ea'
+ci_hexagon = 'tlcpack/ci-hexagon:20221013-060115-61c9742ea'
+ci_riscv = 'tlcpack/ci-riscv:20221013-060115-61c9742ea'
+
+// Parameters that allow overriding (in the Jenkins UI) the images
+// to be used by a given build. When provided, they take precedence
+// over the default values above.
+properties([
+  parameters([
+    string(name: 'ci_arm_param', defaultValue: ''),
+    string(name: 'ci_cortexm_param', defaultValue: ''),
+    string(name: 'ci_cpu_param', defaultValue: ''),
+    string(name: 'ci_gpu_param', defaultValue: ''),
+    string(name: 'ci_hexagon_param', defaultValue: ''),
+    string(name: 'ci_i386_param', defaultValue: ''),
+    string(name: 'ci_lint_param', defaultValue: ''),
+    string(name: 'ci_minimal_param', defaultValue: ''),
+    string(name: 'ci_riscv_param', defaultValue: ''),
+    string(name: 'ci_wasm_param', defaultValue: ''),
+  ])
+])
+
+// Placeholders for newly built Docker image names (if rebuild_docker_images
+// is used)
+  built_ci_arm = null;
+  built_ci_cortexm = null;
+  built_ci_cpu = null;
+  built_ci_gpu = null;
+  built_ci_hexagon = null;
+  built_ci_i386 = null;
+  built_ci_lint = null;
+  built_ci_minimal = null;
+  built_ci_riscv = null;
+  built_ci_wasm = null;
+
+// Global variable assigned during Sanity Check that holds the sha1 which should be
+// merged into the PR in all branches.
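+// During deploy it also feeds the staging image tag, built from the current
+// date plus the first 8 characters of this sha, e.g. (hypothetical values)
+// tlcpackstaging/ci_cpu:20221201-170722-abc12345.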
+upstream_revision = null + +// command to start a docker container +docker_run = 'docker/bash.sh --env CI --env TVM_SHARD_INDEX --env TVM_NUM_SHARDS --env RUN_DISPLAY_URL --env PLATFORM --env SKIP_SLOW_TESTS --env TEST_STEP_NAME' +docker_build = 'docker/build.sh' +// timeout in minutes +max_time = 180 +rebuild_docker_images = false + +s3_bucket = 'tvm-jenkins-artifacts-prod' +s3_prefix = "tvm/${env.BRANCH_NAME}/${env.BUILD_NUMBER}" + +// Jenkins script root directory +jenkins_scripts_root = "ci/scripts/jenkins" + + +// General note: Jenkins has limits on the size of a method (or top level code) +// that are pretty strict, so most usage of groovy methods in these templates +// are purely to satisfy the JVM +def per_exec_ws(folder) { + return "workspace/exec_${env.EXECUTOR_NUMBER}/" + folder +} + +// initialize source codes +def init_git() { + retry(5) { + checkout scm + } + + // Add more info about job node + sh ( + script: './tests/scripts/task_show_node_info.sh', + label: 'Show executor node info', + ) + + // Determine merge commit to use for all stages + if (env.BRANCH_NAME == 'main') { + // Only set upstream_revision to HEAD and skip merging to avoid a race with another commit merged to main. + update_upstream_revision("HEAD") + } else { + // This is PR branch so merge with latest main. + merge_with_main() + } + + sh( + script: """ + set -eux + . ${jenkins_scripts_root}/retry.sh + retry 3 timeout 5m git submodule update --init -f --jobs 0 + """, + label: 'Update git submodules', + ) + checkout_trusted_files() +} + +def update_upstream_revision(git_ref) { + if (upstream_revision == null) { + upstream_revision = sh( + script: "git log -1 ${git_ref} --format=\'%H\'", + label: 'Determine upstream revision', + returnStdout: true, + ).trim() + } +} + +def merge_with_main() { + sh ( + script: 'git fetch origin main', + label: 'Fetch upstream', + ) + update_upstream_revision("FETCH_HEAD") + sh ( + script: "git -c user.name=TVM-Jenkins -c user.email=jenkins@tvm.apache.org merge ${upstream_revision}", + label: 'Merge to origin/main' + ) +} + +def docker_init(image) { + // Clear out all Docker images that aren't going to be used + sh( + script: """ + set -eux + docker image ls --all + IMAGES=\$(docker image ls --all --format '{{.Repository}}:{{.Tag}} {{.ID}}') + + echo -e "Found images:\\n\$IMAGES" + echo "\$IMAGES" | { grep -vE '${image}' || test \$? = 1; } | { xargs docker rmi || test \$? = 123; } + + docker image ls --all + """, + label: 'Clean old Docker images', + ) + + if (image.contains("amazonaws.com")) { + // If this string is in the image name it's from ECR and needs to be pulled + // with the right credentials + // ecr_pull(image) + sh "echo Pulling from AWS is not implemented && exit 1" + } else { + sh( + script: """ + set -eux + . ${jenkins_scripts_root}/retry.sh + retry 5 docker pull ${image} + """, + label: 'Pull docker image', + ) + } +} + +def should_skip_slow_tests(pr_number) { + withCredentials([string( + credentialsId: 'tvm-bot-jenkins-reader', + variable: 'GITHUB_TOKEN', + )]) { + // Exit code of 1 means run slow tests, exit code of 0 means skip slow tests + result = sh ( + returnStatus: true, + script: "./${jenkins_scripts_root}/should_run_slow_tests.py --pr '${pr_number}'", + label: 'Check if CI should run slow tests', + ) + } + return result == 0 +} + +def cancel_previous_build() { + // cancel previous build if it is not on main. 
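+  // env.BUILD_NUMBER is a string, hence the explicit 'as int' cast below.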
+  if (env.BRANCH_NAME != 'main') {
+    def buildNumber = env.BUILD_NUMBER as int
+    // Milestone API allows us to cancel previous build
+    // with the same milestone number
+    if (buildNumber > 1) milestone(buildNumber - 1)
+    milestone(buildNumber)
+  }
+}
+
+def checkout_trusted_files() {
+  // trust everything from branch builds
+  if (env.BRANCH_NAME == null || !env.BRANCH_NAME.startsWith('PR-')) {
+    return;
+  }
+
+  // trust people listed in CONTRIBUTORS.md
+  grep_code = sh(
+    returnStatus: true,
+    script: "git show '${upstream_revision}:CONTRIBUTORS.md' | grep '@${env.CHANGE_AUTHOR}'",
+    label: 'Check if change is from a contributor',
+  )
+
+  if (grep_code == 1) {
+    // Any scripts that run on the bare host and not inside a Docker container
+    // (especially those that access secrets) should be checked out here so
+    // only trusted versions are used in CI
+    sh(
+      script: "git checkout ${upstream_revision} ${jenkins_scripts_root}/.",
+      label: 'Check out trusted files',
+    )
+  }
+}
+
+def should_skip_ci(pr_number) {
+  if (env.BRANCH_NAME == null || !env.BRANCH_NAME.startsWith('PR-')) {
+    // never skip CI on builds sourced from a branch
+    return false
+  }
+  glob_skip_ci_code = sh (
+    returnStatus: true,
+    script: "./${jenkins_scripts_root}/git_skip_ci_globs.py",
+    label: 'Check if CI should be skipped due to changed files',
+  )
+  if (glob_skip_ci_code == 0) {
+    return true
+  }
+  withCredentials([string(
+    credentialsId: 'tvm-bot-jenkins-reader',
+    variable: 'GITHUB_TOKEN',
+  )]) {
+    // Exit code of 1 means run full CI (or the script had an error, so run
+    // full CI just in case). Exit code of 0 means skip CI.
+    git_skip_ci_code = sh (
+      returnStatus: true,
+      script: "./${jenkins_scripts_root}/git_skip_ci.py --pr '${pr_number}'",
+      label: 'Check if CI should be skipped',
+    )
+  }
+  return git_skip_ci_code == 0
+}
+
+def check_pr(pr_number) {
+  if (env.BRANCH_NAME == null || !env.BRANCH_NAME.startsWith('PR-')) {
+    // PR checks only apply to PR builds, not branch builds
+    return false
+  }
+  withCredentials([string(
+    credentialsId: 'tvm-bot-jenkins-reader',
+    variable: 'GITHUB_TOKEN',
+  )]) {
+    sh (
+      script: "python3 ${jenkins_scripts_root}/check_pr.py --pr ${pr_number}",
+      label: 'Check PR title and body',
+    )
+  }
+}
+
+def prepare() {
+  stage('Prepare') {
+    node('CPU-SMALL') {
+      ws("workspace/exec_${env.EXECUTOR_NUMBER}/tvm/prepare") {
+        init_git()
+
+        check_pr(env.CHANGE_ID)
+
+        if (env.DETERMINE_DOCKER_IMAGES == 'yes') {
+          sh(
+            script: "./${jenkins_scripts_root}/determine_docker_images.py ci_arm=${ci_arm} ci_cortexm=${ci_cortexm} ci_cpu=${ci_cpu} ci_gpu=${ci_gpu} ci_hexagon=${ci_hexagon} ci_i386=${ci_i386} ci_lint=${ci_lint} ci_minimal=${ci_minimal} ci_riscv=${ci_riscv} ci_wasm=${ci_wasm} ",
+            label: 'Decide whether to use tlcpack or tlcpackstaging for Docker images',
+          )
+          // Pull image names from the results of determine_docker_images.py
+          ci_arm = sh(
+            script: "cat .docker-image-names/ci_arm",
+            label: "Find docker image name for ci_arm",
+            returnStdout: true,
+          ).trim()
+          ci_cortexm = sh(
+            script: "cat .docker-image-names/ci_cortexm",
+            label: "Find docker image name for ci_cortexm",
+            returnStdout: true,
+          ).trim()
+          ci_cpu = sh(
+            script: "cat .docker-image-names/ci_cpu",
+            label: "Find docker image name for ci_cpu",
+            returnStdout: true,
+          ).trim()
+          ci_gpu = sh(
+            script: "cat .docker-image-names/ci_gpu",
+            label: "Find docker image name for ci_gpu",
+            returnStdout: true,
+          ).trim()
+          ci_hexagon = sh(
+            script: "cat .docker-image-names/ci_hexagon",
+            label: "Find docker image name for 
ci_hexagon", + returnStdout: true, + ).trim() + ci_i386 = sh( + script: "cat .docker-image-names/ci_i386", + label: "Find docker image name for ci_i386", + returnStdout: true, + ).trim() + ci_lint = sh( + script: "cat .docker-image-names/ci_lint", + label: "Find docker image name for ci_lint", + returnStdout: true, + ).trim() + ci_minimal = sh( + script: "cat .docker-image-names/ci_minimal", + label: "Find docker image name for ci_minimal", + returnStdout: true, + ).trim() + ci_riscv = sh( + script: "cat .docker-image-names/ci_riscv", + label: "Find docker image name for ci_riscv", + returnStdout: true, + ).trim() + ci_wasm = sh( + script: "cat .docker-image-names/ci_wasm", + label: "Find docker image name for ci_wasm", + returnStdout: true, + ).trim() + } + + ci_arm = params.ci_arm_param ?: ci_arm + ci_cortexm = params.ci_cortexm_param ?: ci_cortexm + ci_cpu = params.ci_cpu_param ?: ci_cpu + ci_gpu = params.ci_gpu_param ?: ci_gpu + ci_hexagon = params.ci_hexagon_param ?: ci_hexagon + ci_i386 = params.ci_i386_param ?: ci_i386 + ci_lint = params.ci_lint_param ?: ci_lint + ci_minimal = params.ci_minimal_param ?: ci_minimal + ci_riscv = params.ci_riscv_param ?: ci_riscv + ci_wasm = params.ci_wasm_param ?: ci_wasm + + sh (script: """ + echo "Docker images being used in this build:" + echo " ci_arm = ${ci_arm}" + echo " ci_cortexm = ${ci_cortexm}" + echo " ci_cpu = ${ci_cpu}" + echo " ci_gpu = ${ci_gpu}" + echo " ci_hexagon = ${ci_hexagon}" + echo " ci_i386 = ${ci_i386}" + echo " ci_lint = ${ci_lint}" + echo " ci_minimal = ${ci_minimal}" + echo " ci_riscv = ${ci_riscv}" + echo " ci_wasm = ${ci_wasm}" + """, label: 'Docker image names') + + is_docs_only_build = sh ( + returnStatus: true, + script: "./${jenkins_scripts_root}/git_change_docs.sh", + label: 'Check for docs only changes', + ) + skip_ci = should_skip_ci(env.CHANGE_ID) + skip_slow_tests = should_skip_slow_tests(env.CHANGE_ID) + rebuild_docker_images = sh ( + returnStatus: true, + script: "./${jenkins_scripts_root}/git_change_docker.sh", + label: 'Check for any docker changes', + ) + + if (skip_ci) { + // Don't rebuild when skipping CI + rebuild_docker_images = false + } + } + } + } +} +def ci_setup(image) { + sh ( + script: "${docker_run} ${image} ./tests/scripts/task_clear_pytest.sh", + label: 'Clean up old workspace', + ) +} + +def python_unittest(image) { + sh ( + script: "${docker_run} ${image} ./tests/scripts/task_python_unittest.sh", + label: 'Run Python unit tests', + ) +} + +def fsim_test(image) { + sh ( + script: "${docker_run} ${image} ./tests/scripts/task_python_vta_fsim.sh", + label: 'Run VTA tests in FSIM', + ) +} + +def make_standalone_crt(image, build_dir) { + sh ( + script: """ + set -eux + ${docker_run} ${image} python3 ./tests/scripts/task_build.py \ + --sccache-bucket tvm-sccache-prod \ + --cmake-target standalone_crt \ + --build-dir build + ${docker_run} ${image} python3 ./tests/scripts/task_build.py \ + --sccache-bucket tvm-sccache-prod \ + --cmake-target crttest \ + --build-dir build + """, + label: 'Make standalone CRT', + ) +} + +def make_cpp_tests(image, build_dir) { + sh ( + script: """ + set -eux + ${docker_run} ${image} python3 ./tests/scripts/task_build.py \ + --sccache-bucket tvm-sccache-prod \ + --cmake-target cpptest \ + --build-dir ${build_dir} + """, + label: 'Make C++ tests', + ) +} + +def cmake_build(image, path, make_flag) { + sh ( + script: "${docker_run} --env CI_NUM_EXECUTORS ${image} ./tests/scripts/task_build.py --sccache-bucket tvm-sccache-prod", + label: 'Run cmake build', + ) +} +def 
cpp_unittest(image) { + sh ( + script: "${docker_run} --env CI_NUM_EXECUTORS ${image} ./tests/scripts/task_cpp_unittest.sh", + label: 'Run C++ tests', + ) +} + +def micro_cpp_unittest(image) { + sh ( + script: "${docker_run} --env CI_NUM_EXECUTORS ${image} ./tests/scripts/task_microtvm_cpp_tests.sh build", + label: 'Run microTVM C++ tests', + ) +} + +cancel_previous_build() + +prepare() +def ecr_push(full_name) { + aws_account_id = sh( + returnStdout: true, + script: 'aws sts get-caller-identity | grep Account | cut -f4 -d\\"', + label: 'Get AWS ID' + ).trim() + + def ecr_name = "${aws_account_id}.dkr.ecr.us-west-2.amazonaws.com/${full_name}" + try { + withEnv([ + "AWS_ACCOUNT_ID=${aws_account_id}", + 'AWS_DEFAULT_REGION=us-west-2', + "AWS_ECR_REPO=${aws_account_id}.dkr.ecr.us-west-2.amazonaws.com"]) { + sh( + script: ''' + set -eux + aws ecr get-login-password --region $AWS_DEFAULT_REGION | docker login --username AWS --password-stdin $AWS_ECR_REPO + ''', + label: 'Log in to ECR' + ) + sh( + script: """ + set -x + . ${jenkins_scripts_root}/retry.sh + docker tag ${full_name} \$AWS_ECR_REPO/${full_name} + retry 5 docker push \$AWS_ECR_REPO/${full_name} + """, + label: 'Upload image to ECR' + ) + } + } finally { + withEnv([ + "AWS_ACCOUNT_ID=${aws_account_id}", + 'AWS_DEFAULT_REGION=us-west-2', + "AWS_ECR_REPO=${aws_account_id}.dkr.ecr.us-west-2.amazonaws.com"]) { + sh( + script: 'docker logout $AWS_ECR_REPO', + label: 'Clean up login credentials' + ) + } + } + return ecr_name +} + +def ecr_pull(full_name) { + aws_account_id = sh( + returnStdout: true, + script: 'aws sts get-caller-identity | grep Account | cut -f4 -d\\"', + label: 'Get AWS ID' + ).trim() + + try { + withEnv([ + "AWS_ACCOUNT_ID=${aws_account_id}", + 'AWS_DEFAULT_REGION=us-west-2', + "AWS_ECR_REPO=${aws_account_id}.dkr.ecr.us-west-2.amazonaws.com"]) { + sh( + script: ''' + set -eux + aws ecr get-login-password --region $AWS_DEFAULT_REGION | docker login --username AWS --password-stdin $AWS_ECR_REPO + ''', + label: 'Log in to ECR' + ) + sh( + script: """ + set -eux + . ci/scripts/retry.sh + retry 5 docker pull ${full_name} + """, + label: 'Pull image from ECR' + ) + } + } finally { + withEnv([ + "AWS_ACCOUNT_ID=${aws_account_id}", + 'AWS_DEFAULT_REGION=us-west-2', + "AWS_ECR_REPO=${aws_account_id}.dkr.ecr.us-west-2.amazonaws.com"]) { + sh( + script: 'docker logout $AWS_ECR_REPO', + label: 'Clean up login credentials' + ) + } + } +} + +def build_image(image_name) { + hash = sh( + returnStdout: true, + script: 'git log -1 --format=\'%h\'' + ).trim() + def full_name = "${image_name}:${env.BRANCH_NAME}-${hash}-${env.BUILD_NUMBER}" + sh( + script: "${docker_build} ${image_name} --spec ${full_name}", + label: 'Build docker image' + ) + return ecr_push(full_name) +} + +def update_docker(ecr_image, hub_image) { + if (ecr_image == null) { + sh("image was not rebuilt, skipping") + return + } + if (!ecr_image.contains("amazonaws.com")) { + sh("echo \"Skipping '${ecr_image}' -> '${hub_image}' since it doesn\'t look like an ECR image\"") + return + } + docker_init(ecr_image) + sh( + script: """ + set -eux + . 
${jenkins_scripts_root}/retry.sh + docker tag \ + ${ecr_image} \ + ${hub_image} + retry 5 docker push ${hub_image} + """, + label: "Update ${hub_image} on Docker Hub", + ) +} + +stage('Docker Image Build') { + parallel( + 'ci_arm': { + node('ARM') { + timeout(time: max_time, unit: 'MINUTES') { + init_git() + // We're purposefully not setting the built image here since they + // are not yet being uploaded to tlcpack + // ci_arm = build_image('ci_arm') + built_ci_arm = build_image('ci_arm'); + } + } + }, + 'ci_cortexm': { + node('CPU') { + timeout(time: max_time, unit: 'MINUTES') { + init_git() + // We're purposefully not setting the built image here since they + // are not yet being uploaded to tlcpack + // ci_cortexm = build_image('ci_cortexm') + built_ci_cortexm = build_image('ci_cortexm'); + } + } + }, + 'ci_cpu': { + node('CPU') { + timeout(time: max_time, unit: 'MINUTES') { + init_git() + // We're purposefully not setting the built image here since they + // are not yet being uploaded to tlcpack + // ci_cpu = build_image('ci_cpu') + built_ci_cpu = build_image('ci_cpu'); + } + } + }, + 'ci_gpu': { + node('GPU') { + timeout(time: max_time, unit: 'MINUTES') { + init_git() + // We're purposefully not setting the built image here since they + // are not yet being uploaded to tlcpack + // ci_gpu = build_image('ci_gpu') + built_ci_gpu = build_image('ci_gpu'); + } + } + }, + 'ci_hexagon': { + node('CPU') { + timeout(time: max_time, unit: 'MINUTES') { + init_git() + // We're purposefully not setting the built image here since they + // are not yet being uploaded to tlcpack + // ci_hexagon = build_image('ci_hexagon') + built_ci_hexagon = build_image('ci_hexagon'); + } + } + }, + 'ci_i386': { + node('CPU') { + timeout(time: max_time, unit: 'MINUTES') { + init_git() + // We're purposefully not setting the built image here since they + // are not yet being uploaded to tlcpack + // ci_i386 = build_image('ci_i386') + built_ci_i386 = build_image('ci_i386'); + } + } + }, + 'ci_lint': { + node('CPU') { + timeout(time: max_time, unit: 'MINUTES') { + init_git() + // We're purposefully not setting the built image here since they + // are not yet being uploaded to tlcpack + // ci_lint = build_image('ci_lint') + built_ci_lint = build_image('ci_lint'); + } + } + }, + 'ci_minimal': { + node('CPU') { + timeout(time: max_time, unit: 'MINUTES') { + init_git() + // We're purposefully not setting the built image here since they + // are not yet being uploaded to tlcpack + // ci_minimal = build_image('ci_minimal') + built_ci_minimal = build_image('ci_minimal'); + } + } + }, + 'ci_riscv': { + node('CPU') { + timeout(time: max_time, unit: 'MINUTES') { + init_git() + // We're purposefully not setting the built image here since they + // are not yet being uploaded to tlcpack + // ci_riscv = build_image('ci_riscv') + built_ci_riscv = build_image('ci_riscv'); + } + } + }, + 'ci_wasm': { + node('CPU') { + timeout(time: max_time, unit: 'MINUTES') { + init_git() + // We're purposefully not setting the built image here since they + // are not yet being uploaded to tlcpack + // ci_wasm = build_image('ci_wasm') + built_ci_wasm = build_image('ci_wasm'); + } + } + }, + ) +} + +def deploy() { + stage('Deploy') { + if (env.BRANCH_NAME == 'main') { + parallel( + 'Upload built Docker images': { + if (env.DEPLOY_DOCKER_IMAGES == 'yes' && rebuild_docker_images && upstream_revision != null) { + node('CPU') { + ws("workspace/exec_${env.EXECUTOR_NUMBER}/tvm/deploy-docker") { + timeout(time: max_time, unit: 'MINUTES') { + init_git() + try 
{ + withCredentials([string( + credentialsId: 'dockerhub-tlcpackstaging-key', + variable: 'DOCKERHUB_KEY', + )]) { + sh( + script: 'docker login -u tlcpackstaging -p ${DOCKERHUB_KEY}', + label: 'Log in to Docker Hub', + ) + } + def date_Ymd_HMS = sh( + script: 'python3 -c \'import datetime; print(datetime.datetime.now().strftime("%Y%m%d-%H%M%S"))\'', + label: 'Determine date', + returnStdout: true, + ).trim() + def tag = "${date_Ymd_HMS}-${upstream_revision.substring(0, 8)}" + update_docker(built_ci_arm, "tlcpackstaging/ci_arm:${tag}") + update_docker(built_ci_cortexm, "tlcpackstaging/ci_cortexm:${tag}") + update_docker(built_ci_cpu, "tlcpackstaging/ci_cpu:${tag}") + update_docker(built_ci_gpu, "tlcpackstaging/ci_gpu:${tag}") + update_docker(built_ci_hexagon, "tlcpackstaging/ci_hexagon:${tag}") + update_docker(built_ci_i386, "tlcpackstaging/ci_i386:${tag}") + update_docker(built_ci_lint, "tlcpackstaging/ci_lint:${tag}") + update_docker(built_ci_minimal, "tlcpackstaging/ci_minimal:${tag}") + update_docker(built_ci_riscv, "tlcpackstaging/ci_riscv:${tag}") + update_docker(built_ci_wasm, "tlcpackstaging/ci_wasm:${tag}") + } finally { + sh( + script: 'docker logout', + label: 'Clean up login credentials' + ) + } + } + } + } + } else { + Utils.markStageSkippedForConditional('Upload built Docker images') + } + }, + 'Tag tlcpackstaging to tlcpack': { + if (env.DEPLOY_DOCKER_IMAGES == 'yes') { + node('CPU') { + ws("workspace/exec_${env.EXECUTOR_NUMBER}/tvm/tag-images") { + timeout(time: max_time, unit: 'MINUTES') { + init_git() + withCredentials([string( + credentialsId: 'dockerhub-tlcpack-key', + variable: 'TLCPACK_TOKEN', + )]) { + try { + sh( + script: 'echo $TLCPACK_TOKEN | docker login --username octomldriazati --password-stdin', + label: 'Log in to Docker Hub' + ) + if (ci_arm.contains("tlcpackstaging")) { + // Push image to tlcpack + def tag = ci_arm.split(":")[1] + sh( + script: """ + set -eux + . ${jenkins_scripts_root}/retry.sh + docker pull tlcpackstaging/ci_arm:${tag} + docker tag tlcpackstaging/ci_arm:${tag} tlcpack/ci-arm:${tag} + retry 5 docker push tlcpack/ci-arm:${tag} + """, + label: 'Tag tlcpackstaging/ci_arm image to tlcpack', + ) + } + if (ci_cortexm.contains("tlcpackstaging")) { + // Push image to tlcpack + def tag = ci_cortexm.split(":")[1] + sh( + script: """ + set -eux + . ${jenkins_scripts_root}/retry.sh + docker pull tlcpackstaging/ci_cortexm:${tag} + docker tag tlcpackstaging/ci_cortexm:${tag} tlcpack/ci-cortexm:${tag} + retry 5 docker push tlcpack/ci-cortexm:${tag} + """, + label: 'Tag tlcpackstaging/ci_cortexm image to tlcpack', + ) + } + if (ci_cpu.contains("tlcpackstaging")) { + // Push image to tlcpack + def tag = ci_cpu.split(":")[1] + sh( + script: """ + set -eux + . ${jenkins_scripts_root}/retry.sh + docker pull tlcpackstaging/ci_cpu:${tag} + docker tag tlcpackstaging/ci_cpu:${tag} tlcpack/ci-cpu:${tag} + retry 5 docker push tlcpack/ci-cpu:${tag} + """, + label: 'Tag tlcpackstaging/ci_cpu image to tlcpack', + ) + } + if (ci_gpu.contains("tlcpackstaging")) { + // Push image to tlcpack + def tag = ci_gpu.split(":")[1] + sh( + script: """ + set -eux + . ${jenkins_scripts_root}/retry.sh + docker pull tlcpackstaging/ci_gpu:${tag} + docker tag tlcpackstaging/ci_gpu:${tag} tlcpack/ci-gpu:${tag} + retry 5 docker push tlcpack/ci-gpu:${tag} + """, + label: 'Tag tlcpackstaging/ci_gpu image to tlcpack', + ) + } + if (ci_hexagon.contains("tlcpackstaging")) { + // Push image to tlcpack + def tag = ci_hexagon.split(":")[1] + sh( + script: """ + set -eux + . 
${jenkins_scripts_root}/retry.sh + docker pull tlcpackstaging/ci_hexagon:${tag} + docker tag tlcpackstaging/ci_hexagon:${tag} tlcpack/ci-hexagon:${tag} + retry 5 docker push tlcpack/ci-hexagon:${tag} + """, + label: 'Tag tlcpackstaging/ci_hexagon image to tlcpack', + ) + } + if (ci_i386.contains("tlcpackstaging")) { + // Push image to tlcpack + def tag = ci_i386.split(":")[1] + sh( + script: """ + set -eux + . ${jenkins_scripts_root}/retry.sh + docker pull tlcpackstaging/ci_i386:${tag} + docker tag tlcpackstaging/ci_i386:${tag} tlcpack/ci-i386:${tag} + retry 5 docker push tlcpack/ci-i386:${tag} + """, + label: 'Tag tlcpackstaging/ci_i386 image to tlcpack', + ) + } + if (ci_lint.contains("tlcpackstaging")) { + // Push image to tlcpack + def tag = ci_lint.split(":")[1] + sh( + script: """ + set -eux + . ${jenkins_scripts_root}/retry.sh + docker pull tlcpackstaging/ci_lint:${tag} + docker tag tlcpackstaging/ci_lint:${tag} tlcpack/ci-lint:${tag} + retry 5 docker push tlcpack/ci-lint:${tag} + """, + label: 'Tag tlcpackstaging/ci_lint image to tlcpack', + ) + } + if (ci_minimal.contains("tlcpackstaging")) { + // Push image to tlcpack + def tag = ci_minimal.split(":")[1] + sh( + script: """ + set -eux + . ${jenkins_scripts_root}/retry.sh + docker pull tlcpackstaging/ci_minimal:${tag} + docker tag tlcpackstaging/ci_minimal:${tag} tlcpack/ci-minimal:${tag} + retry 5 docker push tlcpack/ci-minimal:${tag} + """, + label: 'Tag tlcpackstaging/ci_minimal image to tlcpack', + ) + } + if (ci_riscv.contains("tlcpackstaging")) { + // Push image to tlcpack + def tag = ci_riscv.split(":")[1] + sh( + script: """ + set -eux + . ${jenkins_scripts_root}/retry.sh + docker pull tlcpackstaging/ci_riscv:${tag} + docker tag tlcpackstaging/ci_riscv:${tag} tlcpack/ci-riscv:${tag} + retry 5 docker push tlcpack/ci-riscv:${tag} + """, + label: 'Tag tlcpackstaging/ci_riscv image to tlcpack', + ) + } + if (ci_wasm.contains("tlcpackstaging")) { + // Push image to tlcpack + def tag = ci_wasm.split(":")[1] + sh( + script: """ + set -eux + . ${jenkins_scripts_root}/retry.sh + docker pull tlcpackstaging/ci_wasm:${tag} + docker tag tlcpackstaging/ci_wasm:${tag} tlcpack/ci-wasm:${tag} + retry 5 docker push tlcpack/ci-wasm:${tag} + """, + label: 'Tag tlcpackstaging/ci_wasm image to tlcpack', + ) + } + } finally { + sh( + script: 'docker logout', + label: 'Clean up login credentials' + ) + } + } + } + } + } + } else { + Utils.markStageSkippedForConditional('Tag tlcpackstaging to tlcpack') + } + }, + ) + } + } +} + +deploy() diff --git a/ci/jenkins/generated/gpu_jenkinsfile.groovy b/ci/jenkins/generated/gpu_jenkinsfile.groovy new file mode 100644 index 000000000000..c3f3723d8b53 --- /dev/null +++ b/ci/jenkins/generated/gpu_jenkinsfile.groovy @@ -0,0 +1,1279 @@ +#!groovy +// -*- mode: groovy -*- + +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. 
See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// Jenkins pipeline
+// See documents at https://jenkins.io/doc/book/pipeline/jenkinsfile/
+
+// Docker env used for testing
+// Different images may have different version tags
+// because some of them are more stable than others.
+//
+// Docker images are maintained by the PMC, cached in dockerhub,
+// and remain relatively stable over time.
+// Flow for upgrading the docker env (needs a committer):
+//
+// - Send PR to upgrade build script in the repo
+// - Build the new docker image
+// - Tag the docker image with a new version and push to a binary cache.
+// - Update the version in the Jenkinsfile, send a PR
+// - Fix any issues with the new image version in the PR
+// - Merge the PR; now we are on the new version
+// - Tag the new version as the latest
+// - Periodically clean up the old versions on local workers
+//
+
+// ============================= IMPORTANT NOTE =============================
+// This file is generated by 'jenkins/generate.py'. Do not edit this file directly!
+// Make edits to 'jenkins/Jenkinsfile.j2' and regenerate this with
+// 'python3 jenkins/generate.py'
+// Note: This timestamp is here to ensure that updates to the Jenkinsfile are
+// always rebased on main before merging:
+// Generated at 2022-12-01T17:07:24.698570
+
+import org.jenkinsci.plugins.pipeline.modeldefinition.Utils
+ci_lint = 'tlcpack/ci-lint:20221013-060115-61c9742ea'
+ci_gpu = 'tlcpack/ci-gpu:20221019-060125-0b4836739'
+ci_cpu = 'tlcpack/ci-cpu:20221013-060115-61c9742ea'
+ci_minimal = 'tlcpack/ci-minimal:20221013-060115-61c9742ea'
+ci_wasm = 'tlcpack/ci-wasm:20221013-060115-61c9742ea'
+ci_i386 = 'tlcpack/ci-i386:20221013-060115-61c9742ea'
+ci_cortexm = 'tlcpack/ci-cortexm:20221013-060115-61c9742ea'
+ci_arm = 'tlcpack/ci-arm:20221013-060115-61c9742ea'
+ci_hexagon = 'tlcpack/ci-hexagon:20221013-060115-61c9742ea'
+ci_riscv = 'tlcpack/ci-riscv:20221013-060115-61c9742ea'
+
+// Parameters that allow overriding (in the Jenkins UI) the images
+// to be used by a given build. When provided, they take precedence
+// over the default values above.
+properties([
+  parameters([
+    string(name: 'ci_arm_param', defaultValue: ''),
+    string(name: 'ci_cortexm_param', defaultValue: ''),
+    string(name: 'ci_cpu_param', defaultValue: ''),
+    string(name: 'ci_gpu_param', defaultValue: ''),
+    string(name: 'ci_hexagon_param', defaultValue: ''),
+    string(name: 'ci_i386_param', defaultValue: ''),
+    string(name: 'ci_lint_param', defaultValue: ''),
+    string(name: 'ci_minimal_param', defaultValue: ''),
+    string(name: 'ci_riscv_param', defaultValue: ''),
+    string(name: 'ci_wasm_param', defaultValue: ''),
+  ])
+])
+
+// Placeholders for newly built Docker image names (if rebuild_docker_images
+// is used)
+  built_ci_arm = null;
+  built_ci_cortexm = null;
+  built_ci_cpu = null;
+  built_ci_gpu = null;
+  built_ci_hexagon = null;
+  built_ci_i386 = null;
+  built_ci_lint = null;
+  built_ci_minimal = null;
+  built_ci_riscv = null;
+  built_ci_wasm = null;
+
+// Global variable assigned during Sanity Check that holds the sha1 which should be
+// merged into the PR in all branches.
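+// On PR builds this is resolved from FETCH_HEAD after 'git fetch origin main'
+// (see merge_with_main below); on main it is simply HEAD.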
+upstream_revision = null + +// command to start a docker container +docker_run = 'docker/bash.sh --env CI --env TVM_SHARD_INDEX --env TVM_NUM_SHARDS --env RUN_DISPLAY_URL --env PLATFORM --env SKIP_SLOW_TESTS --env TEST_STEP_NAME' +docker_build = 'docker/build.sh' +// timeout in minutes +max_time = 180 +rebuild_docker_images = false + +s3_bucket = 'tvm-jenkins-artifacts-prod' +s3_prefix = "tvm/${env.BRANCH_NAME}/${env.BUILD_NUMBER}" + +// Jenkins script root directory +jenkins_scripts_root = "ci/scripts/jenkins" + + +// General note: Jenkins has limits on the size of a method (or top level code) +// that are pretty strict, so most usage of groovy methods in these templates +// are purely to satisfy the JVM +def per_exec_ws(folder) { + return "workspace/exec_${env.EXECUTOR_NUMBER}/" + folder +} + +// initialize source codes +def init_git() { + retry(5) { + checkout scm + } + + // Add more info about job node + sh ( + script: './tests/scripts/task_show_node_info.sh', + label: 'Show executor node info', + ) + + // Determine merge commit to use for all stages + if (env.BRANCH_NAME == 'main') { + // Only set upstream_revision to HEAD and skip merging to avoid a race with another commit merged to main. + update_upstream_revision("HEAD") + } else { + // This is PR branch so merge with latest main. + merge_with_main() + } + + sh( + script: """ + set -eux + . ${jenkins_scripts_root}/retry.sh + retry 3 timeout 5m git submodule update --init -f --jobs 0 + """, + label: 'Update git submodules', + ) + checkout_trusted_files() +} + +def update_upstream_revision(git_ref) { + if (upstream_revision == null) { + upstream_revision = sh( + script: "git log -1 ${git_ref} --format=\'%H\'", + label: 'Determine upstream revision', + returnStdout: true, + ).trim() + } +} + +def merge_with_main() { + sh ( + script: 'git fetch origin main', + label: 'Fetch upstream', + ) + update_upstream_revision("FETCH_HEAD") + sh ( + script: "git -c user.name=TVM-Jenkins -c user.email=jenkins@tvm.apache.org merge ${upstream_revision}", + label: 'Merge to origin/main' + ) +} + +def docker_init(image) { + // Clear out all Docker images that aren't going to be used + sh( + script: """ + set -eux + docker image ls --all + IMAGES=\$(docker image ls --all --format '{{.Repository}}:{{.Tag}} {{.ID}}') + + echo -e "Found images:\\n\$IMAGES" + echo "\$IMAGES" | { grep -vE '${image}' || test \$? = 1; } | { xargs docker rmi || test \$? = 123; } + + docker image ls --all + """, + label: 'Clean old Docker images', + ) + + if (image.contains("amazonaws.com")) { + // If this string is in the image name it's from ECR and needs to be pulled + // with the right credentials + // ecr_pull(image) + sh "echo Pulling from AWS is not implemented && exit 1" + } else { + sh( + script: """ + set -eux + . ${jenkins_scripts_root}/retry.sh + retry 5 docker pull ${image} + """, + label: 'Pull docker image', + ) + } +} + +def should_skip_slow_tests(pr_number) { + withCredentials([string( + credentialsId: 'tvm-bot-jenkins-reader', + variable: 'GITHUB_TOKEN', + )]) { + // Exit code of 1 means run slow tests, exit code of 0 means skip slow tests + result = sh ( + returnStatus: true, + script: "./${jenkins_scripts_root}/should_run_slow_tests.py --pr '${pr_number}'", + label: 'Check if CI should run slow tests', + ) + } + return result == 0 +} + +def cancel_previous_build() { + // cancel previous build if it is not on main. 
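+  // main is exempt, presumably so that every merged commit is tested to
+  // completion rather than aborted by the next merge.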
+  if (env.BRANCH_NAME != 'main') {
+    def buildNumber = env.BUILD_NUMBER as int
+    // The Milestone API allows us to cancel a previous build
+    // with the same milestone number
+    if (buildNumber > 1) milestone(buildNumber - 1)
+    milestone(buildNumber)
+  }
+}
+
+def checkout_trusted_files() {
+  // trust everything from branch builds
+  if (env.BRANCH_NAME == null || !env.BRANCH_NAME.startsWith('PR-')) {
+    return;
+  }
+
+  // trust people listed in CONTRIBUTORS.md
+  grep_code = sh(
+    returnStatus: true,
+    script: "git show '${upstream_revision}:CONTRIBUTORS.md' | grep '@${env.CHANGE_AUTHOR}'",
+    label: 'Check if change is from a contributor',
+  )
+
+  if (grep_code == 1) {
+    // Any scripts that run on the bare host and not inside a Docker container
+    // (especially those that access secrets) should be checked out here so
+    // only trusted versions are used in CI
+    sh(
+      script: "git checkout ${upstream_revision} ${jenkins_scripts_root}/.",
+      label: 'Check out trusted files',
+    )
+  }
+}
+
+def should_skip_ci(pr_number) {
+  if (env.BRANCH_NAME == null || !env.BRANCH_NAME.startsWith('PR-')) {
+    // never skip CI on builds sourced from a branch
+    return false
+  }
+  glob_skip_ci_code = sh (
+    returnStatus: true,
+    script: "./${jenkins_scripts_root}/git_skip_ci_globs.py",
+    label: 'Check if CI should be skipped due to changed files',
+  )
+  if (glob_skip_ci_code == 0) {
+    return true
+  }
+  withCredentials([string(
+    credentialsId: 'tvm-bot-jenkins-reader',
+    variable: 'GITHUB_TOKEN',
+    )]) {
+    // Exit code of 1 means run full CI (or the script had an error, so run
+    // full CI just in case). Exit code of 0 means skip CI.
+    git_skip_ci_code = sh (
+      returnStatus: true,
+      script: "./${jenkins_scripts_root}/git_skip_ci.py --pr '${pr_number}'",
+      label: 'Check if CI should be skipped',
+    )
+  }
+  return git_skip_ci_code == 0
+}
+
+def check_pr(pr_number) {
+  if (env.BRANCH_NAME == null || !env.BRANCH_NAME.startsWith('PR-')) {
+    // PR checks only apply to builds sourced from a PR
+    return false
+  }
+  withCredentials([string(
+    credentialsId: 'tvm-bot-jenkins-reader',
+    variable: 'GITHUB_TOKEN',
+    )]) {
+    sh (
+      script: "python3 ${jenkins_scripts_root}/check_pr.py --pr ${pr_number}",
+      label: 'Check PR title and body',
+    )
+  }
+
+}
+
+def prepare() {
+  stage('Prepare') {
+    node('CPU-SMALL') {
+      ws("workspace/exec_${env.EXECUTOR_NUMBER}/tvm/prepare") {
+        init_git()
+
+        check_pr(env.CHANGE_ID)
+
+        if (env.DETERMINE_DOCKER_IMAGES == 'yes') {
+          sh(
+            script: "./${jenkins_scripts_root}/determine_docker_images.py ci_arm=${ci_arm} ci_cortexm=${ci_cortexm} ci_cpu=${ci_cpu} ci_gpu=${ci_gpu} ci_hexagon=${ci_hexagon} ci_i386=${ci_i386} ci_lint=${ci_lint} ci_minimal=${ci_minimal} ci_riscv=${ci_riscv} ci_wasm=${ci_wasm} ",
+            label: 'Decide whether to use tlcpack or tlcpackstaging for Docker images',
+          )
+          // Pull image names from the results of determine_docker_images.py
+          ci_arm = sh(
+            script: "cat .docker-image-names/ci_arm",
+            label: "Find docker image name for ci_arm",
+            returnStdout: true,
+          ).trim()
+          ci_cortexm = sh(
+            script: "cat .docker-image-names/ci_cortexm",
+            label: "Find docker image name for ci_cortexm",
+            returnStdout: true,
+          ).trim()
+          ci_cpu = sh(
+            script: "cat .docker-image-names/ci_cpu",
+            label: "Find docker image name for ci_cpu",
+            returnStdout: true,
+          ).trim()
+          ci_gpu = sh(
+            script: "cat .docker-image-names/ci_gpu",
+            label: "Find docker image name for ci_gpu",
+            returnStdout: true,
+          ).trim()
+          ci_hexagon = sh(
+            script: "cat .docker-image-names/ci_hexagon",
+            label: "Find docker image name for
ci_hexagon", + returnStdout: true, + ).trim() + ci_i386 = sh( + script: "cat .docker-image-names/ci_i386", + label: "Find docker image name for ci_i386", + returnStdout: true, + ).trim() + ci_lint = sh( + script: "cat .docker-image-names/ci_lint", + label: "Find docker image name for ci_lint", + returnStdout: true, + ).trim() + ci_minimal = sh( + script: "cat .docker-image-names/ci_minimal", + label: "Find docker image name for ci_minimal", + returnStdout: true, + ).trim() + ci_riscv = sh( + script: "cat .docker-image-names/ci_riscv", + label: "Find docker image name for ci_riscv", + returnStdout: true, + ).trim() + ci_wasm = sh( + script: "cat .docker-image-names/ci_wasm", + label: "Find docker image name for ci_wasm", + returnStdout: true, + ).trim() + } + + ci_arm = params.ci_arm_param ?: ci_arm + ci_cortexm = params.ci_cortexm_param ?: ci_cortexm + ci_cpu = params.ci_cpu_param ?: ci_cpu + ci_gpu = params.ci_gpu_param ?: ci_gpu + ci_hexagon = params.ci_hexagon_param ?: ci_hexagon + ci_i386 = params.ci_i386_param ?: ci_i386 + ci_lint = params.ci_lint_param ?: ci_lint + ci_minimal = params.ci_minimal_param ?: ci_minimal + ci_riscv = params.ci_riscv_param ?: ci_riscv + ci_wasm = params.ci_wasm_param ?: ci_wasm + + sh (script: """ + echo "Docker images being used in this build:" + echo " ci_arm = ${ci_arm}" + echo " ci_cortexm = ${ci_cortexm}" + echo " ci_cpu = ${ci_cpu}" + echo " ci_gpu = ${ci_gpu}" + echo " ci_hexagon = ${ci_hexagon}" + echo " ci_i386 = ${ci_i386}" + echo " ci_lint = ${ci_lint}" + echo " ci_minimal = ${ci_minimal}" + echo " ci_riscv = ${ci_riscv}" + echo " ci_wasm = ${ci_wasm}" + """, label: 'Docker image names') + + is_docs_only_build = sh ( + returnStatus: true, + script: "./${jenkins_scripts_root}/git_change_docs.sh", + label: 'Check for docs only changes', + ) + skip_ci = should_skip_ci(env.CHANGE_ID) + skip_slow_tests = should_skip_slow_tests(env.CHANGE_ID) + rebuild_docker_images = sh ( + returnStatus: true, + script: "./${jenkins_scripts_root}/git_change_docker.sh", + label: 'Check for any docker changes', + ) + + if (skip_ci) { + // Don't rebuild when skipping CI + rebuild_docker_images = false + } + } + } + } +} +def ci_setup(image) { + sh ( + script: "${docker_run} ${image} ./tests/scripts/task_clear_pytest.sh", + label: 'Clean up old workspace', + ) +} + +def python_unittest(image) { + sh ( + script: "${docker_run} ${image} ./tests/scripts/task_python_unittest.sh", + label: 'Run Python unit tests', + ) +} + +def fsim_test(image) { + sh ( + script: "${docker_run} ${image} ./tests/scripts/task_python_vta_fsim.sh", + label: 'Run VTA tests in FSIM', + ) +} + +def make_standalone_crt(image, build_dir) { + sh ( + script: """ + set -eux + ${docker_run} ${image} python3 ./tests/scripts/task_build.py \ + --sccache-bucket tvm-sccache-prod \ + --cmake-target standalone_crt \ + --build-dir build + ${docker_run} ${image} python3 ./tests/scripts/task_build.py \ + --sccache-bucket tvm-sccache-prod \ + --cmake-target crttest \ + --build-dir build + """, + label: 'Make standalone CRT', + ) +} + +def make_cpp_tests(image, build_dir) { + sh ( + script: """ + set -eux + ${docker_run} ${image} python3 ./tests/scripts/task_build.py \ + --sccache-bucket tvm-sccache-prod \ + --cmake-target cpptest \ + --build-dir ${build_dir} + """, + label: 'Make C++ tests', + ) +} + +def cmake_build(image, path, make_flag) { + sh ( + script: "${docker_run} --env CI_NUM_EXECUTORS ${image} ./tests/scripts/task_build.py --sccache-bucket tvm-sccache-prod", + label: 'Run cmake build', + ) +} +def 
cpp_unittest(image) { + sh ( + script: "${docker_run} --env CI_NUM_EXECUTORS ${image} ./tests/scripts/task_cpp_unittest.sh", + label: 'Run C++ tests', + ) +} + +def micro_cpp_unittest(image) { + sh ( + script: "${docker_run} --env CI_NUM_EXECUTORS ${image} ./tests/scripts/task_microtvm_cpp_tests.sh build", + label: 'Run microTVM C++ tests', + ) +} + +cancel_previous_build() + +prepare() +def build() { + stage('Build') { + if (!skip_ci) { + node('CPU-SMALL') { + ws("workspace/exec_${env.EXECUTOR_NUMBER}/tvm/build-gpu") { + init_git() + docker_init(ci_gpu) + timeout(time: max_time, unit: 'MINUTES') { + sh "${docker_run} --no-gpu ${ci_gpu} ./tests/scripts/task_config_build_gpu.sh build" + cmake_build("${ci_gpu} --no-gpu", 'build', '-j2') + make_standalone_crt("${ci_gpu} --no-gpu", 'build') + sh( + script: "./${jenkins_scripts_root}/s3.py --action upload --bucket ${s3_bucket} --prefix ${s3_prefix}/gpu --items build/libtvm.so build/libvta_fsim.so build/libtvm_runtime.so build/config.cmake build/libtvm_allvisible.so build/microtvm_template_projects build/crttest build/standalone_crt build/build.ninja", + label: 'Upload artifacts to S3', + ) + + + // compiler test + sh "rm -rf build" + sh "${docker_run} --no-gpu ${ci_gpu} ./tests/scripts/task_config_build_gpu_other.sh build" + cmake_build("${ci_gpu} --no-gpu", 'build', '-j2') + make_standalone_crt("${ci_gpu} --no-gpu", 'build') + sh( + script: "./${jenkins_scripts_root}/s3.py --action upload --bucket ${s3_bucket} --prefix ${s3_prefix}/gpu2 --items build/libtvm.so build/libtvm_runtime.so build/config.cmake build/crttest build/standalone_crt build/build.ninja", + label: 'Upload artifacts to S3', + ) + } + } + } + } else { + Utils.markStageSkippedForConditional('BUILD: GPU') + } + } +} +build() + + + +def shard_run_unittest_GPU_1_of_3() { + if (!skip_ci && is_docs_only_build != 1) { + node('GPU') { + ws("workspace/exec_${env.EXECUTOR_NUMBER}/tvm/ut-python-gpu") { + try { + init_git() + docker_init(ci_gpu) + timeout(time: max_time, unit: 'MINUTES') { + withEnv([ + 'PLATFORM=gpu', + 'TEST_STEP_NAME=unittest: GPU', + 'TVM_NUM_SHARDS=3', + 'TVM_SHARD_INDEX=0', + "SKIP_SLOW_TESTS=${skip_slow_tests}"], { + sh( + script: "./${jenkins_scripts_root}/s3.py --action download --bucket ${s3_bucket} --prefix ${s3_prefix}/gpu2", + label: 'Download artifacts from S3', + ) + + sh "${docker_run} --no-gpu ${ci_gpu} ./tests/scripts/task_config_build_gpu_other.sh build" + // These require a GPU to finish the build (i.e. 
CUDA needs to be load-able) + make_standalone_crt(ci_gpu, 'build') + // make_cpp_tests(ci_gpu, 'build') + // cpp_unittest(ci_gpu) + + sh "rm -rf build" + sh( + script: "./${jenkins_scripts_root}/s3.py --action download --bucket ${s3_bucket} --prefix ${s3_prefix}/gpu", + label: 'Download artifacts from S3', + ) + + ci_setup(ci_gpu) + sh "${docker_run} --no-gpu ${ci_gpu} ./tests/scripts/task_config_build_gpu.sh build" + make_standalone_crt(ci_gpu, 'build') + make_cpp_tests(ci_gpu, 'build') + cpp_unittest(ci_gpu) + micro_cpp_unittest(ci_gpu) + sh ( + script: "${docker_run} ${ci_gpu} ./tests/scripts/task_python_unittest_gpuonly.sh", + label: 'Run Python GPU unit tests', + ) + sh ( + script: "${docker_run} ${ci_gpu} ./tests/scripts/task_python_integration_gpuonly.sh", + label: 'Run Python GPU integration tests', + ) + }) + } + } finally { + try { + sh( + script: "./${jenkins_scripts_root}/s3.py --action upload --bucket ${s3_bucket} --prefix ${s3_prefix}/pytest-results/unittest_GPU --items build/pytest-results", + label: 'Upload JUnits to S3', + ) + + junit 'build/pytest-results/*.xml' + } catch (Exception e) { + echo 'Exception during JUnit upload: ' + e.toString() + } + } + } + } + } else { + Utils.markStageSkippedForConditional('unittest: GPU 1 of 3') + } +} + +def shard_run_unittest_GPU_2_of_3() { + if (!skip_ci && is_docs_only_build != 1) { + node('GPU') { + ws("workspace/exec_${env.EXECUTOR_NUMBER}/tvm/ut-python-gpu") { + try { + init_git() + docker_init(ci_gpu) + timeout(time: max_time, unit: 'MINUTES') { + withEnv([ + 'PLATFORM=gpu', + 'TEST_STEP_NAME=unittest: GPU', + 'TVM_NUM_SHARDS=3', + 'TVM_SHARD_INDEX=1', + "SKIP_SLOW_TESTS=${skip_slow_tests}"], { + sh( + script: "./${jenkins_scripts_root}/s3.py --action download --bucket ${s3_bucket} --prefix ${s3_prefix}/gpu", + label: 'Download artifacts from S3', + ) + + ci_setup(ci_gpu) + sh ( + script: "${docker_run} ${ci_gpu} ./tests/scripts/task_java_unittest.sh", + label: 'Run Java unit tests', + ) + sh ( + script: "${docker_run} ${ci_gpu} ./tests/scripts/task_python_unittest_gpuonly.sh", + label: 'Run Python GPU unit tests', + ) + sh ( + script: "${docker_run} ${ci_gpu} ./tests/scripts/task_python_integration_gpuonly.sh", + label: 'Run Python GPU integration tests', + ) + }) + } + } finally { + try { + sh( + script: "./${jenkins_scripts_root}/s3.py --action upload --bucket ${s3_bucket} --prefix ${s3_prefix}/pytest-results/unittest_GPU --items build/pytest-results", + label: 'Upload JUnits to S3', + ) + + junit 'build/pytest-results/*.xml' + } catch (Exception e) { + echo 'Exception during JUnit upload: ' + e.toString() + } + } + } + } + } else { + Utils.markStageSkippedForConditional('unittest: GPU 2 of 3') + } +} + +def shard_run_unittest_GPU_3_of_3() { + if (!skip_ci && is_docs_only_build != 1) { + node('GPU') { + ws("workspace/exec_${env.EXECUTOR_NUMBER}/tvm/ut-python-gpu") { + try { + init_git() + docker_init(ci_gpu) + timeout(time: max_time, unit: 'MINUTES') { + withEnv([ + 'PLATFORM=gpu', + 'TEST_STEP_NAME=unittest: GPU', + 'TVM_NUM_SHARDS=3', + 'TVM_SHARD_INDEX=2', + "SKIP_SLOW_TESTS=${skip_slow_tests}"], { + sh( + script: "./${jenkins_scripts_root}/s3.py --action download --bucket ${s3_bucket} --prefix ${s3_prefix}/gpu", + label: 'Download artifacts from S3', + ) + + ci_setup(ci_gpu) + sh ( + script: "${docker_run} ${ci_gpu} ./tests/scripts/task_python_unittest_gpuonly.sh", + label: 'Run Python GPU unit tests', + ) + sh ( + script: "${docker_run} ${ci_gpu} ./tests/scripts/task_python_integration_gpuonly.sh", + label: 'Run Python 
GPU integration tests', + ) + }) + } + } finally { + try { + sh( + script: "./${jenkins_scripts_root}/s3.py --action upload --bucket ${s3_bucket} --prefix ${s3_prefix}/pytest-results/unittest_GPU --items build/pytest-results", + label: 'Upload JUnits to S3', + ) + + junit 'build/pytest-results/*.xml' + } catch (Exception e) { + echo 'Exception during JUnit upload: ' + e.toString() + } + } + } + } + } else { + Utils.markStageSkippedForConditional('unittest: GPU 3 of 3') + } +} + + + +def shard_run_topi_GPU_1_of_3() { + if (!skip_ci && is_docs_only_build != 1) { + node('GPU') { + ws("workspace/exec_${env.EXECUTOR_NUMBER}/tvm/topi-python-gpu") { + try { + init_git() + docker_init(ci_gpu) + timeout(time: max_time, unit: 'MINUTES') { + withEnv([ + 'PLATFORM=gpu', + 'TEST_STEP_NAME=topi: GPU', + 'TVM_NUM_SHARDS=3', + 'TVM_SHARD_INDEX=0', + "SKIP_SLOW_TESTS=${skip_slow_tests}"], { + sh( + script: "./${jenkins_scripts_root}/s3.py --action download --bucket ${s3_bucket} --prefix ${s3_prefix}/gpu", + label: 'Download artifacts from S3', + ) + + ci_setup(ci_gpu) + sh ( + script: "${docker_run} ${ci_gpu} ./tests/scripts/task_python_topi.sh", + label: 'Run TOPI tests', + ) + }) + } + } finally { + try { + sh( + script: "./${jenkins_scripts_root}/s3.py --action upload --bucket ${s3_bucket} --prefix ${s3_prefix}/pytest-results/topi_GPU --items build/pytest-results", + label: 'Upload JUnits to S3', + ) + + junit 'build/pytest-results/*.xml' + } catch (Exception e) { + echo 'Exception during JUnit upload: ' + e.toString() + } + } + } + } + } else { + Utils.markStageSkippedForConditional('topi: GPU 1 of 3') + } +} + +def shard_run_topi_GPU_2_of_3() { + if (!skip_ci && is_docs_only_build != 1) { + node('GPU') { + ws("workspace/exec_${env.EXECUTOR_NUMBER}/tvm/topi-python-gpu") { + try { + init_git() + docker_init(ci_gpu) + timeout(time: max_time, unit: 'MINUTES') { + withEnv([ + 'PLATFORM=gpu', + 'TEST_STEP_NAME=topi: GPU', + 'TVM_NUM_SHARDS=3', + 'TVM_SHARD_INDEX=1', + "SKIP_SLOW_TESTS=${skip_slow_tests}"], { + sh( + script: "./${jenkins_scripts_root}/s3.py --action download --bucket ${s3_bucket} --prefix ${s3_prefix}/gpu", + label: 'Download artifacts from S3', + ) + + ci_setup(ci_gpu) + sh ( + script: "${docker_run} ${ci_gpu} ./tests/scripts/task_python_topi.sh", + label: 'Run TOPI tests', + ) + }) + } + } finally { + try { + sh( + script: "./${jenkins_scripts_root}/s3.py --action upload --bucket ${s3_bucket} --prefix ${s3_prefix}/pytest-results/topi_GPU --items build/pytest-results", + label: 'Upload JUnits to S3', + ) + + junit 'build/pytest-results/*.xml' + } catch (Exception e) { + echo 'Exception during JUnit upload: ' + e.toString() + } + } + } + } + } else { + Utils.markStageSkippedForConditional('topi: GPU 2 of 3') + } +} + +def shard_run_topi_GPU_3_of_3() { + if (!skip_ci && is_docs_only_build != 1) { + node('GPU') { + ws("workspace/exec_${env.EXECUTOR_NUMBER}/tvm/topi-python-gpu") { + try { + init_git() + docker_init(ci_gpu) + timeout(time: max_time, unit: 'MINUTES') { + withEnv([ + 'PLATFORM=gpu', + 'TEST_STEP_NAME=topi: GPU', + 'TVM_NUM_SHARDS=3', + 'TVM_SHARD_INDEX=2', + "SKIP_SLOW_TESTS=${skip_slow_tests}"], { + sh( + script: "./${jenkins_scripts_root}/s3.py --action download --bucket ${s3_bucket} --prefix ${s3_prefix}/gpu", + label: 'Download artifacts from S3', + ) + + ci_setup(ci_gpu) + sh ( + script: "${docker_run} ${ci_gpu} ./tests/scripts/task_python_topi.sh", + label: 'Run TOPI tests', + ) + }) + } + } finally { + try { + sh( + script: "./${jenkins_scripts_root}/s3.py --action upload 
--bucket ${s3_bucket} --prefix ${s3_prefix}/pytest-results/topi_GPU --items build/pytest-results", + label: 'Upload JUnits to S3', + ) + + junit 'build/pytest-results/*.xml' + } catch (Exception e) { + echo 'Exception during JUnit upload: ' + e.toString() + } + } + } + } + } else { + Utils.markStageSkippedForConditional('topi: GPU 3 of 3') + } +} + + + +def shard_run_frontend_GPU_1_of_6() { + if (!skip_ci && is_docs_only_build != 1) { + node('GPU') { + ws("workspace/exec_${env.EXECUTOR_NUMBER}/tvm/frontend-python-gpu") { + try { + init_git() + docker_init(ci_gpu) + timeout(time: max_time, unit: 'MINUTES') { + withEnv([ + 'PLATFORM=gpu', + 'TEST_STEP_NAME=frontend: GPU', + 'TVM_NUM_SHARDS=6', + 'TVM_SHARD_INDEX=0', + "SKIP_SLOW_TESTS=${skip_slow_tests}"], { + sh( + script: "./${jenkins_scripts_root}/s3.py --action download --bucket ${s3_bucket} --prefix ${s3_prefix}/gpu", + label: 'Download artifacts from S3', + ) + + ci_setup(ci_gpu) + sh ( + script: "${docker_run} ${ci_gpu} ./tests/scripts/task_python_frontend.sh", + label: 'Run Python frontend tests', + ) + }) + } + } finally { + try { + sh( + script: "./${jenkins_scripts_root}/s3.py --action upload --bucket ${s3_bucket} --prefix ${s3_prefix}/pytest-results/frontend_GPU --items build/pytest-results", + label: 'Upload JUnits to S3', + ) + + junit 'build/pytest-results/*.xml' + } catch (Exception e) { + echo 'Exception during JUnit upload: ' + e.toString() + } + } + } + } + } else { + Utils.markStageSkippedForConditional('frontend: GPU 1 of 6') + } +} + +def shard_run_frontend_GPU_2_of_6() { + if (!skip_ci && is_docs_only_build != 1) { + node('GPU') { + ws("workspace/exec_${env.EXECUTOR_NUMBER}/tvm/frontend-python-gpu") { + try { + init_git() + docker_init(ci_gpu) + timeout(time: max_time, unit: 'MINUTES') { + withEnv([ + 'PLATFORM=gpu', + 'TEST_STEP_NAME=frontend: GPU', + 'TVM_NUM_SHARDS=6', + 'TVM_SHARD_INDEX=1', + "SKIP_SLOW_TESTS=${skip_slow_tests}"], { + sh( + script: "./${jenkins_scripts_root}/s3.py --action download --bucket ${s3_bucket} --prefix ${s3_prefix}/gpu", + label: 'Download artifacts from S3', + ) + + ci_setup(ci_gpu) + sh ( + script: "${docker_run} ${ci_gpu} ./tests/scripts/task_python_frontend.sh", + label: 'Run Python frontend tests', + ) + }) + } + } finally { + try { + sh( + script: "./${jenkins_scripts_root}/s3.py --action upload --bucket ${s3_bucket} --prefix ${s3_prefix}/pytest-results/frontend_GPU --items build/pytest-results", + label: 'Upload JUnits to S3', + ) + + junit 'build/pytest-results/*.xml' + } catch (Exception e) { + echo 'Exception during JUnit upload: ' + e.toString() + } + } + } + } + } else { + Utils.markStageSkippedForConditional('frontend: GPU 2 of 6') + } +} + +def shard_run_frontend_GPU_3_of_6() { + if (!skip_ci && is_docs_only_build != 1) { + node('GPU') { + ws("workspace/exec_${env.EXECUTOR_NUMBER}/tvm/frontend-python-gpu") { + try { + init_git() + docker_init(ci_gpu) + timeout(time: max_time, unit: 'MINUTES') { + withEnv([ + 'PLATFORM=gpu', + 'TEST_STEP_NAME=frontend: GPU', + 'TVM_NUM_SHARDS=6', + 'TVM_SHARD_INDEX=2', + "SKIP_SLOW_TESTS=${skip_slow_tests}"], { + sh( + script: "./${jenkins_scripts_root}/s3.py --action download --bucket ${s3_bucket} --prefix ${s3_prefix}/gpu", + label: 'Download artifacts from S3', + ) + + ci_setup(ci_gpu) + sh ( + script: "${docker_run} ${ci_gpu} ./tests/scripts/task_python_frontend.sh", + label: 'Run Python frontend tests', + ) + }) + } + } finally { + try { + sh( + script: "./${jenkins_scripts_root}/s3.py --action upload --bucket ${s3_bucket} --prefix 
${s3_prefix}/pytest-results/frontend_GPU --items build/pytest-results", + label: 'Upload JUnits to S3', + ) + + junit 'build/pytest-results/*.xml' + } catch (Exception e) { + echo 'Exception during JUnit upload: ' + e.toString() + } + } + } + } + } else { + Utils.markStageSkippedForConditional('frontend: GPU 3 of 6') + } +} + +def shard_run_frontend_GPU_4_of_6() { + if (!skip_ci && is_docs_only_build != 1) { + node('GPU') { + ws("workspace/exec_${env.EXECUTOR_NUMBER}/tvm/frontend-python-gpu") { + try { + init_git() + docker_init(ci_gpu) + timeout(time: max_time, unit: 'MINUTES') { + withEnv([ + 'PLATFORM=gpu', + 'TEST_STEP_NAME=frontend: GPU', + 'TVM_NUM_SHARDS=6', + 'TVM_SHARD_INDEX=3', + "SKIP_SLOW_TESTS=${skip_slow_tests}"], { + sh( + script: "./${jenkins_scripts_root}/s3.py --action download --bucket ${s3_bucket} --prefix ${s3_prefix}/gpu", + label: 'Download artifacts from S3', + ) + + ci_setup(ci_gpu) + sh ( + script: "${docker_run} ${ci_gpu} ./tests/scripts/task_python_frontend.sh", + label: 'Run Python frontend tests', + ) + }) + } + } finally { + try { + sh( + script: "./${jenkins_scripts_root}/s3.py --action upload --bucket ${s3_bucket} --prefix ${s3_prefix}/pytest-results/frontend_GPU --items build/pytest-results", + label: 'Upload JUnits to S3', + ) + + junit 'build/pytest-results/*.xml' + } catch (Exception e) { + echo 'Exception during JUnit upload: ' + e.toString() + } + } + } + } + } else { + Utils.markStageSkippedForConditional('frontend: GPU 4 of 6') + } +} + +def shard_run_frontend_GPU_5_of_6() { + if (!skip_ci && is_docs_only_build != 1) { + node('GPU') { + ws("workspace/exec_${env.EXECUTOR_NUMBER}/tvm/frontend-python-gpu") { + try { + init_git() + docker_init(ci_gpu) + timeout(time: max_time, unit: 'MINUTES') { + withEnv([ + 'PLATFORM=gpu', + 'TEST_STEP_NAME=frontend: GPU', + 'TVM_NUM_SHARDS=6', + 'TVM_SHARD_INDEX=4', + "SKIP_SLOW_TESTS=${skip_slow_tests}"], { + sh( + script: "./${jenkins_scripts_root}/s3.py --action download --bucket ${s3_bucket} --prefix ${s3_prefix}/gpu", + label: 'Download artifacts from S3', + ) + + ci_setup(ci_gpu) + sh ( + script: "${docker_run} ${ci_gpu} ./tests/scripts/task_python_frontend.sh", + label: 'Run Python frontend tests', + ) + }) + } + } finally { + try { + sh( + script: "./${jenkins_scripts_root}/s3.py --action upload --bucket ${s3_bucket} --prefix ${s3_prefix}/pytest-results/frontend_GPU --items build/pytest-results", + label: 'Upload JUnits to S3', + ) + + junit 'build/pytest-results/*.xml' + } catch (Exception e) { + echo 'Exception during JUnit upload: ' + e.toString() + } + } + } + } + } else { + Utils.markStageSkippedForConditional('frontend: GPU 5 of 6') + } +} + +def shard_run_frontend_GPU_6_of_6() { + if (!skip_ci && is_docs_only_build != 1) { + node('GPU') { + ws("workspace/exec_${env.EXECUTOR_NUMBER}/tvm/frontend-python-gpu") { + try { + init_git() + docker_init(ci_gpu) + timeout(time: max_time, unit: 'MINUTES') { + withEnv([ + 'PLATFORM=gpu', + 'TEST_STEP_NAME=frontend: GPU', + 'TVM_NUM_SHARDS=6', + 'TVM_SHARD_INDEX=5', + "SKIP_SLOW_TESTS=${skip_slow_tests}"], { + sh( + script: "./${jenkins_scripts_root}/s3.py --action download --bucket ${s3_bucket} --prefix ${s3_prefix}/gpu", + label: 'Download artifacts from S3', + ) + + ci_setup(ci_gpu) + sh ( + script: "${docker_run} ${ci_gpu} ./tests/scripts/task_python_frontend.sh", + label: 'Run Python frontend tests', + ) + }) + } + } finally { + try { + sh( + script: "./${jenkins_scripts_root}/s3.py --action upload --bucket ${s3_bucket} --prefix 
${s3_prefix}/pytest-results/frontend_GPU --items build/pytest-results", + label: 'Upload JUnits to S3', + ) + + junit 'build/pytest-results/*.xml' + } catch (Exception e) { + echo 'Exception during JUnit upload: ' + e.toString() + } + } + } + } + } else { + Utils.markStageSkippedForConditional('frontend: GPU 6 of 6') + } +} + + + +def shard_run_docs_GPU_1_of_1() { + if (!skip_ci) { + node('GPU') { + ws("workspace/exec_${env.EXECUTOR_NUMBER}/tvm/docs-python-gpu") { + try { + init_git() + docker_init(ci_gpu) + timeout(time: max_time, unit: 'MINUTES') { + withEnv([ + 'PLATFORM=gpu', + 'TEST_STEP_NAME=docs: GPU', + 'TVM_NUM_SHARDS=1', + 'TVM_SHARD_INDEX=0', + "SKIP_SLOW_TESTS=${skip_slow_tests}"], { + sh( + script: "./${jenkins_scripts_root}/s3.py --action download --bucket ${s3_bucket} --prefix ${s3_prefix}/gpu", + label: 'Download artifacts from S3', + ) + + ci_setup(ci_gpu) + sh ( + script: "${docker_run} ${ci_gpu} ./tests/scripts/task_python_docs.sh", + label: 'Build docs', + ) + sh( + script: "./${jenkins_scripts_root}/s3.py --action upload --bucket ${s3_bucket} --prefix ${s3_prefix}/docs --items docs.tgz", + label: 'Upload artifacts to S3', + ) + + sh( + script: "aws s3 cp --no-progress _docs s3://${s3_bucket}/${s3_prefix}/docs --recursive", + label: 'Upload docs to S3', + ) + }) + } + } finally { + try { + sh( + script: "./${jenkins_scripts_root}/s3.py --action upload --bucket ${s3_bucket} --prefix ${s3_prefix}/pytest-results/docs_GPU --items build/pytest-results", + label: 'Upload JUnits to S3', + ) + + junit 'build/pytest-results/*.xml' + } catch (Exception e) { + echo 'Exception during JUnit upload: ' + e.toString() + } + } + } + } + } else { + Utils.markStageSkippedForConditional('docs: GPU 1 of 1') + } +} + + + +def test() { + stage('Test') { + environment { + SKIP_SLOW_TESTS = "${skip_slow_tests}" + } + parallel( + 'unittest: GPU 1 of 3': { + shard_run_unittest_GPU_1_of_3() + }, + 'unittest: GPU 2 of 3': { + shard_run_unittest_GPU_2_of_3() + }, + 'unittest: GPU 3 of 3': { + shard_run_unittest_GPU_3_of_3() + }, + 'topi: GPU 1 of 3': { + shard_run_topi_GPU_1_of_3() + }, + 'topi: GPU 2 of 3': { + shard_run_topi_GPU_2_of_3() + }, + 'topi: GPU 3 of 3': { + shard_run_topi_GPU_3_of_3() + }, + 'frontend: GPU 1 of 6': { + shard_run_frontend_GPU_1_of_6() + }, + 'frontend: GPU 2 of 6': { + shard_run_frontend_GPU_2_of_6() + }, + 'frontend: GPU 3 of 6': { + shard_run_frontend_GPU_3_of_6() + }, + 'frontend: GPU 4 of 6': { + shard_run_frontend_GPU_4_of_6() + }, + 'frontend: GPU 5 of 6': { + shard_run_frontend_GPU_5_of_6() + }, + 'frontend: GPU 6 of 6': { + shard_run_frontend_GPU_6_of_6() + }, + 'docs: GPU 1 of 1': { + shard_run_docs_GPU_1_of_1() + }, + ) + } +} +test() + + + +def deploy_docs() { + // Note: This code must stay in the Jenkinsfile to ensure that it runs + // from a trusted context only + sh( + script: ''' + set -eux + rm -rf tvm-site + git clone -b $DOCS_DEPLOY_BRANCH --depth=1 https://github.com/apache/tvm-site + cd tvm-site + git status + git checkout -B $DOCS_DEPLOY_BRANCH + + git ls-tree HEAD docs/ --name-only | grep -vP '^docs/v\\d' | xargs rm -rf + mkdir -p docs + tar xf ../docs.tgz -C docs + COMMIT=$(cat docs/commit_hash) + git add . 
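+      # (COMMIT is read from docs/commit_hash in the unpacked docs.tgz and is
+      # interpolated into the commit message below)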
+      git config user.name tvm-bot
+      git config user.email 95660001+tvm-bot@users.noreply.github.com
+      git commit -m"deploying docs (apache/tvm@$COMMIT)"
+      git status
+    ''',
+    label: 'Unpack docs and update tvm-site'
+  )
+
+  withCredentials([string(
+    credentialsId: 'docs-push-token',
+    variable: 'GITHUB_TOKEN',
+    )]) {
+    sh(
+      script: '''
+        cd tvm-site
+        git remote add deploy https://$GITHUB_TOKEN:x-oauth-basic@github.com/apache/tvm-site.git
+        git push deploy $DOCS_DEPLOY_BRANCH || true
+      ''',
+      label: 'Upload docs to apache/tvm-site'
+    )
+  }
+}
+
+def deploy() {
+  stage('Deploy') {
+    if (env.BRANCH_NAME == 'main') {
+      parallel(
+        'Deploy Docs': {
+          if (env.DOCS_DEPLOY_ENABLED == 'yes') {
+            node('CPU') {
+              ws("workspace/exec_${env.EXECUTOR_NUMBER}/tvm/deploy-docs") {
+                timeout(time: max_time, unit: 'MINUTES') {
+                  init_git()
+                  sh(
+                    script: "./${jenkins_scripts_root}/s3.py --action download --bucket ${s3_bucket} --prefix ${s3_prefix}/docs",
+                    label: 'Download artifacts from S3',
+                  )
+
+                  deploy_docs()
+                }
+              }
+            }
+          } else {
+            Utils.markStageSkippedForConditional('Deploy Docs')
+          }
+        },
+      )
+    }
+  }
+}
+
+deploy()
diff --git a/ci/jenkins/generated/hexagon_jenkinsfile.groovy b/ci/jenkins/generated/hexagon_jenkinsfile.groovy
new file mode 100644
index 000000000000..4496225537c1
--- /dev/null
+++ b/ci/jenkins/generated/hexagon_jenkinsfile.groovy
@@ -0,0 +1,916 @@
+#!groovy
+// -*- mode: groovy -*-
+
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// Jenkins pipeline
+// See documents at https://jenkins.io/doc/book/pipeline/jenkinsfile/
+
+// Docker env used for testing
+// Different images may have different version tags
+// because some of them are more stable than others.
+//
+// Docker images are maintained by the PMC, cached on Docker Hub,
+// and remain relatively stable over time.
+// Flow for upgrading the docker env (requires a committer):
+//
+// - Send a PR to upgrade the build script in the repo
+// - Build the new docker image
+// - Tag the docker image with a new version and push to a binary cache.
+// - Update the version in the Jenkinsfile, send a PR
+// - Fix any issues with the new image version in the PR
+// - Merge the PR; the new version is now in use
+// - Tag the new version as the latest
+// - Periodically clean up the old versions on the local workers
+//
+
+// ============================= IMPORTANT NOTE =============================
+// This file is generated by 'jenkins/generate.py'. Do not edit this file directly!
+// Make edits to 'jenkins/Jenkinsfile.j2' and regenerate this with +// 'python3 jenkins/generate.py' +// Note: This timestamp is here to ensure that updates to the Jenkinsfile are +// always rebased on main before merging: +// Generated at 2022-12-01T17:07:24.608492 + +import org.jenkinsci.plugins.pipeline.modeldefinition.Utils +ci_lint = 'tlcpack/ci-lint:20221013-060115-61c9742ea' +ci_gpu = 'tlcpack/ci-gpu:20221019-060125-0b4836739' +ci_cpu = 'tlcpack/ci-cpu:20221013-060115-61c9742ea' +ci_minimal = 'tlcpack/ci-minimal:20221013-060115-61c9742ea' +ci_wasm = 'tlcpack/ci-wasm:20221013-060115-61c9742ea' +ci_i386 = 'tlcpack/ci-i386:20221013-060115-61c9742ea' +ci_cortexm = 'tlcpack/ci-cortexm:20221013-060115-61c9742ea' +ci_arm = 'tlcpack/ci-arm:20221013-060115-61c9742ea' +ci_hexagon = 'tlcpack/ci-hexagon:20221013-060115-61c9742ea' +ci_riscv = 'tlcpack/ci-riscv:20221013-060115-61c9742ea' + +// Parameters to allow overriding (in Jenkins UI), the images +// to be used by a given build. When provided, they take precedence +// over default values above. +properties([ + parameters([ + string(name: 'ci_arm_param', defaultValue: ''), + string(name: 'ci_cortexm_param', defaultValue: ''), + string(name: 'ci_cpu_param', defaultValue: ''), + string(name: 'ci_gpu_param', defaultValue: ''), + string(name: 'ci_hexagon_param', defaultValue: ''), + string(name: 'ci_i386_param', defaultValue: ''), + string(name: 'ci_lint_param', defaultValue: ''), + string(name: 'ci_minimal_param', defaultValue: ''), + string(name: 'ci_riscv_param', defaultValue: ''), + string(name: 'ci_wasm_param', defaultValue: ''), + ]) +]) + +// Placeholders for newly built Docker image names (if rebuild_docker_images +// is used) + built_ci_arm = null; + built_ci_cortexm = null; + built_ci_cpu = null; + built_ci_gpu = null; + built_ci_hexagon = null; + built_ci_i386 = null; + built_ci_lint = null; + built_ci_minimal = null; + built_ci_riscv = null; + built_ci_wasm = null; + +// Global variable assigned during Sanity Check that holds the sha1 which should be +// merged into the PR in all branches. +upstream_revision = null + +// command to start a docker container +docker_run = 'docker/bash.sh --env CI --env TVM_SHARD_INDEX --env TVM_NUM_SHARDS --env RUN_DISPLAY_URL --env PLATFORM --env SKIP_SLOW_TESTS --env TEST_STEP_NAME' +docker_build = 'docker/build.sh' +// timeout in minutes +max_time = 180 +rebuild_docker_images = false + +s3_bucket = 'tvm-jenkins-artifacts-prod' +s3_prefix = "tvm/${env.BRANCH_NAME}/${env.BUILD_NUMBER}" + +// Jenkins script root directory +jenkins_scripts_root = "ci/scripts/jenkins" + + +// General note: Jenkins has limits on the size of a method (or top level code) +// that are pretty strict, so most usage of groovy methods in these templates +// are purely to satisfy the JVM +def per_exec_ws(folder) { + return "workspace/exec_${env.EXECUTOR_NUMBER}/" + folder +} + +// initialize source codes +def init_git() { + retry(5) { + checkout scm + } + + // Add more info about job node + sh ( + script: './tests/scripts/task_show_node_info.sh', + label: 'Show executor node info', + ) + + // Determine merge commit to use for all stages + if (env.BRANCH_NAME == 'main') { + // Only set upstream_revision to HEAD and skip merging to avoid a race with another commit merged to main. + update_upstream_revision("HEAD") + } else { + // This is PR branch so merge with latest main. + merge_with_main() + } + + sh( + script: """ + set -eux + . 
${jenkins_scripts_root}/retry.sh + retry 3 timeout 5m git submodule update --init -f --jobs 0 + """, + label: 'Update git submodules', + ) + checkout_trusted_files() +} + +def update_upstream_revision(git_ref) { + if (upstream_revision == null) { + upstream_revision = sh( + script: "git log -1 ${git_ref} --format=\'%H\'", + label: 'Determine upstream revision', + returnStdout: true, + ).trim() + } +} + +def merge_with_main() { + sh ( + script: 'git fetch origin main', + label: 'Fetch upstream', + ) + update_upstream_revision("FETCH_HEAD") + sh ( + script: "git -c user.name=TVM-Jenkins -c user.email=jenkins@tvm.apache.org merge ${upstream_revision}", + label: 'Merge to origin/main' + ) +} + +def docker_init(image) { + // Clear out all Docker images that aren't going to be used + sh( + script: """ + set -eux + docker image ls --all + IMAGES=\$(docker image ls --all --format '{{.Repository}}:{{.Tag}} {{.ID}}') + + echo -e "Found images:\\n\$IMAGES" + echo "\$IMAGES" | { grep -vE '${image}' || test \$? = 1; } | { xargs docker rmi || test \$? = 123; } + + docker image ls --all + """, + label: 'Clean old Docker images', + ) + + if (image.contains("amazonaws.com")) { + // If this string is in the image name it's from ECR and needs to be pulled + // with the right credentials + // ecr_pull(image) + sh "echo Pulling from AWS is not implemented && exit 1" + } else { + sh( + script: """ + set -eux + . ${jenkins_scripts_root}/retry.sh + retry 5 docker pull ${image} + """, + label: 'Pull docker image', + ) + } +} + +def should_skip_slow_tests(pr_number) { + withCredentials([string( + credentialsId: 'tvm-bot-jenkins-reader', + variable: 'GITHUB_TOKEN', + )]) { + // Exit code of 1 means run slow tests, exit code of 0 means skip slow tests + result = sh ( + returnStatus: true, + script: "./${jenkins_scripts_root}/should_run_slow_tests.py --pr '${pr_number}'", + label: 'Check if CI should run slow tests', + ) + } + return result == 0 +} + +def cancel_previous_build() { + // cancel previous build if it is not on main. 
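+  // (For example, when build #7 reaches this point, milestone(6) followed by
+  // milestone(7) makes Jenkins abort a build #6 that is still running.)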
+  if (env.BRANCH_NAME != 'main') {
+    def buildNumber = env.BUILD_NUMBER as int
+    // The Milestone API allows us to cancel a previous build
+    // with the same milestone number
+    if (buildNumber > 1) milestone(buildNumber - 1)
+    milestone(buildNumber)
+  }
+}
+
+def checkout_trusted_files() {
+  // trust everything from branch builds
+  if (env.BRANCH_NAME == null || !env.BRANCH_NAME.startsWith('PR-')) {
+    return;
+  }
+
+  // trust people listed in CONTRIBUTORS.md
+  grep_code = sh(
+    returnStatus: true,
+    script: "git show '${upstream_revision}:CONTRIBUTORS.md' | grep '@${env.CHANGE_AUTHOR}'",
+    label: 'Check if change is from a contributor',
+  )
+
+  if (grep_code == 1) {
+    // Any scripts that run on the bare host and not inside a Docker container
+    // (especially those that access secrets) should be checked out here so
+    // only trusted versions are used in CI
+    sh(
+      script: "git checkout ${upstream_revision} ${jenkins_scripts_root}/.",
+      label: 'Check out trusted files',
+    )
+  }
+}
+
+def should_skip_ci(pr_number) {
+  if (env.BRANCH_NAME == null || !env.BRANCH_NAME.startsWith('PR-')) {
+    // never skip CI on builds sourced from a branch
+    return false
+  }
+  glob_skip_ci_code = sh (
+    returnStatus: true,
+    script: "./${jenkins_scripts_root}/git_skip_ci_globs.py",
+    label: 'Check if CI should be skipped due to changed files',
+  )
+  if (glob_skip_ci_code == 0) {
+    return true
+  }
+  withCredentials([string(
+    credentialsId: 'tvm-bot-jenkins-reader',
+    variable: 'GITHUB_TOKEN',
+    )]) {
+    // Exit code of 1 means run full CI (or the script had an error, so run
+    // full CI just in case). Exit code of 0 means skip CI.
+    git_skip_ci_code = sh (
+      returnStatus: true,
+      script: "./${jenkins_scripts_root}/git_skip_ci.py --pr '${pr_number}'",
+      label: 'Check if CI should be skipped',
+    )
+  }
+  return git_skip_ci_code == 0
+}
+
+def check_pr(pr_number) {
+  if (env.BRANCH_NAME == null || !env.BRANCH_NAME.startsWith('PR-')) {
+    // PR checks only apply to builds sourced from a PR
+    return false
+  }
+  withCredentials([string(
+    credentialsId: 'tvm-bot-jenkins-reader',
+    variable: 'GITHUB_TOKEN',
+    )]) {
+    sh (
+      script: "python3 ${jenkins_scripts_root}/check_pr.py --pr ${pr_number}",
+      label: 'Check PR title and body',
+    )
+  }
+
+}
+
+def prepare() {
+  stage('Prepare') {
+    node('CPU-SMALL') {
+      ws("workspace/exec_${env.EXECUTOR_NUMBER}/tvm/prepare") {
+        init_git()
+
+        check_pr(env.CHANGE_ID)
+
+        if (env.DETERMINE_DOCKER_IMAGES == 'yes') {
+          sh(
+            script: "./${jenkins_scripts_root}/determine_docker_images.py ci_arm=${ci_arm} ci_cortexm=${ci_cortexm} ci_cpu=${ci_cpu} ci_gpu=${ci_gpu} ci_hexagon=${ci_hexagon} ci_i386=${ci_i386} ci_lint=${ci_lint} ci_minimal=${ci_minimal} ci_riscv=${ci_riscv} ci_wasm=${ci_wasm} ",
+            label: 'Decide whether to use tlcpack or tlcpackstaging for Docker images',
+          )
+          // Pull image names from the results of determine_docker_images.py
+          ci_arm = sh(
+            script: "cat .docker-image-names/ci_arm",
+            label: "Find docker image name for ci_arm",
+            returnStdout: true,
+          ).trim()
+          ci_cortexm = sh(
+            script: "cat .docker-image-names/ci_cortexm",
+            label: "Find docker image name for ci_cortexm",
+            returnStdout: true,
+          ).trim()
+          ci_cpu = sh(
+            script: "cat .docker-image-names/ci_cpu",
+            label: "Find docker image name for ci_cpu",
+            returnStdout: true,
+          ).trim()
+          ci_gpu = sh(
+            script: "cat .docker-image-names/ci_gpu",
+            label: "Find docker image name for ci_gpu",
+            returnStdout: true,
+          ).trim()
+          ci_hexagon = sh(
+            script: "cat .docker-image-names/ci_hexagon",
+            label: "Find docker image name for
ci_hexagon", + returnStdout: true, + ).trim() + ci_i386 = sh( + script: "cat .docker-image-names/ci_i386", + label: "Find docker image name for ci_i386", + returnStdout: true, + ).trim() + ci_lint = sh( + script: "cat .docker-image-names/ci_lint", + label: "Find docker image name for ci_lint", + returnStdout: true, + ).trim() + ci_minimal = sh( + script: "cat .docker-image-names/ci_minimal", + label: "Find docker image name for ci_minimal", + returnStdout: true, + ).trim() + ci_riscv = sh( + script: "cat .docker-image-names/ci_riscv", + label: "Find docker image name for ci_riscv", + returnStdout: true, + ).trim() + ci_wasm = sh( + script: "cat .docker-image-names/ci_wasm", + label: "Find docker image name for ci_wasm", + returnStdout: true, + ).trim() + } + + ci_arm = params.ci_arm_param ?: ci_arm + ci_cortexm = params.ci_cortexm_param ?: ci_cortexm + ci_cpu = params.ci_cpu_param ?: ci_cpu + ci_gpu = params.ci_gpu_param ?: ci_gpu + ci_hexagon = params.ci_hexagon_param ?: ci_hexagon + ci_i386 = params.ci_i386_param ?: ci_i386 + ci_lint = params.ci_lint_param ?: ci_lint + ci_minimal = params.ci_minimal_param ?: ci_minimal + ci_riscv = params.ci_riscv_param ?: ci_riscv + ci_wasm = params.ci_wasm_param ?: ci_wasm + + sh (script: """ + echo "Docker images being used in this build:" + echo " ci_arm = ${ci_arm}" + echo " ci_cortexm = ${ci_cortexm}" + echo " ci_cpu = ${ci_cpu}" + echo " ci_gpu = ${ci_gpu}" + echo " ci_hexagon = ${ci_hexagon}" + echo " ci_i386 = ${ci_i386}" + echo " ci_lint = ${ci_lint}" + echo " ci_minimal = ${ci_minimal}" + echo " ci_riscv = ${ci_riscv}" + echo " ci_wasm = ${ci_wasm}" + """, label: 'Docker image names') + + is_docs_only_build = sh ( + returnStatus: true, + script: "./${jenkins_scripts_root}/git_change_docs.sh", + label: 'Check for docs only changes', + ) + skip_ci = should_skip_ci(env.CHANGE_ID) + skip_slow_tests = should_skip_slow_tests(env.CHANGE_ID) + rebuild_docker_images = sh ( + returnStatus: true, + script: "./${jenkins_scripts_root}/git_change_docker.sh", + label: 'Check for any docker changes', + ) + + if (skip_ci) { + // Don't rebuild when skipping CI + rebuild_docker_images = false + } + } + } + } +} +def ci_setup(image) { + sh ( + script: "${docker_run} ${image} ./tests/scripts/task_clear_pytest.sh", + label: 'Clean up old workspace', + ) +} + +def python_unittest(image) { + sh ( + script: "${docker_run} ${image} ./tests/scripts/task_python_unittest.sh", + label: 'Run Python unit tests', + ) +} + +def fsim_test(image) { + sh ( + script: "${docker_run} ${image} ./tests/scripts/task_python_vta_fsim.sh", + label: 'Run VTA tests in FSIM', + ) +} + +def make_standalone_crt(image, build_dir) { + sh ( + script: """ + set -eux + ${docker_run} ${image} python3 ./tests/scripts/task_build.py \ + --sccache-bucket tvm-sccache-prod \ + --cmake-target standalone_crt \ + --build-dir build + ${docker_run} ${image} python3 ./tests/scripts/task_build.py \ + --sccache-bucket tvm-sccache-prod \ + --cmake-target crttest \ + --build-dir build + """, + label: 'Make standalone CRT', + ) +} + +def make_cpp_tests(image, build_dir) { + sh ( + script: """ + set -eux + ${docker_run} ${image} python3 ./tests/scripts/task_build.py \ + --sccache-bucket tvm-sccache-prod \ + --cmake-target cpptest \ + --build-dir ${build_dir} + """, + label: 'Make C++ tests', + ) +} + +def cmake_build(image, path, make_flag) { + sh ( + script: "${docker_run} --env CI_NUM_EXECUTORS ${image} ./tests/scripts/task_build.py --sccache-bucket tvm-sccache-prod", + label: 'Run cmake build', + ) +} +def 
cpp_unittest(image) { + sh ( + script: "${docker_run} --env CI_NUM_EXECUTORS ${image} ./tests/scripts/task_cpp_unittest.sh", + label: 'Run C++ tests', + ) +} + +def micro_cpp_unittest(image) { + sh ( + script: "${docker_run} --env CI_NUM_EXECUTORS ${image} ./tests/scripts/task_microtvm_cpp_tests.sh build", + label: 'Run microTVM C++ tests', + ) +} + +cancel_previous_build() + +prepare() +def build() { + stage('Build') { + if (!skip_ci && is_docs_only_build != 1) { + node('CPU-SMALL') { + ws("workspace/exec_${env.EXECUTOR_NUMBER}/tvm/build-hexagon") { + init_git() + docker_init(ci_hexagon) + timeout(time: max_time, unit: 'MINUTES') { + sh ( + script: "${docker_run} ${ci_hexagon} ./tests/scripts/task_config_build_hexagon.sh build", + label: 'Create Hexagon cmake config', + ) + cmake_build(ci_hexagon, 'build', '-j2') + make_cpp_tests(ci_hexagon, 'build') + sh ( + script: "${docker_run} ${ci_hexagon} ./tests/scripts/task_build_hexagon_api.sh", + label: 'Build Hexagon API', + ) + sh( + script: "./${jenkins_scripts_root}/s3.py --action upload --bucket ${s3_bucket} --prefix ${s3_prefix}/hexagon --items build/libtvm.so build/libtvm_runtime.so build/config.cmake build/cpptest build/build.ninja build/CMakeFiles/rules.ninja build/hexagon_api_output", + label: 'Upload artifacts to S3', + ) + } + } + } + } else { + Utils.markStageSkippedForConditional('BUILD: Hexagon') + } + } +} +build() + + + + +def shard_run_test_Hexagon_1_of_8() { + if (!skip_ci && is_docs_only_build != 1) { + node('CPU-SMALL') { + ws("workspace/exec_${env.EXECUTOR_NUMBER}/tvm/test-hexagon") { + try { + init_git() + docker_init(ci_hexagon) + timeout(time: max_time, unit: 'MINUTES') { + withEnv([ + 'PLATFORM=hexagon', + 'TEST_STEP_NAME=test: Hexagon', + 'TVM_NUM_SHARDS=8', + 'TVM_SHARD_INDEX=0', + "SKIP_SLOW_TESTS=${skip_slow_tests}"], { + sh( + script: "./${jenkins_scripts_root}/s3.py --action download --bucket ${s3_bucket} --prefix ${s3_prefix}/hexagon", + label: 'Download artifacts from S3', + ) + + ci_setup(ci_hexagon) + cpp_unittest(ci_hexagon) + sh ( + script: "${docker_run} ${ci_hexagon} ./tests/scripts/task_python_hexagon.sh", + label: 'Run Hexagon tests', + ) + }) + } + } finally { + try { + sh( + script: "./${jenkins_scripts_root}/s3.py --action upload --bucket ${s3_bucket} --prefix ${s3_prefix}/pytest-results/test_Hexagon --items build/pytest-results", + label: 'Upload JUnits to S3', + ) + + junit 'build/pytest-results/*.xml' + } catch (Exception e) { + echo 'Exception during JUnit upload: ' + e.toString() + } + } + } + } + } else { + Utils.markStageSkippedForConditional('test: Hexagon 1 of 8') + } +} + +def shard_run_test_Hexagon_2_of_8() { + if (!skip_ci && is_docs_only_build != 1) { + node('CPU-SMALL') { + ws("workspace/exec_${env.EXECUTOR_NUMBER}/tvm/test-hexagon") { + try { + init_git() + docker_init(ci_hexagon) + timeout(time: max_time, unit: 'MINUTES') { + withEnv([ + 'PLATFORM=hexagon', + 'TEST_STEP_NAME=test: Hexagon', + 'TVM_NUM_SHARDS=8', + 'TVM_SHARD_INDEX=1', + "SKIP_SLOW_TESTS=${skip_slow_tests}"], { + sh( + script: "./${jenkins_scripts_root}/s3.py --action download --bucket ${s3_bucket} --prefix ${s3_prefix}/hexagon", + label: 'Download artifacts from S3', + ) + + ci_setup(ci_hexagon) + sh ( + script: "${docker_run} ${ci_hexagon} ./tests/scripts/task_python_hexagon.sh", + label: 'Run Hexagon tests', + ) + }) + } + } finally { + try { + sh( + script: "./${jenkins_scripts_root}/s3.py --action upload --bucket ${s3_bucket} --prefix ${s3_prefix}/pytest-results/test_Hexagon --items build/pytest-results", + label: 
'Upload JUnits to S3', + ) + + junit 'build/pytest-results/*.xml' + } catch (Exception e) { + echo 'Exception during JUnit upload: ' + e.toString() + } + } + } + } + } else { + Utils.markStageSkippedForConditional('test: Hexagon 2 of 8') + } +} + +def shard_run_test_Hexagon_3_of_8() { + if (!skip_ci && is_docs_only_build != 1) { + node('CPU-SMALL') { + ws("workspace/exec_${env.EXECUTOR_NUMBER}/tvm/test-hexagon") { + try { + init_git() + docker_init(ci_hexagon) + timeout(time: max_time, unit: 'MINUTES') { + withEnv([ + 'PLATFORM=hexagon', + 'TEST_STEP_NAME=test: Hexagon', + 'TVM_NUM_SHARDS=8', + 'TVM_SHARD_INDEX=2', + "SKIP_SLOW_TESTS=${skip_slow_tests}"], { + sh( + script: "./${jenkins_scripts_root}/s3.py --action download --bucket ${s3_bucket} --prefix ${s3_prefix}/hexagon", + label: 'Download artifacts from S3', + ) + + ci_setup(ci_hexagon) + sh ( + script: "${docker_run} ${ci_hexagon} ./tests/scripts/task_python_hexagon.sh", + label: 'Run Hexagon tests', + ) + }) + } + } finally { + try { + sh( + script: "./${jenkins_scripts_root}/s3.py --action upload --bucket ${s3_bucket} --prefix ${s3_prefix}/pytest-results/test_Hexagon --items build/pytest-results", + label: 'Upload JUnits to S3', + ) + + junit 'build/pytest-results/*.xml' + } catch (Exception e) { + echo 'Exception during JUnit upload: ' + e.toString() + } + } + } + } + } else { + Utils.markStageSkippedForConditional('test: Hexagon 3 of 8') + } +} + +def shard_run_test_Hexagon_4_of_8() { + if (!skip_ci && is_docs_only_build != 1) { + node('CPU-SMALL') { + ws("workspace/exec_${env.EXECUTOR_NUMBER}/tvm/test-hexagon") { + try { + init_git() + docker_init(ci_hexagon) + timeout(time: max_time, unit: 'MINUTES') { + withEnv([ + 'PLATFORM=hexagon', + 'TEST_STEP_NAME=test: Hexagon', + 'TVM_NUM_SHARDS=8', + 'TVM_SHARD_INDEX=3', + "SKIP_SLOW_TESTS=${skip_slow_tests}"], { + sh( + script: "./${jenkins_scripts_root}/s3.py --action download --bucket ${s3_bucket} --prefix ${s3_prefix}/hexagon", + label: 'Download artifacts from S3', + ) + + ci_setup(ci_hexagon) + sh ( + script: "${docker_run} ${ci_hexagon} ./tests/scripts/task_python_hexagon.sh", + label: 'Run Hexagon tests', + ) + }) + } + } finally { + try { + sh( + script: "./${jenkins_scripts_root}/s3.py --action upload --bucket ${s3_bucket} --prefix ${s3_prefix}/pytest-results/test_Hexagon --items build/pytest-results", + label: 'Upload JUnits to S3', + ) + + junit 'build/pytest-results/*.xml' + } catch (Exception e) { + echo 'Exception during JUnit upload: ' + e.toString() + } + } + } + } + } else { + Utils.markStageSkippedForConditional('test: Hexagon 4 of 8') + } +} + +def shard_run_test_Hexagon_5_of_8() { + if (!skip_ci && is_docs_only_build != 1) { + node('CPU-SMALL') { + ws("workspace/exec_${env.EXECUTOR_NUMBER}/tvm/test-hexagon") { + try { + init_git() + docker_init(ci_hexagon) + timeout(time: max_time, unit: 'MINUTES') { + withEnv([ + 'PLATFORM=hexagon', + 'TEST_STEP_NAME=test: Hexagon', + 'TVM_NUM_SHARDS=8', + 'TVM_SHARD_INDEX=4', + "SKIP_SLOW_TESTS=${skip_slow_tests}"], { + sh( + script: "./${jenkins_scripts_root}/s3.py --action download --bucket ${s3_bucket} --prefix ${s3_prefix}/hexagon", + label: 'Download artifacts from S3', + ) + + ci_setup(ci_hexagon) + sh ( + script: "${docker_run} ${ci_hexagon} ./tests/scripts/task_python_hexagon.sh", + label: 'Run Hexagon tests', + ) + }) + } + } finally { + try { + sh( + script: "./${jenkins_scripts_root}/s3.py --action upload --bucket ${s3_bucket} --prefix ${s3_prefix}/pytest-results/test_Hexagon --items build/pytest-results", + label: 
'Upload JUnits to S3', + ) + + junit 'build/pytest-results/*.xml' + } catch (Exception e) { + echo 'Exception during JUnit upload: ' + e.toString() + } + } + } + } + } else { + Utils.markStageSkippedForConditional('test: Hexagon 5 of 8') + } +} + +def shard_run_test_Hexagon_6_of_8() { + if (!skip_ci && is_docs_only_build != 1) { + node('CPU-SMALL') { + ws("workspace/exec_${env.EXECUTOR_NUMBER}/tvm/test-hexagon") { + try { + init_git() + docker_init(ci_hexagon) + timeout(time: max_time, unit: 'MINUTES') { + withEnv([ + 'PLATFORM=hexagon', + 'TEST_STEP_NAME=test: Hexagon', + 'TVM_NUM_SHARDS=8', + 'TVM_SHARD_INDEX=5', + "SKIP_SLOW_TESTS=${skip_slow_tests}"], { + sh( + script: "./${jenkins_scripts_root}/s3.py --action download --bucket ${s3_bucket} --prefix ${s3_prefix}/hexagon", + label: 'Download artifacts from S3', + ) + + ci_setup(ci_hexagon) + sh ( + script: "${docker_run} ${ci_hexagon} ./tests/scripts/task_python_hexagon.sh", + label: 'Run Hexagon tests', + ) + }) + } + } finally { + try { + sh( + script: "./${jenkins_scripts_root}/s3.py --action upload --bucket ${s3_bucket} --prefix ${s3_prefix}/pytest-results/test_Hexagon --items build/pytest-results", + label: 'Upload JUnits to S3', + ) + + junit 'build/pytest-results/*.xml' + } catch (Exception e) { + echo 'Exception during JUnit upload: ' + e.toString() + } + } + } + } + } else { + Utils.markStageSkippedForConditional('test: Hexagon 6 of 8') + } +} + +def shard_run_test_Hexagon_7_of_8() { + if (!skip_ci && is_docs_only_build != 1) { + node('CPU-SMALL') { + ws("workspace/exec_${env.EXECUTOR_NUMBER}/tvm/test-hexagon") { + try { + init_git() + docker_init(ci_hexagon) + timeout(time: max_time, unit: 'MINUTES') { + withEnv([ + 'PLATFORM=hexagon', + 'TEST_STEP_NAME=test: Hexagon', + 'TVM_NUM_SHARDS=8', + 'TVM_SHARD_INDEX=6', + "SKIP_SLOW_TESTS=${skip_slow_tests}"], { + sh( + script: "./${jenkins_scripts_root}/s3.py --action download --bucket ${s3_bucket} --prefix ${s3_prefix}/hexagon", + label: 'Download artifacts from S3', + ) + + ci_setup(ci_hexagon) + sh ( + script: "${docker_run} ${ci_hexagon} ./tests/scripts/task_python_hexagon.sh", + label: 'Run Hexagon tests', + ) + }) + } + } finally { + try { + sh( + script: "./${jenkins_scripts_root}/s3.py --action upload --bucket ${s3_bucket} --prefix ${s3_prefix}/pytest-results/test_Hexagon --items build/pytest-results", + label: 'Upload JUnits to S3', + ) + + junit 'build/pytest-results/*.xml' + } catch (Exception e) { + echo 'Exception during JUnit upload: ' + e.toString() + } + } + } + } + } else { + Utils.markStageSkippedForConditional('test: Hexagon 7 of 8') + } +} + +def shard_run_test_Hexagon_8_of_8() { + if (!skip_ci && is_docs_only_build != 1) { + node('CPU-SMALL') { + ws("workspace/exec_${env.EXECUTOR_NUMBER}/tvm/test-hexagon") { + try { + init_git() + docker_init(ci_hexagon) + timeout(time: max_time, unit: 'MINUTES') { + withEnv([ + 'PLATFORM=hexagon', + 'TEST_STEP_NAME=test: Hexagon', + 'TVM_NUM_SHARDS=8', + 'TVM_SHARD_INDEX=7', + "SKIP_SLOW_TESTS=${skip_slow_tests}"], { + sh( + script: "./${jenkins_scripts_root}/s3.py --action download --bucket ${s3_bucket} --prefix ${s3_prefix}/hexagon", + label: 'Download artifacts from S3', + ) + + ci_setup(ci_hexagon) + sh ( + script: "${docker_run} ${ci_hexagon} ./tests/scripts/task_python_hexagon.sh", + label: 'Run Hexagon tests', + ) + }) + } + } finally { + try { + sh( + script: "./${jenkins_scripts_root}/s3.py --action upload --bucket ${s3_bucket} --prefix ${s3_prefix}/pytest-results/test_Hexagon --items build/pytest-results", + label: 
'Upload JUnits to S3',
+          )
+
+          junit 'build/pytest-results/*.xml'
+        } catch (Exception e) {
+          echo 'Exception during JUnit upload: ' + e.toString()
+        }
+      }
+    }
+  }
+  } else {
+    Utils.markStageSkippedForConditional('test: Hexagon 8 of 8')
+  }
+}
+
+
+def test() {
+  stage('Test') {
+    environment {
+      SKIP_SLOW_TESTS = "${skip_slow_tests}"
+    }
+    parallel(
+      'test: Hexagon 1 of 8': {
+        shard_run_test_Hexagon_1_of_8()
+      },
+      'test: Hexagon 2 of 8': {
+        shard_run_test_Hexagon_2_of_8()
+      },
+      'test: Hexagon 3 of 8': {
+        shard_run_test_Hexagon_3_of_8()
+      },
+      'test: Hexagon 4 of 8': {
+        shard_run_test_Hexagon_4_of_8()
+      },
+      'test: Hexagon 5 of 8': {
+        shard_run_test_Hexagon_5_of_8()
+      },
+      'test: Hexagon 6 of 8': {
+        shard_run_test_Hexagon_6_of_8()
+      },
+      'test: Hexagon 7 of 8': {
+        shard_run_test_Hexagon_7_of_8()
+      },
+      'test: Hexagon 8 of 8': {
+        shard_run_test_Hexagon_8_of_8()
+      },
+    )
+  }
+}
+test()
diff --git a/ci/jenkins/generated/i386_jenkinsfile.groovy b/ci/jenkins/generated/i386_jenkinsfile.groovy
new file mode 100644
index 000000000000..5335b0af4e01
--- /dev/null
+++ b/ci/jenkins/generated/i386_jenkinsfile.groovy
@@ -0,0 +1,678 @@
+#!groovy
+// -*- mode: groovy -*-
+
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// Jenkins pipeline
+// See documents at https://jenkins.io/doc/book/pipeline/jenkinsfile/
+
+// Docker env used for testing
+// Different images may have different version tags
+// because some of them are more stable than others.
+//
+// Docker images are maintained by the PMC, cached on Docker Hub,
+// and remain relatively stable over time.
+// Flow for upgrading the docker env (requires a committer):
+//
+// - Send a PR to upgrade the build script in the repo
+// - Build the new docker image
+// - Tag the docker image with a new version and push to a binary cache.
+// - Update the version in the Jenkinsfile, send a PR
+// - Fix any issues with the new image version in the PR
+// - Merge the PR; the new version is now in use
+// - Tag the new version as the latest
+// - Periodically clean up the old versions on the local workers
+//
+
+// ============================= IMPORTANT NOTE =============================
+// This file is generated by 'jenkins/generate.py'. Do not edit this file directly!
+// Make edits to 'jenkins/Jenkinsfile.j2' and regenerate this with +// 'python3 jenkins/generate.py' +// Note: This timestamp is here to ensure that updates to the Jenkinsfile are +// always rebased on main before merging: +// Generated at 2022-12-01T17:07:24.663072 + +import org.jenkinsci.plugins.pipeline.modeldefinition.Utils +ci_lint = 'tlcpack/ci-lint:20221013-060115-61c9742ea' +ci_gpu = 'tlcpack/ci-gpu:20221019-060125-0b4836739' +ci_cpu = 'tlcpack/ci-cpu:20221013-060115-61c9742ea' +ci_minimal = 'tlcpack/ci-minimal:20221013-060115-61c9742ea' +ci_wasm = 'tlcpack/ci-wasm:20221013-060115-61c9742ea' +ci_i386 = 'tlcpack/ci-i386:20221013-060115-61c9742ea' +ci_cortexm = 'tlcpack/ci-cortexm:20221013-060115-61c9742ea' +ci_arm = 'tlcpack/ci-arm:20221013-060115-61c9742ea' +ci_hexagon = 'tlcpack/ci-hexagon:20221013-060115-61c9742ea' +ci_riscv = 'tlcpack/ci-riscv:20221013-060115-61c9742ea' + +// Parameters to allow overriding (in Jenkins UI), the images +// to be used by a given build. When provided, they take precedence +// over default values above. +properties([ + parameters([ + string(name: 'ci_arm_param', defaultValue: ''), + string(name: 'ci_cortexm_param', defaultValue: ''), + string(name: 'ci_cpu_param', defaultValue: ''), + string(name: 'ci_gpu_param', defaultValue: ''), + string(name: 'ci_hexagon_param', defaultValue: ''), + string(name: 'ci_i386_param', defaultValue: ''), + string(name: 'ci_lint_param', defaultValue: ''), + string(name: 'ci_minimal_param', defaultValue: ''), + string(name: 'ci_riscv_param', defaultValue: ''), + string(name: 'ci_wasm_param', defaultValue: ''), + ]) +]) + +// Placeholders for newly built Docker image names (if rebuild_docker_images +// is used) + built_ci_arm = null; + built_ci_cortexm = null; + built_ci_cpu = null; + built_ci_gpu = null; + built_ci_hexagon = null; + built_ci_i386 = null; + built_ci_lint = null; + built_ci_minimal = null; + built_ci_riscv = null; + built_ci_wasm = null; + +// Global variable assigned during Sanity Check that holds the sha1 which should be +// merged into the PR in all branches. +upstream_revision = null + +// command to start a docker container +docker_run = 'docker/bash.sh --env CI --env TVM_SHARD_INDEX --env TVM_NUM_SHARDS --env RUN_DISPLAY_URL --env PLATFORM --env SKIP_SLOW_TESTS --env TEST_STEP_NAME' +docker_build = 'docker/build.sh' +// timeout in minutes +max_time = 180 +rebuild_docker_images = false + +s3_bucket = 'tvm-jenkins-artifacts-prod' +s3_prefix = "tvm/${env.BRANCH_NAME}/${env.BUILD_NUMBER}" + +// Jenkins script root directory +jenkins_scripts_root = "ci/scripts/jenkins" + + +// General note: Jenkins has limits on the size of a method (or top level code) +// that are pretty strict, so most usage of groovy methods in these templates +// are purely to satisfy the JVM +def per_exec_ws(folder) { + return "workspace/exec_${env.EXECUTOR_NUMBER}/" + folder +} + +// initialize source codes +def init_git() { + retry(5) { + checkout scm + } + + // Add more info about job node + sh ( + script: './tests/scripts/task_show_node_info.sh', + label: 'Show executor node info', + ) + + // Determine merge commit to use for all stages + if (env.BRANCH_NAME == 'main') { + // Only set upstream_revision to HEAD and skip merging to avoid a race with another commit merged to main. + update_upstream_revision("HEAD") + } else { + // This is PR branch so merge with latest main. + merge_with_main() + } + + sh( + script: """ + set -eux + . 
${jenkins_scripts_root}/retry.sh + retry 3 timeout 5m git submodule update --init -f --jobs 0 + """, + label: 'Update git submodules', + ) + checkout_trusted_files() +} + +def update_upstream_revision(git_ref) { + if (upstream_revision == null) { + upstream_revision = sh( + script: "git log -1 ${git_ref} --format=\'%H\'", + label: 'Determine upstream revision', + returnStdout: true, + ).trim() + } +} + +def merge_with_main() { + sh ( + script: 'git fetch origin main', + label: 'Fetch upstream', + ) + update_upstream_revision("FETCH_HEAD") + sh ( + script: "git -c user.name=TVM-Jenkins -c user.email=jenkins@tvm.apache.org merge ${upstream_revision}", + label: 'Merge to origin/main' + ) +} + +def docker_init(image) { + // Clear out all Docker images that aren't going to be used + sh( + script: """ + set -eux + docker image ls --all + IMAGES=\$(docker image ls --all --format '{{.Repository}}:{{.Tag}} {{.ID}}') + + echo -e "Found images:\\n\$IMAGES" + echo "\$IMAGES" | { grep -vE '${image}' || test \$? = 1; } | { xargs docker rmi || test \$? = 123; } + + docker image ls --all + """, + label: 'Clean old Docker images', + ) + + if (image.contains("amazonaws.com")) { + // If this string is in the image name it's from ECR and needs to be pulled + // with the right credentials + // ecr_pull(image) + sh "echo Pulling from AWS is not implemented && exit 1" + } else { + sh( + script: """ + set -eux + . ${jenkins_scripts_root}/retry.sh + retry 5 docker pull ${image} + """, + label: 'Pull docker image', + ) + } +} + +def should_skip_slow_tests(pr_number) { + withCredentials([string( + credentialsId: 'tvm-bot-jenkins-reader', + variable: 'GITHUB_TOKEN', + )]) { + // Exit code of 1 means run slow tests, exit code of 0 means skip slow tests + result = sh ( + returnStatus: true, + script: "./${jenkins_scripts_root}/should_run_slow_tests.py --pr '${pr_number}'", + label: 'Check if CI should run slow tests', + ) + } + return result == 0 +} + +def cancel_previous_build() { + // cancel previous build if it is not on main. 
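+  // Example of the milestone pattern at work: each build passes
+  // milestone(buildNumber - 1) and then milestone(buildNumber). If build #4
+  // is still running when build #5 passes milestone(5), Jenkins aborts
+  // build #4, since it passed milestone(4) but can never pass milestone(5).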
+ if (env.BRANCH_NAME != 'main') { + def buildNumber = env.BUILD_NUMBER as int + // Milestone API allows us to cancel previous build + // with the same milestone number + if (buildNumber > 1) milestone(buildNumber - 1) + milestone(buildNumber) + } +} + +def checkout_trusted_files() { + // trust everything from branch builds + if (env.BRANCH_NAME == null || !env.BRANCH_NAME.startsWith('PR-')) { + return; + } + + // trust peoople listed in CONTRIBUTING.md + grep_code = sh( + returnStatus: true, + script: "git show '${upstream_revision}:CONTRIBUTORS.md' | grep '@${env.CHANGE_AUTHOR}'", + label: 'Check if change is from a contributor', + ) + + if (grep_code == 1) { + // Any scripts that run on the bare host and not inside a Docker container + // (especially those that access secrets) should be checked out here so + // only trusted versions are used in CI + sh( + script: "git checkout ${upstream_revision} ${jenkins_scripts_root}/.", + label: 'Check out trusted files', + ) + } +} + +def should_skip_ci(pr_number) { + if (env.BRANCH_NAME == null || !env.BRANCH_NAME.startsWith('PR-')) { + // never skip CI on build sourced from a branch + return false + } + glob_skip_ci_code = sh ( + returnStatus: true, + script: "./${jenkins_scripts_root}/git_skip_ci_globs.py", + label: 'Check if CI should be skipped due to changed files', + ) + if (glob_skip_ci_code == 0) { + return true + } + withCredentials([string( + credentialsId: 'tvm-bot-jenkins-reader', + variable: 'GITHUB_TOKEN', + )]) { + // Exit code of 1 means run full CI (or the script had an error, so run + // full CI just in case). Exit code of 0 means skip CI. + git_skip_ci_code = sh ( + returnStatus: true, + script: "./${jenkins_scripts_root}/git_skip_ci.py --pr '${pr_number}'", + label: 'Check if CI should be skipped', + ) + } + return git_skip_ci_code == 0 +} + +def check_pr(pr_number) { + if (env.BRANCH_NAME == null || !env.BRANCH_NAME.startsWith('PR-')) { + // never skip CI on build sourced from a branch + return false + } + withCredentials([string( + credentialsId: 'tvm-bot-jenkins-reader', + variable: 'GITHUB_TOKEN', + )]) { + sh ( + script: "python3 ${jenkins_scripts_root}/check_pr.py --pr ${pr_number}", + label: 'Check PR title and body', + ) + } + +} + +def prepare() { + stage('Prepare') { + node('CPU-SMALL') { + ws("workspace/exec_${env.EXECUTOR_NUMBER}/tvm/prepare") { + init_git() + + check_pr(env.CHANGE_ID) + + if (env.DETERMINE_DOCKER_IMAGES == 'yes') { + sh( + script: "./${jenkins_scripts_root}/determine_docker_images.py ci_arm=${ci_arm} ci_cortexm=${ci_cortexm} ci_cpu=${ci_cpu} ci_gpu=${ci_gpu} ci_hexagon=${ci_hexagon} ci_i386=${ci_i386} ci_lint=${ci_lint} ci_minimal=${ci_minimal} ci_riscv=${ci_riscv} ci_wasm=${ci_wasm} ", + label: 'Decide whether to use tlcpack or tlcpackstaging for Docker images', + ) + // Pull image names from the results of should_rebuild_docker.py + ci_arm = sh( + script: "cat .docker-image-names/ci_arm", + label: "Find docker image name for ci_arm", + returnStdout: true, + ).trim() + ci_cortexm = sh( + script: "cat .docker-image-names/ci_cortexm", + label: "Find docker image name for ci_cortexm", + returnStdout: true, + ).trim() + ci_cpu = sh( + script: "cat .docker-image-names/ci_cpu", + label: "Find docker image name for ci_cpu", + returnStdout: true, + ).trim() + ci_gpu = sh( + script: "cat .docker-image-names/ci_gpu", + label: "Find docker image name for ci_gpu", + returnStdout: true, + ).trim() + ci_hexagon = sh( + script: "cat .docker-image-names/ci_hexagon", + label: "Find docker image name for 
ci_hexagon", + returnStdout: true, + ).trim() + ci_i386 = sh( + script: "cat .docker-image-names/ci_i386", + label: "Find docker image name for ci_i386", + returnStdout: true, + ).trim() + ci_lint = sh( + script: "cat .docker-image-names/ci_lint", + label: "Find docker image name for ci_lint", + returnStdout: true, + ).trim() + ci_minimal = sh( + script: "cat .docker-image-names/ci_minimal", + label: "Find docker image name for ci_minimal", + returnStdout: true, + ).trim() + ci_riscv = sh( + script: "cat .docker-image-names/ci_riscv", + label: "Find docker image name for ci_riscv", + returnStdout: true, + ).trim() + ci_wasm = sh( + script: "cat .docker-image-names/ci_wasm", + label: "Find docker image name for ci_wasm", + returnStdout: true, + ).trim() + } + + ci_arm = params.ci_arm_param ?: ci_arm + ci_cortexm = params.ci_cortexm_param ?: ci_cortexm + ci_cpu = params.ci_cpu_param ?: ci_cpu + ci_gpu = params.ci_gpu_param ?: ci_gpu + ci_hexagon = params.ci_hexagon_param ?: ci_hexagon + ci_i386 = params.ci_i386_param ?: ci_i386 + ci_lint = params.ci_lint_param ?: ci_lint + ci_minimal = params.ci_minimal_param ?: ci_minimal + ci_riscv = params.ci_riscv_param ?: ci_riscv + ci_wasm = params.ci_wasm_param ?: ci_wasm + + sh (script: """ + echo "Docker images being used in this build:" + echo " ci_arm = ${ci_arm}" + echo " ci_cortexm = ${ci_cortexm}" + echo " ci_cpu = ${ci_cpu}" + echo " ci_gpu = ${ci_gpu}" + echo " ci_hexagon = ${ci_hexagon}" + echo " ci_i386 = ${ci_i386}" + echo " ci_lint = ${ci_lint}" + echo " ci_minimal = ${ci_minimal}" + echo " ci_riscv = ${ci_riscv}" + echo " ci_wasm = ${ci_wasm}" + """, label: 'Docker image names') + + is_docs_only_build = sh ( + returnStatus: true, + script: "./${jenkins_scripts_root}/git_change_docs.sh", + label: 'Check for docs only changes', + ) + skip_ci = should_skip_ci(env.CHANGE_ID) + skip_slow_tests = should_skip_slow_tests(env.CHANGE_ID) + rebuild_docker_images = sh ( + returnStatus: true, + script: "./${jenkins_scripts_root}/git_change_docker.sh", + label: 'Check for any docker changes', + ) + + if (skip_ci) { + // Don't rebuild when skipping CI + rebuild_docker_images = false + } + } + } + } +} +def ci_setup(image) { + sh ( + script: "${docker_run} ${image} ./tests/scripts/task_clear_pytest.sh", + label: 'Clean up old workspace', + ) +} + +def python_unittest(image) { + sh ( + script: "${docker_run} ${image} ./tests/scripts/task_python_unittest.sh", + label: 'Run Python unit tests', + ) +} + +def fsim_test(image) { + sh ( + script: "${docker_run} ${image} ./tests/scripts/task_python_vta_fsim.sh", + label: 'Run VTA tests in FSIM', + ) +} + +def make_standalone_crt(image, build_dir) { + sh ( + script: """ + set -eux + ${docker_run} ${image} python3 ./tests/scripts/task_build.py \ + --sccache-bucket tvm-sccache-prod \ + --cmake-target standalone_crt \ + --build-dir build + ${docker_run} ${image} python3 ./tests/scripts/task_build.py \ + --sccache-bucket tvm-sccache-prod \ + --cmake-target crttest \ + --build-dir build + """, + label: 'Make standalone CRT', + ) +} + +def make_cpp_tests(image, build_dir) { + sh ( + script: """ + set -eux + ${docker_run} ${image} python3 ./tests/scripts/task_build.py \ + --sccache-bucket tvm-sccache-prod \ + --cmake-target cpptest \ + --build-dir ${build_dir} + """, + label: 'Make C++ tests', + ) +} + +def cmake_build(image, path, make_flag) { + sh ( + script: "${docker_run} --env CI_NUM_EXECUTORS ${image} ./tests/scripts/task_build.py --sccache-bucket tvm-sccache-prod", + label: 'Run cmake build', + ) +} +def 
cpp_unittest(image) { + sh ( + script: "${docker_run} --env CI_NUM_EXECUTORS ${image} ./tests/scripts/task_cpp_unittest.sh", + label: 'Run C++ tests', + ) +} + +def micro_cpp_unittest(image) { + sh ( + script: "${docker_run} --env CI_NUM_EXECUTORS ${image} ./tests/scripts/task_microtvm_cpp_tests.sh build", + label: 'Run microTVM C++ tests', + ) +} + +cancel_previous_build() + +prepare() +def build() { + stage('Build') { + if (!skip_ci && is_docs_only_build != 1) { + node('CPU-SMALL') { + ws("workspace/exec_${env.EXECUTOR_NUMBER}/tvm/build-i386") { + init_git() + docker_init(ci_i386) + timeout(time: max_time, unit: 'MINUTES') { + sh ( + script: "${docker_run} ${ci_i386} ./tests/scripts/task_config_build_i386.sh build", + label: 'Create i386 cmake config', + ) + cmake_build(ci_i386, 'build', '-j2') + make_standalone_crt(ci_i386, 'build') + make_cpp_tests(ci_i386, 'build') + sh( + script: "./${jenkins_scripts_root}/s3.py --action upload --bucket ${s3_bucket} --prefix ${s3_prefix}/i386 --items build/libvta_tsim.so build/libtvm.so build/libvta_fsim.so build/libtvm_runtime.so build/config.cmake build/standalone_crt build/build.ninja build/crttest build/cpptest build/build.ninja build/CMakeFiles/rules.ninja", + label: 'Upload artifacts to S3', + ) + } + } + } + } else { + Utils.markStageSkippedForConditional('BUILD: i386') + } + } +} +build() + + + + +def shard_run_python_i386_1_of_3() { + if (!skip_ci && is_docs_only_build != 1) { + node('CPU-SMALL') { + ws("workspace/exec_${env.EXECUTOR_NUMBER}/tvm/integration-python-i386") { + try { + init_git() + docker_init(ci_i386) + timeout(time: max_time, unit: 'MINUTES') { + withEnv([ + 'PLATFORM=i386', + 'TEST_STEP_NAME=python: i386', + 'TVM_NUM_SHARDS=3', + 'TVM_SHARD_INDEX=0', + "SKIP_SLOW_TESTS=${skip_slow_tests}"], { + sh( + script: "./${jenkins_scripts_root}/s3.py --action download --bucket ${s3_bucket} --prefix ${s3_prefix}/i386", + label: 'Download artifacts from S3', + ) + + ci_setup(ci_i386) + cpp_unittest(ci_i386) + micro_cpp_unittest(ci_i386) + python_unittest(ci_i386) + sh ( + script: "${docker_run} ${ci_i386} ./tests/scripts/task_python_integration_i386only.sh", + label: 'Run i386 integration tests', + ) + }) + } + } finally { + try { + sh( + script: "./${jenkins_scripts_root}/s3.py --action upload --bucket ${s3_bucket} --prefix ${s3_prefix}/pytest-results/python_i386 --items build/pytest-results", + label: 'Upload JUnits to S3', + ) + + junit 'build/pytest-results/*.xml' + } catch (Exception e) { + echo 'Exception during JUnit upload: ' + e.toString() + } + } + } + } + } else { + Utils.markStageSkippedForConditional('python: i386 1 of 3') + } +} + +def shard_run_python_i386_2_of_3() { + if (!skip_ci && is_docs_only_build != 1) { + node('CPU-SMALL') { + ws("workspace/exec_${env.EXECUTOR_NUMBER}/tvm/integration-python-i386") { + try { + init_git() + docker_init(ci_i386) + timeout(time: max_time, unit: 'MINUTES') { + withEnv([ + 'PLATFORM=i386', + 'TEST_STEP_NAME=python: i386', + 'TVM_NUM_SHARDS=3', + 'TVM_SHARD_INDEX=1', + "SKIP_SLOW_TESTS=${skip_slow_tests}"], { + sh( + script: "./${jenkins_scripts_root}/s3.py --action download --bucket ${s3_bucket} --prefix ${s3_prefix}/i386", + label: 'Download artifacts from S3', + ) + + ci_setup(ci_i386) + python_unittest(ci_i386) + sh ( + script: "${docker_run} ${ci_i386} ./tests/scripts/task_python_integration_i386only.sh", + label: 'Run i386 integration tests', + ) + fsim_test(ci_i386) + }) + } + } finally { + try { + sh( + script: "./${jenkins_scripts_root}/s3.py --action upload --bucket 
${s3_bucket} --prefix ${s3_prefix}/pytest-results/python_i386 --items build/pytest-results", + label: 'Upload JUnits to S3', + ) + + junit 'build/pytest-results/*.xml' + } catch (Exception e) { + echo 'Exception during JUnit upload: ' + e.toString() + } + } + } + } + } else { + Utils.markStageSkippedForConditional('python: i386 2 of 3') + } +} + +def shard_run_python_i386_3_of_3() { + if (!skip_ci && is_docs_only_build != 1) { + node('CPU-SMALL') { + ws("workspace/exec_${env.EXECUTOR_NUMBER}/tvm/integration-python-i386") { + try { + init_git() + docker_init(ci_i386) + timeout(time: max_time, unit: 'MINUTES') { + withEnv([ + 'PLATFORM=i386', + 'TEST_STEP_NAME=python: i386', + 'TVM_NUM_SHARDS=3', + 'TVM_SHARD_INDEX=2', + "SKIP_SLOW_TESTS=${skip_slow_tests}"], { + sh( + script: "./${jenkins_scripts_root}/s3.py --action download --bucket ${s3_bucket} --prefix ${s3_prefix}/i386", + label: 'Download artifacts from S3', + ) + + ci_setup(ci_i386) + python_unittest(ci_i386) + sh ( + script: "${docker_run} ${ci_i386} ./tests/scripts/task_python_integration_i386only.sh", + label: 'Run i386 integration tests', + ) + }) + } + } finally { + try { + sh( + script: "./${jenkins_scripts_root}/s3.py --action upload --bucket ${s3_bucket} --prefix ${s3_prefix}/pytest-results/python_i386 --items build/pytest-results", + label: 'Upload JUnits to S3', + ) + + junit 'build/pytest-results/*.xml' + } catch (Exception e) { + echo 'Exception during JUnit upload: ' + e.toString() + } + } + } + } + } else { + Utils.markStageSkippedForConditional('python: i386 3 of 3') + } +} + + +def test() { + stage('Test') { + environment { + SKIP_SLOW_TESTS = "${skip_slow_tests}" + } + parallel( + 'python: i386 1 of 3': { + shard_run_python_i386_1_of_3() + }, + 'python: i386 2 of 3': { + shard_run_python_i386_2_of_3() + }, + 'python: i386 3 of 3': { + shard_run_python_i386_3_of_3() + }, + ) + } +} +test() diff --git a/ci/jenkins/generated/lint_jenkinsfile.groovy b/ci/jenkins/generated/lint_jenkinsfile.groovy new file mode 100644 index 000000000000..6173049478d0 --- /dev/null +++ b/ci/jenkins/generated/lint_jenkinsfile.groovy @@ -0,0 +1,530 @@ +#!groovy +// -*- mode: groovy -*- + +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Jenkins pipeline +// See documents at https://jenkins.io/doc/book/pipeline/jenkinsfile/ + +// Docker env used for testing +// Different image may have different version tag +// because some of them are more stable than anoter. +// +// Docker images are maintained by PMC, cached in dockerhub +// and remains relatively stable over the time. +// Flow for upgrading docker env(need commiter) +// +// - Send PR to upgrade build script in the repo +// - Build the new docker image +// - Tag the docker image with a new version and push to a binary cache. 
+// - Update the version in the Jenkinsfile, send a PR +// - Fix any issues wrt to the new image version in the PR +// - Merge the PR and now we are in new version +// - Tag the new version as the lates +// - Periodically cleanup the old versions on local workers +// + +// ============================= IMPORTANT NOTE ============================= +// This file is generated by 'jenkins/generate.py'. Do not edit this file directly! +// Make edits to 'jenkins/Jenkinsfile.j2' and regenerate this with +// 'python3 jenkins/generate.py' +// Note: This timestamp is here to ensure that updates to the Jenkinsfile are +// always rebased on main before merging: +// Generated at 2022-12-01T17:07:24.720777 + +import org.jenkinsci.plugins.pipeline.modeldefinition.Utils +ci_lint = 'tlcpack/ci-lint:20221013-060115-61c9742ea' +ci_gpu = 'tlcpack/ci-gpu:20221019-060125-0b4836739' +ci_cpu = 'tlcpack/ci-cpu:20221013-060115-61c9742ea' +ci_minimal = 'tlcpack/ci-minimal:20221013-060115-61c9742ea' +ci_wasm = 'tlcpack/ci-wasm:20221013-060115-61c9742ea' +ci_i386 = 'tlcpack/ci-i386:20221013-060115-61c9742ea' +ci_cortexm = 'tlcpack/ci-cortexm:20221013-060115-61c9742ea' +ci_arm = 'tlcpack/ci-arm:20221013-060115-61c9742ea' +ci_hexagon = 'tlcpack/ci-hexagon:20221013-060115-61c9742ea' +ci_riscv = 'tlcpack/ci-riscv:20221013-060115-61c9742ea' + +// Parameters to allow overriding (in Jenkins UI), the images +// to be used by a given build. When provided, they take precedence +// over default values above. +properties([ + parameters([ + string(name: 'ci_arm_param', defaultValue: ''), + string(name: 'ci_cortexm_param', defaultValue: ''), + string(name: 'ci_cpu_param', defaultValue: ''), + string(name: 'ci_gpu_param', defaultValue: ''), + string(name: 'ci_hexagon_param', defaultValue: ''), + string(name: 'ci_i386_param', defaultValue: ''), + string(name: 'ci_lint_param', defaultValue: ''), + string(name: 'ci_minimal_param', defaultValue: ''), + string(name: 'ci_riscv_param', defaultValue: ''), + string(name: 'ci_wasm_param', defaultValue: ''), + ]) +]) + +// Placeholders for newly built Docker image names (if rebuild_docker_images +// is used) + built_ci_arm = null; + built_ci_cortexm = null; + built_ci_cpu = null; + built_ci_gpu = null; + built_ci_hexagon = null; + built_ci_i386 = null; + built_ci_lint = null; + built_ci_minimal = null; + built_ci_riscv = null; + built_ci_wasm = null; + +// Global variable assigned during Sanity Check that holds the sha1 which should be +// merged into the PR in all branches. 
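+// It is resolved exactly once (to a full 40-character sha1 by
+// update_upstream_revision below) so every stage checks out the same commit.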
+upstream_revision = null + +// command to start a docker container +docker_run = 'docker/bash.sh --env CI --env TVM_SHARD_INDEX --env TVM_NUM_SHARDS --env RUN_DISPLAY_URL --env PLATFORM --env SKIP_SLOW_TESTS --env TEST_STEP_NAME' +docker_build = 'docker/build.sh' +// timeout in minutes +max_time = 180 +rebuild_docker_images = false + +s3_bucket = 'tvm-jenkins-artifacts-prod' +s3_prefix = "tvm/${env.BRANCH_NAME}/${env.BUILD_NUMBER}" + +// Jenkins script root directory +jenkins_scripts_root = "ci/scripts/jenkins" + + +// General note: Jenkins has limits on the size of a method (or top level code) +// that are pretty strict, so most usage of groovy methods in these templates +// are purely to satisfy the JVM +def per_exec_ws(folder) { + return "workspace/exec_${env.EXECUTOR_NUMBER}/" + folder +} + +// initialize source codes +def init_git() { + retry(5) { + checkout scm + } + + // Add more info about job node + sh ( + script: './tests/scripts/task_show_node_info.sh', + label: 'Show executor node info', + ) + + // Determine merge commit to use for all stages + if (env.BRANCH_NAME == 'main') { + // Only set upstream_revision to HEAD and skip merging to avoid a race with another commit merged to main. + update_upstream_revision("HEAD") + } else { + // This is PR branch so merge with latest main. + merge_with_main() + } + + sh( + script: """ + set -eux + . ${jenkins_scripts_root}/retry.sh + retry 3 timeout 5m git submodule update --init -f --jobs 0 + """, + label: 'Update git submodules', + ) + checkout_trusted_files() +} + +def update_upstream_revision(git_ref) { + if (upstream_revision == null) { + upstream_revision = sh( + script: "git log -1 ${git_ref} --format=\'%H\'", + label: 'Determine upstream revision', + returnStdout: true, + ).trim() + } +} + +def merge_with_main() { + sh ( + script: 'git fetch origin main', + label: 'Fetch upstream', + ) + update_upstream_revision("FETCH_HEAD") + sh ( + script: "git -c user.name=TVM-Jenkins -c user.email=jenkins@tvm.apache.org merge ${upstream_revision}", + label: 'Merge to origin/main' + ) +} + +def docker_init(image) { + // Clear out all Docker images that aren't going to be used + sh( + script: """ + set -eux + docker image ls --all + IMAGES=\$(docker image ls --all --format '{{.Repository}}:{{.Tag}} {{.ID}}') + + echo -e "Found images:\\n\$IMAGES" + echo "\$IMAGES" | { grep -vE '${image}' || test \$? = 1; } | { xargs docker rmi || test \$? = 123; } + + docker image ls --all + """, + label: 'Clean old Docker images', + ) + + if (image.contains("amazonaws.com")) { + // If this string is in the image name it's from ECR and needs to be pulled + // with the right credentials + // ecr_pull(image) + sh "echo Pulling from AWS is not implemented && exit 1" + } else { + sh( + script: """ + set -eux + . ${jenkins_scripts_root}/retry.sh + retry 5 docker pull ${image} + """, + label: 'Pull docker image', + ) + } +} + +def should_skip_slow_tests(pr_number) { + withCredentials([string( + credentialsId: 'tvm-bot-jenkins-reader', + variable: 'GITHUB_TOKEN', + )]) { + // Exit code of 1 means run slow tests, exit code of 0 means skip slow tests + result = sh ( + returnStatus: true, + script: "./${jenkins_scripts_root}/should_run_slow_tests.py --pr '${pr_number}'", + label: 'Check if CI should run slow tests', + ) + } + return result == 0 +} + +def cancel_previous_build() { + // cancel previous build if it is not on main. 
+ if (env.BRANCH_NAME != 'main') { + def buildNumber = env.BUILD_NUMBER as int + // Milestone API allows us to cancel previous build + // with the same milestone number + if (buildNumber > 1) milestone(buildNumber - 1) + milestone(buildNumber) + } +} + +def checkout_trusted_files() { + // trust everything from branch builds + if (env.BRANCH_NAME == null || !env.BRANCH_NAME.startsWith('PR-')) { + return; + } + + // trust peoople listed in CONTRIBUTING.md + grep_code = sh( + returnStatus: true, + script: "git show '${upstream_revision}:CONTRIBUTORS.md' | grep '@${env.CHANGE_AUTHOR}'", + label: 'Check if change is from a contributor', + ) + + if (grep_code == 1) { + // Any scripts that run on the bare host and not inside a Docker container + // (especially those that access secrets) should be checked out here so + // only trusted versions are used in CI + sh( + script: "git checkout ${upstream_revision} ${jenkins_scripts_root}/.", + label: 'Check out trusted files', + ) + } +} + +def should_skip_ci(pr_number) { + if (env.BRANCH_NAME == null || !env.BRANCH_NAME.startsWith('PR-')) { + // never skip CI on build sourced from a branch + return false + } + glob_skip_ci_code = sh ( + returnStatus: true, + script: "./${jenkins_scripts_root}/git_skip_ci_globs.py", + label: 'Check if CI should be skipped due to changed files', + ) + if (glob_skip_ci_code == 0) { + return true + } + withCredentials([string( + credentialsId: 'tvm-bot-jenkins-reader', + variable: 'GITHUB_TOKEN', + )]) { + // Exit code of 1 means run full CI (or the script had an error, so run + // full CI just in case). Exit code of 0 means skip CI. + git_skip_ci_code = sh ( + returnStatus: true, + script: "./${jenkins_scripts_root}/git_skip_ci.py --pr '${pr_number}'", + label: 'Check if CI should be skipped', + ) + } + return git_skip_ci_code == 0 +} + +def check_pr(pr_number) { + if (env.BRANCH_NAME == null || !env.BRANCH_NAME.startsWith('PR-')) { + // never skip CI on build sourced from a branch + return false + } + withCredentials([string( + credentialsId: 'tvm-bot-jenkins-reader', + variable: 'GITHUB_TOKEN', + )]) { + sh ( + script: "python3 ${jenkins_scripts_root}/check_pr.py --pr ${pr_number}", + label: 'Check PR title and body', + ) + } + +} + +def prepare() { + stage('Prepare') { + node('CPU-SMALL') { + ws("workspace/exec_${env.EXECUTOR_NUMBER}/tvm/prepare") { + init_git() + + check_pr(env.CHANGE_ID) + + if (env.DETERMINE_DOCKER_IMAGES == 'yes') { + sh( + script: "./${jenkins_scripts_root}/determine_docker_images.py ci_arm=${ci_arm} ci_cortexm=${ci_cortexm} ci_cpu=${ci_cpu} ci_gpu=${ci_gpu} ci_hexagon=${ci_hexagon} ci_i386=${ci_i386} ci_lint=${ci_lint} ci_minimal=${ci_minimal} ci_riscv=${ci_riscv} ci_wasm=${ci_wasm} ", + label: 'Decide whether to use tlcpack or tlcpackstaging for Docker images', + ) + // Pull image names from the results of should_rebuild_docker.py + ci_arm = sh( + script: "cat .docker-image-names/ci_arm", + label: "Find docker image name for ci_arm", + returnStdout: true, + ).trim() + ci_cortexm = sh( + script: "cat .docker-image-names/ci_cortexm", + label: "Find docker image name for ci_cortexm", + returnStdout: true, + ).trim() + ci_cpu = sh( + script: "cat .docker-image-names/ci_cpu", + label: "Find docker image name for ci_cpu", + returnStdout: true, + ).trim() + ci_gpu = sh( + script: "cat .docker-image-names/ci_gpu", + label: "Find docker image name for ci_gpu", + returnStdout: true, + ).trim() + ci_hexagon = sh( + script: "cat .docker-image-names/ci_hexagon", + label: "Find docker image name for 
ci_hexagon", + returnStdout: true, + ).trim() + ci_i386 = sh( + script: "cat .docker-image-names/ci_i386", + label: "Find docker image name for ci_i386", + returnStdout: true, + ).trim() + ci_lint = sh( + script: "cat .docker-image-names/ci_lint", + label: "Find docker image name for ci_lint", + returnStdout: true, + ).trim() + ci_minimal = sh( + script: "cat .docker-image-names/ci_minimal", + label: "Find docker image name for ci_minimal", + returnStdout: true, + ).trim() + ci_riscv = sh( + script: "cat .docker-image-names/ci_riscv", + label: "Find docker image name for ci_riscv", + returnStdout: true, + ).trim() + ci_wasm = sh( + script: "cat .docker-image-names/ci_wasm", + label: "Find docker image name for ci_wasm", + returnStdout: true, + ).trim() + } + + ci_arm = params.ci_arm_param ?: ci_arm + ci_cortexm = params.ci_cortexm_param ?: ci_cortexm + ci_cpu = params.ci_cpu_param ?: ci_cpu + ci_gpu = params.ci_gpu_param ?: ci_gpu + ci_hexagon = params.ci_hexagon_param ?: ci_hexagon + ci_i386 = params.ci_i386_param ?: ci_i386 + ci_lint = params.ci_lint_param ?: ci_lint + ci_minimal = params.ci_minimal_param ?: ci_minimal + ci_riscv = params.ci_riscv_param ?: ci_riscv + ci_wasm = params.ci_wasm_param ?: ci_wasm + + sh (script: """ + echo "Docker images being used in this build:" + echo " ci_arm = ${ci_arm}" + echo " ci_cortexm = ${ci_cortexm}" + echo " ci_cpu = ${ci_cpu}" + echo " ci_gpu = ${ci_gpu}" + echo " ci_hexagon = ${ci_hexagon}" + echo " ci_i386 = ${ci_i386}" + echo " ci_lint = ${ci_lint}" + echo " ci_minimal = ${ci_minimal}" + echo " ci_riscv = ${ci_riscv}" + echo " ci_wasm = ${ci_wasm}" + """, label: 'Docker image names') + + is_docs_only_build = sh ( + returnStatus: true, + script: "./${jenkins_scripts_root}/git_change_docs.sh", + label: 'Check for docs only changes', + ) + skip_ci = should_skip_ci(env.CHANGE_ID) + skip_slow_tests = should_skip_slow_tests(env.CHANGE_ID) + rebuild_docker_images = sh ( + returnStatus: true, + script: "./${jenkins_scripts_root}/git_change_docker.sh", + label: 'Check for any docker changes', + ) + + if (skip_ci) { + // Don't rebuild when skipping CI + rebuild_docker_images = false + } + } + } + } +} +def ci_setup(image) { + sh ( + script: "${docker_run} ${image} ./tests/scripts/task_clear_pytest.sh", + label: 'Clean up old workspace', + ) +} + +def python_unittest(image) { + sh ( + script: "${docker_run} ${image} ./tests/scripts/task_python_unittest.sh", + label: 'Run Python unit tests', + ) +} + +def fsim_test(image) { + sh ( + script: "${docker_run} ${image} ./tests/scripts/task_python_vta_fsim.sh", + label: 'Run VTA tests in FSIM', + ) +} + +def make_standalone_crt(image, build_dir) { + sh ( + script: """ + set -eux + ${docker_run} ${image} python3 ./tests/scripts/task_build.py \ + --sccache-bucket tvm-sccache-prod \ + --cmake-target standalone_crt \ + --build-dir build + ${docker_run} ${image} python3 ./tests/scripts/task_build.py \ + --sccache-bucket tvm-sccache-prod \ + --cmake-target crttest \ + --build-dir build + """, + label: 'Make standalone CRT', + ) +} + +def make_cpp_tests(image, build_dir) { + sh ( + script: """ + set -eux + ${docker_run} ${image} python3 ./tests/scripts/task_build.py \ + --sccache-bucket tvm-sccache-prod \ + --cmake-target cpptest \ + --build-dir ${build_dir} + """, + label: 'Make C++ tests', + ) +} + +def cmake_build(image, path, make_flag) { + sh ( + script: "${docker_run} --env CI_NUM_EXECUTORS ${image} ./tests/scripts/task_build.py --sccache-bucket tvm-sccache-prod", + label: 'Run cmake build', + ) +} +def 
cpp_unittest(image) { + sh ( + script: "${docker_run} --env CI_NUM_EXECUTORS ${image} ./tests/scripts/task_cpp_unittest.sh", + label: 'Run C++ tests', + ) +} + +def micro_cpp_unittest(image) { + sh ( + script: "${docker_run} --env CI_NUM_EXECUTORS ${image} ./tests/scripts/task_microtvm_cpp_tests.sh build", + label: 'Run microTVM C++ tests', + ) +} + +cancel_previous_build() + +prepare() + +stage('Lint') { + parallel( + 'Lint 1 of 2': { + node('CPU-SMALL') { + ws("workspace/exec_${env.EXECUTOR_NUMBER}/tvm/lint") { + init_git() + docker_init(ci_lint) + timeout(time: max_time, unit: 'MINUTES') { + withEnv([ + 'TVM_NUM_SHARDS=2', + 'TEST_STEP_NAME=Lint', + 'TVM_SHARD_INDEX=0', + "SKIP_SLOW_TESTS=${skip_slow_tests}"], { + sh ( + script: "${docker_run} ${ci_lint} ./tests/scripts/task_lint.sh", + label: 'Run lint', + ) + }) + } + } + } + }, + 'Lint 2 of 2': { + node('CPU-SMALL') { + ws("workspace/exec_${env.EXECUTOR_NUMBER}/tvm/lint") { + init_git() + docker_init(ci_lint) + timeout(time: max_time, unit: 'MINUTES') { + withEnv([ + 'TVM_NUM_SHARDS=2', + 'TEST_STEP_NAME=Lint', + 'TVM_SHARD_INDEX=1', + "SKIP_SLOW_TESTS=${skip_slow_tests}"], { + sh ( + script: "${docker_run} ${ci_lint} ./tests/scripts/task_lint.sh", + label: 'Run lint', + ) + }) + } + } + } + }, + ) +} diff --git a/ci/jenkins/generated/minimal_jenkinsfile.groovy b/ci/jenkins/generated/minimal_jenkinsfile.groovy new file mode 100644 index 000000000000..c5f625248424 --- /dev/null +++ b/ci/jenkins/generated/minimal_jenkinsfile.groovy @@ -0,0 +1,573 @@ +#!groovy +// -*- mode: groovy -*- + +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Jenkins pipeline +// See documents at https://jenkins.io/doc/book/pipeline/jenkinsfile/ + +// Docker env used for testing +// Different image may have different version tag +// because some of them are more stable than anoter. +// +// Docker images are maintained by PMC, cached in dockerhub +// and remains relatively stable over the time. +// Flow for upgrading docker env(need commiter) +// +// - Send PR to upgrade build script in the repo +// - Build the new docker image +// - Tag the docker image with a new version and push to a binary cache. +// - Update the version in the Jenkinsfile, send a PR +// - Fix any issues wrt to the new image version in the PR +// - Merge the PR and now we are in new version +// - Tag the new version as the lates +// - Periodically cleanup the old versions on local workers +// + +// ============================= IMPORTANT NOTE ============================= +// This file is generated by 'jenkins/generate.py'. Do not edit this file directly! 
+// Make edits to 'jenkins/Jenkinsfile.j2' and regenerate this with +// 'python3 jenkins/generate.py' +// Note: This timestamp is here to ensure that updates to the Jenkinsfile are +// always rebased on main before merging: +// Generated at 2022-12-01T17:07:24.737352 + +import org.jenkinsci.plugins.pipeline.modeldefinition.Utils +ci_lint = 'tlcpack/ci-lint:20221013-060115-61c9742ea' +ci_gpu = 'tlcpack/ci-gpu:20221019-060125-0b4836739' +ci_cpu = 'tlcpack/ci-cpu:20221013-060115-61c9742ea' +ci_minimal = 'tlcpack/ci-minimal:20221013-060115-61c9742ea' +ci_wasm = 'tlcpack/ci-wasm:20221013-060115-61c9742ea' +ci_i386 = 'tlcpack/ci-i386:20221013-060115-61c9742ea' +ci_cortexm = 'tlcpack/ci-cortexm:20221013-060115-61c9742ea' +ci_arm = 'tlcpack/ci-arm:20221013-060115-61c9742ea' +ci_hexagon = 'tlcpack/ci-hexagon:20221013-060115-61c9742ea' +ci_riscv = 'tlcpack/ci-riscv:20221013-060115-61c9742ea' + +// Parameters to allow overriding (in Jenkins UI), the images +// to be used by a given build. When provided, they take precedence +// over default values above. +properties([ + parameters([ + string(name: 'ci_arm_param', defaultValue: ''), + string(name: 'ci_cortexm_param', defaultValue: ''), + string(name: 'ci_cpu_param', defaultValue: ''), + string(name: 'ci_gpu_param', defaultValue: ''), + string(name: 'ci_hexagon_param', defaultValue: ''), + string(name: 'ci_i386_param', defaultValue: ''), + string(name: 'ci_lint_param', defaultValue: ''), + string(name: 'ci_minimal_param', defaultValue: ''), + string(name: 'ci_riscv_param', defaultValue: ''), + string(name: 'ci_wasm_param', defaultValue: ''), + ]) +]) + +// Placeholders for newly built Docker image names (if rebuild_docker_images +// is used) + built_ci_arm = null; + built_ci_cortexm = null; + built_ci_cpu = null; + built_ci_gpu = null; + built_ci_hexagon = null; + built_ci_i386 = null; + built_ci_lint = null; + built_ci_minimal = null; + built_ci_riscv = null; + built_ci_wasm = null; + +// Global variable assigned during Sanity Check that holds the sha1 which should be +// merged into the PR in all branches. +upstream_revision = null + +// command to start a docker container +docker_run = 'docker/bash.sh --env CI --env TVM_SHARD_INDEX --env TVM_NUM_SHARDS --env RUN_DISPLAY_URL --env PLATFORM --env SKIP_SLOW_TESTS --env TEST_STEP_NAME' +docker_build = 'docker/build.sh' +// timeout in minutes +max_time = 180 +rebuild_docker_images = false + +s3_bucket = 'tvm-jenkins-artifacts-prod' +s3_prefix = "tvm/${env.BRANCH_NAME}/${env.BUILD_NUMBER}" + +// Jenkins script root directory +jenkins_scripts_root = "ci/scripts/jenkins" + + +// General note: Jenkins has limits on the size of a method (or top level code) +// that are pretty strict, so most usage of groovy methods in these templates +// are purely to satisfy the JVM +def per_exec_ws(folder) { + return "workspace/exec_${env.EXECUTOR_NUMBER}/" + folder +} + +// initialize source codes +def init_git() { + retry(5) { + checkout scm + } + + // Add more info about job node + sh ( + script: './tests/scripts/task_show_node_info.sh', + label: 'Show executor node info', + ) + + // Determine merge commit to use for all stages + if (env.BRANCH_NAME == 'main') { + // Only set upstream_revision to HEAD and skip merging to avoid a race with another commit merged to main. + update_upstream_revision("HEAD") + } else { + // This is PR branch so merge with latest main. + merge_with_main() + } + + sh( + script: """ + set -eux + . 
${jenkins_scripts_root}/retry.sh + retry 3 timeout 5m git submodule update --init -f --jobs 0 + """, + label: 'Update git submodules', + ) + checkout_trusted_files() +} + +def update_upstream_revision(git_ref) { + if (upstream_revision == null) { + upstream_revision = sh( + script: "git log -1 ${git_ref} --format=\'%H\'", + label: 'Determine upstream revision', + returnStdout: true, + ).trim() + } +} + +def merge_with_main() { + sh ( + script: 'git fetch origin main', + label: 'Fetch upstream', + ) + update_upstream_revision("FETCH_HEAD") + sh ( + script: "git -c user.name=TVM-Jenkins -c user.email=jenkins@tvm.apache.org merge ${upstream_revision}", + label: 'Merge to origin/main' + ) +} + +def docker_init(image) { + // Clear out all Docker images that aren't going to be used + sh( + script: """ + set -eux + docker image ls --all + IMAGES=\$(docker image ls --all --format '{{.Repository}}:{{.Tag}} {{.ID}}') + + echo -e "Found images:\\n\$IMAGES" + echo "\$IMAGES" | { grep -vE '${image}' || test \$? = 1; } | { xargs docker rmi || test \$? = 123; } + + docker image ls --all + """, + label: 'Clean old Docker images', + ) + + if (image.contains("amazonaws.com")) { + // If this string is in the image name it's from ECR and needs to be pulled + // with the right credentials + // ecr_pull(image) + sh "echo Pulling from AWS is not implemented && exit 1" + } else { + sh( + script: """ + set -eux + . ${jenkins_scripts_root}/retry.sh + retry 5 docker pull ${image} + """, + label: 'Pull docker image', + ) + } +} + +def should_skip_slow_tests(pr_number) { + withCredentials([string( + credentialsId: 'tvm-bot-jenkins-reader', + variable: 'GITHUB_TOKEN', + )]) { + // Exit code of 1 means run slow tests, exit code of 0 means skip slow tests + result = sh ( + returnStatus: true, + script: "./${jenkins_scripts_root}/should_run_slow_tests.py --pr '${pr_number}'", + label: 'Check if CI should run slow tests', + ) + } + return result == 0 +} + +def cancel_previous_build() { + // cancel previous build if it is not on main. 
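+  // Builds on main are left alone so every merged commit keeps a complete
+  // CI record; only superseded PR builds are aborted early.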
+ if (env.BRANCH_NAME != 'main') { + def buildNumber = env.BUILD_NUMBER as int + // Milestone API allows us to cancel previous build + // with the same milestone number + if (buildNumber > 1) milestone(buildNumber - 1) + milestone(buildNumber) + } +} + +def checkout_trusted_files() { + // trust everything from branch builds + if (env.BRANCH_NAME == null || !env.BRANCH_NAME.startsWith('PR-')) { + return; + } + + // trust peoople listed in CONTRIBUTING.md + grep_code = sh( + returnStatus: true, + script: "git show '${upstream_revision}:CONTRIBUTORS.md' | grep '@${env.CHANGE_AUTHOR}'", + label: 'Check if change is from a contributor', + ) + + if (grep_code == 1) { + // Any scripts that run on the bare host and not inside a Docker container + // (especially those that access secrets) should be checked out here so + // only trusted versions are used in CI + sh( + script: "git checkout ${upstream_revision} ${jenkins_scripts_root}/.", + label: 'Check out trusted files', + ) + } +} + +def should_skip_ci(pr_number) { + if (env.BRANCH_NAME == null || !env.BRANCH_NAME.startsWith('PR-')) { + // never skip CI on build sourced from a branch + return false + } + glob_skip_ci_code = sh ( + returnStatus: true, + script: "./${jenkins_scripts_root}/git_skip_ci_globs.py", + label: 'Check if CI should be skipped due to changed files', + ) + if (glob_skip_ci_code == 0) { + return true + } + withCredentials([string( + credentialsId: 'tvm-bot-jenkins-reader', + variable: 'GITHUB_TOKEN', + )]) { + // Exit code of 1 means run full CI (or the script had an error, so run + // full CI just in case). Exit code of 0 means skip CI. + git_skip_ci_code = sh ( + returnStatus: true, + script: "./${jenkins_scripts_root}/git_skip_ci.py --pr '${pr_number}'", + label: 'Check if CI should be skipped', + ) + } + return git_skip_ci_code == 0 +} + +def check_pr(pr_number) { + if (env.BRANCH_NAME == null || !env.BRANCH_NAME.startsWith('PR-')) { + // never skip CI on build sourced from a branch + return false + } + withCredentials([string( + credentialsId: 'tvm-bot-jenkins-reader', + variable: 'GITHUB_TOKEN', + )]) { + sh ( + script: "python3 ${jenkins_scripts_root}/check_pr.py --pr ${pr_number}", + label: 'Check PR title and body', + ) + } + +} + +def prepare() { + stage('Prepare') { + node('CPU-SMALL') { + ws("workspace/exec_${env.EXECUTOR_NUMBER}/tvm/prepare") { + init_git() + + check_pr(env.CHANGE_ID) + + if (env.DETERMINE_DOCKER_IMAGES == 'yes') { + sh( + script: "./${jenkins_scripts_root}/determine_docker_images.py ci_arm=${ci_arm} ci_cortexm=${ci_cortexm} ci_cpu=${ci_cpu} ci_gpu=${ci_gpu} ci_hexagon=${ci_hexagon} ci_i386=${ci_i386} ci_lint=${ci_lint} ci_minimal=${ci_minimal} ci_riscv=${ci_riscv} ci_wasm=${ci_wasm} ", + label: 'Decide whether to use tlcpack or tlcpackstaging for Docker images', + ) + // Pull image names from the results of should_rebuild_docker.py + ci_arm = sh( + script: "cat .docker-image-names/ci_arm", + label: "Find docker image name for ci_arm", + returnStdout: true, + ).trim() + ci_cortexm = sh( + script: "cat .docker-image-names/ci_cortexm", + label: "Find docker image name for ci_cortexm", + returnStdout: true, + ).trim() + ci_cpu = sh( + script: "cat .docker-image-names/ci_cpu", + label: "Find docker image name for ci_cpu", + returnStdout: true, + ).trim() + ci_gpu = sh( + script: "cat .docker-image-names/ci_gpu", + label: "Find docker image name for ci_gpu", + returnStdout: true, + ).trim() + ci_hexagon = sh( + script: "cat .docker-image-names/ci_hexagon", + label: "Find docker image name for 
ci_hexagon", + returnStdout: true, + ).trim() + ci_i386 = sh( + script: "cat .docker-image-names/ci_i386", + label: "Find docker image name for ci_i386", + returnStdout: true, + ).trim() + ci_lint = sh( + script: "cat .docker-image-names/ci_lint", + label: "Find docker image name for ci_lint", + returnStdout: true, + ).trim() + ci_minimal = sh( + script: "cat .docker-image-names/ci_minimal", + label: "Find docker image name for ci_minimal", + returnStdout: true, + ).trim() + ci_riscv = sh( + script: "cat .docker-image-names/ci_riscv", + label: "Find docker image name for ci_riscv", + returnStdout: true, + ).trim() + ci_wasm = sh( + script: "cat .docker-image-names/ci_wasm", + label: "Find docker image name for ci_wasm", + returnStdout: true, + ).trim() + } + + ci_arm = params.ci_arm_param ?: ci_arm + ci_cortexm = params.ci_cortexm_param ?: ci_cortexm + ci_cpu = params.ci_cpu_param ?: ci_cpu + ci_gpu = params.ci_gpu_param ?: ci_gpu + ci_hexagon = params.ci_hexagon_param ?: ci_hexagon + ci_i386 = params.ci_i386_param ?: ci_i386 + ci_lint = params.ci_lint_param ?: ci_lint + ci_minimal = params.ci_minimal_param ?: ci_minimal + ci_riscv = params.ci_riscv_param ?: ci_riscv + ci_wasm = params.ci_wasm_param ?: ci_wasm + + sh (script: """ + echo "Docker images being used in this build:" + echo " ci_arm = ${ci_arm}" + echo " ci_cortexm = ${ci_cortexm}" + echo " ci_cpu = ${ci_cpu}" + echo " ci_gpu = ${ci_gpu}" + echo " ci_hexagon = ${ci_hexagon}" + echo " ci_i386 = ${ci_i386}" + echo " ci_lint = ${ci_lint}" + echo " ci_minimal = ${ci_minimal}" + echo " ci_riscv = ${ci_riscv}" + echo " ci_wasm = ${ci_wasm}" + """, label: 'Docker image names') + + is_docs_only_build = sh ( + returnStatus: true, + script: "./${jenkins_scripts_root}/git_change_docs.sh", + label: 'Check for docs only changes', + ) + skip_ci = should_skip_ci(env.CHANGE_ID) + skip_slow_tests = should_skip_slow_tests(env.CHANGE_ID) + rebuild_docker_images = sh ( + returnStatus: true, + script: "./${jenkins_scripts_root}/git_change_docker.sh", + label: 'Check for any docker changes', + ) + + if (skip_ci) { + // Don't rebuild when skipping CI + rebuild_docker_images = false + } + } + } + } +} +def ci_setup(image) { + sh ( + script: "${docker_run} ${image} ./tests/scripts/task_clear_pytest.sh", + label: 'Clean up old workspace', + ) +} + +def python_unittest(image) { + sh ( + script: "${docker_run} ${image} ./tests/scripts/task_python_unittest.sh", + label: 'Run Python unit tests', + ) +} + +def fsim_test(image) { + sh ( + script: "${docker_run} ${image} ./tests/scripts/task_python_vta_fsim.sh", + label: 'Run VTA tests in FSIM', + ) +} + +def make_standalone_crt(image, build_dir) { + sh ( + script: """ + set -eux + ${docker_run} ${image} python3 ./tests/scripts/task_build.py \ + --sccache-bucket tvm-sccache-prod \ + --cmake-target standalone_crt \ + --build-dir build + ${docker_run} ${image} python3 ./tests/scripts/task_build.py \ + --sccache-bucket tvm-sccache-prod \ + --cmake-target crttest \ + --build-dir build + """, + label: 'Make standalone CRT', + ) +} + +def make_cpp_tests(image, build_dir) { + sh ( + script: """ + set -eux + ${docker_run} ${image} python3 ./tests/scripts/task_build.py \ + --sccache-bucket tvm-sccache-prod \ + --cmake-target cpptest \ + --build-dir ${build_dir} + """, + label: 'Make C++ tests', + ) +} + +def cmake_build(image, path, make_flag) { + sh ( + script: "${docker_run} --env CI_NUM_EXECUTORS ${image} ./tests/scripts/task_build.py --sccache-bucket tvm-sccache-prod", + label: 'Run cmake build', + ) +} +def 
cpp_unittest(image) { + sh ( + script: "${docker_run} --env CI_NUM_EXECUTORS ${image} ./tests/scripts/task_cpp_unittest.sh", + label: 'Run C++ tests', + ) +} + +def micro_cpp_unittest(image) { + sh ( + script: "${docker_run} --env CI_NUM_EXECUTORS ${image} ./tests/scripts/task_microtvm_cpp_tests.sh build", + label: 'Run microTVM C++ tests', + ) +} + +cancel_previous_build() + +prepare() +def build() { + stage('Build') { + if (!skip_ci && is_docs_only_build != 1) { + node('CPU-SMALL') { + ws("workspace/exec_${env.EXECUTOR_NUMBER}/tvm/build-cpu-minimal") { + init_git() + docker_init(ci_minimal) + timeout(time: max_time, unit: 'MINUTES') { + sh ( + script: "${docker_run} ${ci_minimal} ./tests/scripts/task_config_build_minimal.sh build", + label: 'Create CPU minimal cmake config', + ) + cmake_build(ci_minimal, 'build', '-j2') + make_cpp_tests(ci_minimal, 'build') + sh( + script: "./${jenkins_scripts_root}/s3.py --action upload --bucket ${s3_bucket} --prefix ${s3_prefix}/cpu-minimal --items build/libtvm.so build/libtvm_runtime.so build/config.cmake build/libtvm_allvisible.so build/cpptest build/build.ninja build/CMakeFiles/rules.ninja", + label: 'Upload artifacts to S3', + ) + } + } + } + } else { + Utils.markStageSkippedForConditional('BUILD: CPU MINIMAL') + } + } +} +build() + + + + +def shard_run_unittest_CPU_MINIMAL_1_of_1() { + if (!skip_ci && is_docs_only_build != 1) { + node('CPU-SMALL') { + ws("workspace/exec_${env.EXECUTOR_NUMBER}/tvm/ut-python-cpu-minimal") { + try { + init_git() + docker_init(ci_minimal) + timeout(time: max_time, unit: 'MINUTES') { + withEnv([ + 'PLATFORM=minimal', + 'TEST_STEP_NAME=unittest: CPU MINIMAL', + 'TVM_NUM_SHARDS=1', + 'TVM_SHARD_INDEX=0', + "SKIP_SLOW_TESTS=${skip_slow_tests}"], { + sh( + script: "./${jenkins_scripts_root}/s3.py --action download --bucket ${s3_bucket} --prefix ${s3_prefix}/cpu-minimal", + label: 'Download artifacts from S3', + ) + + cpp_unittest(ci_minimal) + python_unittest(ci_minimal) + }) + } + } finally { + try { + sh( + script: "./${jenkins_scripts_root}/s3.py --action upload --bucket ${s3_bucket} --prefix ${s3_prefix}/pytest-results/unittest_CPU_MINIMAL --items build/pytest-results", + label: 'Upload JUnits to S3', + ) + + junit 'build/pytest-results/*.xml' + } catch (Exception e) { + echo 'Exception during JUnit upload: ' + e.toString() + } + } + } + } + } else { + Utils.markStageSkippedForConditional('unittest: CPU MINIMAL 1 of 1') + } +} + + + +def test() { + stage('Test') { + environment { + SKIP_SLOW_TESTS = "${skip_slow_tests}" + } + parallel( + 'unittest: CPU MINIMAL 1 of 1': { + shard_run_unittest_CPU_MINIMAL_1_of_1() + }, + ) + } +} +test() diff --git a/ci/jenkins/generated/riscv_jenkinsfile.groovy b/ci/jenkins/generated/riscv_jenkinsfile.groovy new file mode 100644 index 000000000000..5892cba00e41 --- /dev/null +++ b/ci/jenkins/generated/riscv_jenkinsfile.groovy @@ -0,0 +1,579 @@ +#!groovy +// -*- mode: groovy -*- + +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. 
You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Jenkins pipeline +// See documents at https://jenkins.io/doc/book/pipeline/jenkinsfile/ + +// Docker env used for testing +// Different image may have different version tag +// because some of them are more stable than anoter. +// +// Docker images are maintained by PMC, cached in dockerhub +// and remains relatively stable over the time. +// Flow for upgrading docker env(need commiter) +// +// - Send PR to upgrade build script in the repo +// - Build the new docker image +// - Tag the docker image with a new version and push to a binary cache. +// - Update the version in the Jenkinsfile, send a PR +// - Fix any issues wrt to the new image version in the PR +// - Merge the PR and now we are in new version +// - Tag the new version as the lates +// - Periodically cleanup the old versions on local workers +// + +// ============================= IMPORTANT NOTE ============================= +// This file is generated by 'jenkins/generate.py'. Do not edit this file directly! +// Make edits to 'jenkins/Jenkinsfile.j2' and regenerate this with +// 'python3 jenkins/generate.py' +// Note: This timestamp is here to ensure that updates to the Jenkinsfile are +// always rebased on main before merging: +// Generated at 2022-12-01T17:07:24.646306 + +import org.jenkinsci.plugins.pipeline.modeldefinition.Utils +ci_lint = 'tlcpack/ci-lint:20221013-060115-61c9742ea' +ci_gpu = 'tlcpack/ci-gpu:20221019-060125-0b4836739' +ci_cpu = 'tlcpack/ci-cpu:20221013-060115-61c9742ea' +ci_minimal = 'tlcpack/ci-minimal:20221013-060115-61c9742ea' +ci_wasm = 'tlcpack/ci-wasm:20221013-060115-61c9742ea' +ci_i386 = 'tlcpack/ci-i386:20221013-060115-61c9742ea' +ci_cortexm = 'tlcpack/ci-cortexm:20221013-060115-61c9742ea' +ci_arm = 'tlcpack/ci-arm:20221013-060115-61c9742ea' +ci_hexagon = 'tlcpack/ci-hexagon:20221013-060115-61c9742ea' +ci_riscv = 'tlcpack/ci-riscv:20221013-060115-61c9742ea' + +// Parameters to allow overriding (in Jenkins UI), the images +// to be used by a given build. When provided, they take precedence +// over default values above. +properties([ + parameters([ + string(name: 'ci_arm_param', defaultValue: ''), + string(name: 'ci_cortexm_param', defaultValue: ''), + string(name: 'ci_cpu_param', defaultValue: ''), + string(name: 'ci_gpu_param', defaultValue: ''), + string(name: 'ci_hexagon_param', defaultValue: ''), + string(name: 'ci_i386_param', defaultValue: ''), + string(name: 'ci_lint_param', defaultValue: ''), + string(name: 'ci_minimal_param', defaultValue: ''), + string(name: 'ci_riscv_param', defaultValue: ''), + string(name: 'ci_wasm_param', defaultValue: ''), + ]) +]) + +// Placeholders for newly built Docker image names (if rebuild_docker_images +// is used) + built_ci_arm = null; + built_ci_cortexm = null; + built_ci_cpu = null; + built_ci_gpu = null; + built_ci_hexagon = null; + built_ci_i386 = null; + built_ci_lint = null; + built_ci_minimal = null; + built_ci_riscv = null; + built_ci_wasm = null; + +// Global variable assigned during Sanity Check that holds the sha1 which should be +// merged into the PR in all branches. 
+upstream_revision = null + +// command to start a docker container +docker_run = 'docker/bash.sh --env CI --env TVM_SHARD_INDEX --env TVM_NUM_SHARDS --env RUN_DISPLAY_URL --env PLATFORM --env SKIP_SLOW_TESTS --env TEST_STEP_NAME' +docker_build = 'docker/build.sh' +// timeout in minutes +max_time = 180 +rebuild_docker_images = false + +s3_bucket = 'tvm-jenkins-artifacts-prod' +s3_prefix = "tvm/${env.BRANCH_NAME}/${env.BUILD_NUMBER}" + +// Jenkins script root directory +jenkins_scripts_root = "ci/scripts/jenkins" + + +// General note: Jenkins has limits on the size of a method (or top level code) +// that are pretty strict, so most usage of groovy methods in these templates +// are purely to satisfy the JVM +def per_exec_ws(folder) { + return "workspace/exec_${env.EXECUTOR_NUMBER}/" + folder +} + +// initialize source codes +def init_git() { + retry(5) { + checkout scm + } + + // Add more info about job node + sh ( + script: './tests/scripts/task_show_node_info.sh', + label: 'Show executor node info', + ) + + // Determine merge commit to use for all stages + if (env.BRANCH_NAME == 'main') { + // Only set upstream_revision to HEAD and skip merging to avoid a race with another commit merged to main. + update_upstream_revision("HEAD") + } else { + // This is PR branch so merge with latest main. + merge_with_main() + } + + sh( + script: """ + set -eux + . ${jenkins_scripts_root}/retry.sh + retry 3 timeout 5m git submodule update --init -f --jobs 0 + """, + label: 'Update git submodules', + ) + checkout_trusted_files() +} + +def update_upstream_revision(git_ref) { + if (upstream_revision == null) { + upstream_revision = sh( + script: "git log -1 ${git_ref} --format=\'%H\'", + label: 'Determine upstream revision', + returnStdout: true, + ).trim() + } +} + +def merge_with_main() { + sh ( + script: 'git fetch origin main', + label: 'Fetch upstream', + ) + update_upstream_revision("FETCH_HEAD") + sh ( + script: "git -c user.name=TVM-Jenkins -c user.email=jenkins@tvm.apache.org merge ${upstream_revision}", + label: 'Merge to origin/main' + ) +} + +def docker_init(image) { + // Clear out all Docker images that aren't going to be used + sh( + script: """ + set -eux + docker image ls --all + IMAGES=\$(docker image ls --all --format '{{.Repository}}:{{.Tag}} {{.ID}}') + + echo -e "Found images:\\n\$IMAGES" + echo "\$IMAGES" | { grep -vE '${image}' || test \$? = 1; } | { xargs docker rmi || test \$? = 123; } + + docker image ls --all + """, + label: 'Clean old Docker images', + ) + + if (image.contains("amazonaws.com")) { + // If this string is in the image name it's from ECR and needs to be pulled + // with the right credentials + // ecr_pull(image) + sh "echo Pulling from AWS is not implemented && exit 1" + } else { + sh( + script: """ + set -eux + . ${jenkins_scripts_root}/retry.sh + retry 5 docker pull ${image} + """, + label: 'Pull docker image', + ) + } +} + +def should_skip_slow_tests(pr_number) { + withCredentials([string( + credentialsId: 'tvm-bot-jenkins-reader', + variable: 'GITHUB_TOKEN', + )]) { + // Exit code of 1 means run slow tests, exit code of 0 means skip slow tests + result = sh ( + returnStatus: true, + script: "./${jenkins_scripts_root}/should_run_slow_tests.py --pr '${pr_number}'", + label: 'Check if CI should run slow tests', + ) + } + return result == 0 +} + +def cancel_previous_build() { + // cancel previous build if it is not on main. 
+ if (env.BRANCH_NAME != 'main') { + def buildNumber = env.BUILD_NUMBER as int + // Milestone API allows us to cancel previous build + // with the same milestone number + if (buildNumber > 1) milestone(buildNumber - 1) + milestone(buildNumber) + } +} + +def checkout_trusted_files() { + // trust everything from branch builds + if (env.BRANCH_NAME == null || !env.BRANCH_NAME.startsWith('PR-')) { + return; + } + + // trust peoople listed in CONTRIBUTING.md + grep_code = sh( + returnStatus: true, + script: "git show '${upstream_revision}:CONTRIBUTORS.md' | grep '@${env.CHANGE_AUTHOR}'", + label: 'Check if change is from a contributor', + ) + + if (grep_code == 1) { + // Any scripts that run on the bare host and not inside a Docker container + // (especially those that access secrets) should be checked out here so + // only trusted versions are used in CI + sh( + script: "git checkout ${upstream_revision} ${jenkins_scripts_root}/.", + label: 'Check out trusted files', + ) + } +} + +def should_skip_ci(pr_number) { + if (env.BRANCH_NAME == null || !env.BRANCH_NAME.startsWith('PR-')) { + // never skip CI on build sourced from a branch + return false + } + glob_skip_ci_code = sh ( + returnStatus: true, + script: "./${jenkins_scripts_root}/git_skip_ci_globs.py", + label: 'Check if CI should be skipped due to changed files', + ) + if (glob_skip_ci_code == 0) { + return true + } + withCredentials([string( + credentialsId: 'tvm-bot-jenkins-reader', + variable: 'GITHUB_TOKEN', + )]) { + // Exit code of 1 means run full CI (or the script had an error, so run + // full CI just in case). Exit code of 0 means skip CI. + git_skip_ci_code = sh ( + returnStatus: true, + script: "./${jenkins_scripts_root}/git_skip_ci.py --pr '${pr_number}'", + label: 'Check if CI should be skipped', + ) + } + return git_skip_ci_code == 0 +} + +def check_pr(pr_number) { + if (env.BRANCH_NAME == null || !env.BRANCH_NAME.startsWith('PR-')) { + // never skip CI on build sourced from a branch + return false + } + withCredentials([string( + credentialsId: 'tvm-bot-jenkins-reader', + variable: 'GITHUB_TOKEN', + )]) { + sh ( + script: "python3 ${jenkins_scripts_root}/check_pr.py --pr ${pr_number}", + label: 'Check PR title and body', + ) + } + +} + +def prepare() { + stage('Prepare') { + node('CPU-SMALL') { + ws("workspace/exec_${env.EXECUTOR_NUMBER}/tvm/prepare") { + init_git() + + check_pr(env.CHANGE_ID) + + if (env.DETERMINE_DOCKER_IMAGES == 'yes') { + sh( + script: "./${jenkins_scripts_root}/determine_docker_images.py ci_arm=${ci_arm} ci_cortexm=${ci_cortexm} ci_cpu=${ci_cpu} ci_gpu=${ci_gpu} ci_hexagon=${ci_hexagon} ci_i386=${ci_i386} ci_lint=${ci_lint} ci_minimal=${ci_minimal} ci_riscv=${ci_riscv} ci_wasm=${ci_wasm} ", + label: 'Decide whether to use tlcpack or tlcpackstaging for Docker images', + ) + // Pull image names from the results of should_rebuild_docker.py + ci_arm = sh( + script: "cat .docker-image-names/ci_arm", + label: "Find docker image name for ci_arm", + returnStdout: true, + ).trim() + ci_cortexm = sh( + script: "cat .docker-image-names/ci_cortexm", + label: "Find docker image name for ci_cortexm", + returnStdout: true, + ).trim() + ci_cpu = sh( + script: "cat .docker-image-names/ci_cpu", + label: "Find docker image name for ci_cpu", + returnStdout: true, + ).trim() + ci_gpu = sh( + script: "cat .docker-image-names/ci_gpu", + label: "Find docker image name for ci_gpu", + returnStdout: true, + ).trim() + ci_hexagon = sh( + script: "cat .docker-image-names/ci_hexagon", + label: "Find docker image name for 
ci_hexagon", + returnStdout: true, + ).trim() + ci_i386 = sh( + script: "cat .docker-image-names/ci_i386", + label: "Find docker image name for ci_i386", + returnStdout: true, + ).trim() + ci_lint = sh( + script: "cat .docker-image-names/ci_lint", + label: "Find docker image name for ci_lint", + returnStdout: true, + ).trim() + ci_minimal = sh( + script: "cat .docker-image-names/ci_minimal", + label: "Find docker image name for ci_minimal", + returnStdout: true, + ).trim() + ci_riscv = sh( + script: "cat .docker-image-names/ci_riscv", + label: "Find docker image name for ci_riscv", + returnStdout: true, + ).trim() + ci_wasm = sh( + script: "cat .docker-image-names/ci_wasm", + label: "Find docker image name for ci_wasm", + returnStdout: true, + ).trim() + } + + ci_arm = params.ci_arm_param ?: ci_arm + ci_cortexm = params.ci_cortexm_param ?: ci_cortexm + ci_cpu = params.ci_cpu_param ?: ci_cpu + ci_gpu = params.ci_gpu_param ?: ci_gpu + ci_hexagon = params.ci_hexagon_param ?: ci_hexagon + ci_i386 = params.ci_i386_param ?: ci_i386 + ci_lint = params.ci_lint_param ?: ci_lint + ci_minimal = params.ci_minimal_param ?: ci_minimal + ci_riscv = params.ci_riscv_param ?: ci_riscv + ci_wasm = params.ci_wasm_param ?: ci_wasm + + sh (script: """ + echo "Docker images being used in this build:" + echo " ci_arm = ${ci_arm}" + echo " ci_cortexm = ${ci_cortexm}" + echo " ci_cpu = ${ci_cpu}" + echo " ci_gpu = ${ci_gpu}" + echo " ci_hexagon = ${ci_hexagon}" + echo " ci_i386 = ${ci_i386}" + echo " ci_lint = ${ci_lint}" + echo " ci_minimal = ${ci_minimal}" + echo " ci_riscv = ${ci_riscv}" + echo " ci_wasm = ${ci_wasm}" + """, label: 'Docker image names') + + is_docs_only_build = sh ( + returnStatus: true, + script: "./${jenkins_scripts_root}/git_change_docs.sh", + label: 'Check for docs only changes', + ) + skip_ci = should_skip_ci(env.CHANGE_ID) + skip_slow_tests = should_skip_slow_tests(env.CHANGE_ID) + rebuild_docker_images = sh ( + returnStatus: true, + script: "./${jenkins_scripts_root}/git_change_docker.sh", + label: 'Check for any docker changes', + ) + + if (skip_ci) { + // Don't rebuild when skipping CI + rebuild_docker_images = false + } + } + } + } +} +def ci_setup(image) { + sh ( + script: "${docker_run} ${image} ./tests/scripts/task_clear_pytest.sh", + label: 'Clean up old workspace', + ) +} + +def python_unittest(image) { + sh ( + script: "${docker_run} ${image} ./tests/scripts/task_python_unittest.sh", + label: 'Run Python unit tests', + ) +} + +def fsim_test(image) { + sh ( + script: "${docker_run} ${image} ./tests/scripts/task_python_vta_fsim.sh", + label: 'Run VTA tests in FSIM', + ) +} + +def make_standalone_crt(image, build_dir) { + sh ( + script: """ + set -eux + ${docker_run} ${image} python3 ./tests/scripts/task_build.py \ + --sccache-bucket tvm-sccache-prod \ + --cmake-target standalone_crt \ + --build-dir build + ${docker_run} ${image} python3 ./tests/scripts/task_build.py \ + --sccache-bucket tvm-sccache-prod \ + --cmake-target crttest \ + --build-dir build + """, + label: 'Make standalone CRT', + ) +} + +def make_cpp_tests(image, build_dir) { + sh ( + script: """ + set -eux + ${docker_run} ${image} python3 ./tests/scripts/task_build.py \ + --sccache-bucket tvm-sccache-prod \ + --cmake-target cpptest \ + --build-dir ${build_dir} + """, + label: 'Make C++ tests', + ) +} + +def cmake_build(image, path, make_flag) { + sh ( + script: "${docker_run} --env CI_NUM_EXECUTORS ${image} ./tests/scripts/task_build.py --sccache-bucket tvm-sccache-prod", + label: 'Run cmake build', + ) +} +def 
cpp_unittest(image) { + sh ( + script: "${docker_run} --env CI_NUM_EXECUTORS ${image} ./tests/scripts/task_cpp_unittest.sh", + label: 'Run C++ tests', + ) +} + +def micro_cpp_unittest(image) { + sh ( + script: "${docker_run} --env CI_NUM_EXECUTORS ${image} ./tests/scripts/task_microtvm_cpp_tests.sh build", + label: 'Run microTVM C++ tests', + ) +} + +cancel_previous_build() + +prepare() +def build() { + stage('Build') { + if (!skip_ci && is_docs_only_build != 1) { + node('CPU-SMALL') { + ws("workspace/exec_${env.EXECUTOR_NUMBER}/tvm/build-riscv") { + init_git() + docker_init(ci_riscv) + timeout(time: max_time, unit: 'MINUTES') { + sh ( + script: "${docker_run} ${ci_riscv} ./tests/scripts/task_config_build_riscv.sh build", + label: 'Create RISC-V cmake config', + ) + cmake_build(ci_riscv, 'build', '-j2') + make_standalone_crt(ci_riscv, 'build') + make_cpp_tests(ci_riscv, 'build') + sh( + script: "./${jenkins_scripts_root}/s3.py --action upload --bucket ${s3_bucket} --prefix ${s3_prefix}/riscv --items build/libtvm.so build/libtvm_runtime.so build/config.cmake build/libtvm_allvisible.so build/standalone_crt build/build.ninja build/crttest build/cpptest build/build.ninja build/CMakeFiles/rules.ninja build/microtvm_template_projects", + label: 'Upload artifacts to S3', + ) + } + } + } + } else { + Utils.markStageSkippedForConditional('BUILD: RISC-V') + } + } +} +build() + + + + + +def shard_run_test_RISC_V_1_of_1() { + if (!skip_ci && is_docs_only_build != 1) { + node('CPU-SMALL') { + ws("workspace/exec_${env.EXECUTOR_NUMBER}/tvm/test-riscv") { + try { + init_git() + docker_init(ci_riscv) + timeout(time: max_time, unit: 'MINUTES') { + withEnv([ + 'PLATFORM=riscv', + 'TEST_STEP_NAME=test: RISC-V', + 'TVM_NUM_SHARDS=1', + 'TVM_SHARD_INDEX=0', + "SKIP_SLOW_TESTS=${skip_slow_tests}"], { + sh( + script: "./${jenkins_scripts_root}/s3.py --action download --bucket ${s3_bucket} --prefix ${s3_prefix}/riscv", + label: 'Download artifacts from S3', + ) + + ci_setup(ci_riscv) + cpp_unittest(ci_cortexm) + micro_cpp_unittest(ci_cortexm) + sh ( + script: "${docker_run} ${ci_riscv} ./tests/scripts/task_riscv_microtvm.sh", + label: 'Run microTVM tests', + ) + }) + } + } finally { + try { + sh( + script: "./${jenkins_scripts_root}/s3.py --action upload --bucket ${s3_bucket} --prefix ${s3_prefix}/pytest-results/test_RISC_V --items build/pytest-results", + label: 'Upload JUnits to S3', + ) + + junit 'build/pytest-results/*.xml' + } catch (Exception e) { + echo 'Exception during JUnit upload: ' + e.toString() + } + } + } + } + } else { + Utils.markStageSkippedForConditional('test: RISC-V 1 of 1') + } +} + + +def test() { + stage('Test') { + environment { + SKIP_SLOW_TESTS = "${skip_slow_tests}" + } + parallel( + 'test: RISC-V 1 of 1': { + shard_run_test_RISC_V_1_of_1() + }, + ) + } +} +test() diff --git a/ci/jenkins/generated/wasm_jenkinsfile.groovy b/ci/jenkins/generated/wasm_jenkinsfile.groovy new file mode 100644 index 000000000000..773154010c16 --- /dev/null +++ b/ci/jenkins/generated/wasm_jenkinsfile.groovy @@ -0,0 +1,515 @@ +#!groovy +// -*- mode: groovy -*- + +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. 
You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// Jenkins pipeline
+// See documents at https://jenkins.io/doc/book/pipeline/jenkinsfile/
+
+// Docker env used for testing
+// Different images may have different version tags
+// because some of them are more stable than others.
+//
+// Docker images are maintained by the PMC, cached in dockerhub,
+// and remain relatively stable over time.
+// Flow for upgrading the docker env (requires a committer):
+//
+// - Send PR to upgrade build script in the repo
+// - Build the new docker image
+// - Tag the docker image with a new version and push to a binary cache.
+// - Update the version in the Jenkinsfile, send a PR
+// - Fix any issues with the new image version in the PR
+// - Merge the PR and now we are on the new version
+// - Tag the new version as the latest
+// - Periodically clean up the old versions on local workers
+//
+
+// ============================= IMPORTANT NOTE =============================
+// This file is generated by 'jenkins/generate.py'. Do not edit this file directly!
+// Make edits to 'jenkins/Jenkinsfile.j2' and regenerate this with
+// 'python3 jenkins/generate.py'
+// Note: This timestamp is here to ensure that updates to the Jenkinsfile are
+// always rebased on main before merging:
+// Generated at 2022-12-01T17:07:24.772820
+
+import org.jenkinsci.plugins.pipeline.modeldefinition.Utils
+ci_lint = 'tlcpack/ci-lint:20221013-060115-61c9742ea'
+ci_gpu = 'tlcpack/ci-gpu:20221019-060125-0b4836739'
+ci_cpu = 'tlcpack/ci-cpu:20221013-060115-61c9742ea'
+ci_minimal = 'tlcpack/ci-minimal:20221013-060115-61c9742ea'
+ci_wasm = 'tlcpack/ci-wasm:20221013-060115-61c9742ea'
+ci_i386 = 'tlcpack/ci-i386:20221013-060115-61c9742ea'
+ci_cortexm = 'tlcpack/ci-cortexm:20221013-060115-61c9742ea'
+ci_arm = 'tlcpack/ci-arm:20221013-060115-61c9742ea'
+ci_hexagon = 'tlcpack/ci-hexagon:20221013-060115-61c9742ea'
+ci_riscv = 'tlcpack/ci-riscv:20221013-060115-61c9742ea'
+
+// Parameters to allow overriding (in the Jenkins UI) the images
+// to be used by a given build. When provided, they take precedence
+// over the default values above.
+properties([
+  parameters([
+    string(name: 'ci_arm_param', defaultValue: ''),
+    string(name: 'ci_cortexm_param', defaultValue: ''),
+    string(name: 'ci_cpu_param', defaultValue: ''),
+    string(name: 'ci_gpu_param', defaultValue: ''),
+    string(name: 'ci_hexagon_param', defaultValue: ''),
+    string(name: 'ci_i386_param', defaultValue: ''),
+    string(name: 'ci_lint_param', defaultValue: ''),
+    string(name: 'ci_minimal_param', defaultValue: ''),
+    string(name: 'ci_riscv_param', defaultValue: ''),
+    string(name: 'ci_wasm_param', defaultValue: ''),
+  ])
+])
+
+// Placeholders for newly built Docker image names (if rebuild_docker_images
+// is used)
+ built_ci_arm = null;
+ built_ci_cortexm = null;
+ built_ci_cpu = null;
+ built_ci_gpu = null;
+ built_ci_hexagon = null;
+ built_ci_i386 = null;
+ built_ci_lint = null;
+ built_ci_minimal = null;
+ built_ci_riscv = null;
+ built_ci_wasm = null;
+
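+// As an editor's sketch of how the override parameters above are meant to be
+// used (the image tag below is hypothetical, not a published tag): a committer
+// validating a new image can pass it as a build parameter in the Jenkins UI
+// instead of editing this generated file, e.g.
+//
+//   ci_cpu_param = 'tlcpackstaging/ci_cpu:20221201-000000-0123456789'
+//
+// prepare() then applies `ci_cpu = params.ci_cpu_param ?: ci_cpu`, so any
+// non-empty parameter takes precedence over the defaults defined above.
+
+// Global variable assigned during Sanity Check that holds the sha1 which should be
+// merged into the PR in all branches.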
+upstream_revision = null + +// command to start a docker container +docker_run = 'docker/bash.sh --env CI --env TVM_SHARD_INDEX --env TVM_NUM_SHARDS --env RUN_DISPLAY_URL --env PLATFORM --env SKIP_SLOW_TESTS --env TEST_STEP_NAME' +docker_build = 'docker/build.sh' +// timeout in minutes +max_time = 180 +rebuild_docker_images = false + +s3_bucket = 'tvm-jenkins-artifacts-prod' +s3_prefix = "tvm/${env.BRANCH_NAME}/${env.BUILD_NUMBER}" + +// Jenkins script root directory +jenkins_scripts_root = "ci/scripts/jenkins" + + +// General note: Jenkins has limits on the size of a method (or top level code) +// that are pretty strict, so most usage of groovy methods in these templates +// are purely to satisfy the JVM +def per_exec_ws(folder) { + return "workspace/exec_${env.EXECUTOR_NUMBER}/" + folder +} + +// initialize source codes +def init_git() { + retry(5) { + checkout scm + } + + // Add more info about job node + sh ( + script: './tests/scripts/task_show_node_info.sh', + label: 'Show executor node info', + ) + + // Determine merge commit to use for all stages + if (env.BRANCH_NAME == 'main') { + // Only set upstream_revision to HEAD and skip merging to avoid a race with another commit merged to main. + update_upstream_revision("HEAD") + } else { + // This is PR branch so merge with latest main. + merge_with_main() + } + + sh( + script: """ + set -eux + . ${jenkins_scripts_root}/retry.sh + retry 3 timeout 5m git submodule update --init -f --jobs 0 + """, + label: 'Update git submodules', + ) + checkout_trusted_files() +} + +def update_upstream_revision(git_ref) { + if (upstream_revision == null) { + upstream_revision = sh( + script: "git log -1 ${git_ref} --format=\'%H\'", + label: 'Determine upstream revision', + returnStdout: true, + ).trim() + } +} + +def merge_with_main() { + sh ( + script: 'git fetch origin main', + label: 'Fetch upstream', + ) + update_upstream_revision("FETCH_HEAD") + sh ( + script: "git -c user.name=TVM-Jenkins -c user.email=jenkins@tvm.apache.org merge ${upstream_revision}", + label: 'Merge to origin/main' + ) +} + +def docker_init(image) { + // Clear out all Docker images that aren't going to be used + sh( + script: """ + set -eux + docker image ls --all + IMAGES=\$(docker image ls --all --format '{{.Repository}}:{{.Tag}} {{.ID}}') + + echo -e "Found images:\\n\$IMAGES" + echo "\$IMAGES" | { grep -vE '${image}' || test \$? = 1; } | { xargs docker rmi || test \$? = 123; } + + docker image ls --all + """, + label: 'Clean old Docker images', + ) + + if (image.contains("amazonaws.com")) { + // If this string is in the image name it's from ECR and needs to be pulled + // with the right credentials + // ecr_pull(image) + sh "echo Pulling from AWS is not implemented && exit 1" + } else { + sh( + script: """ + set -eux + . ${jenkins_scripts_root}/retry.sh + retry 5 docker pull ${image} + """, + label: 'Pull docker image', + ) + } +} + +def should_skip_slow_tests(pr_number) { + withCredentials([string( + credentialsId: 'tvm-bot-jenkins-reader', + variable: 'GITHUB_TOKEN', + )]) { + // Exit code of 1 means run slow tests, exit code of 0 means skip slow tests + result = sh ( + returnStatus: true, + script: "./${jenkins_scripts_root}/should_run_slow_tests.py --pr '${pr_number}'", + label: 'Check if CI should run slow tests', + ) + } + return result == 0 +} + +def cancel_previous_build() { + // cancel previous build if it is not on main. 
+  if (env.BRANCH_NAME != 'main') {
+    def buildNumber = env.BUILD_NUMBER as int
+    // Milestone API allows us to cancel previous build
+    // with the same milestone number
+    if (buildNumber > 1) milestone(buildNumber - 1)
+    milestone(buildNumber)
+  }
+}
+
+def checkout_trusted_files() {
+  // trust everything from branch builds
+  if (env.BRANCH_NAME == null || !env.BRANCH_NAME.startsWith('PR-')) {
+    return;
+  }
+
+  // trust people listed in CONTRIBUTORS.md
+  grep_code = sh(
+    returnStatus: true,
+    script: "git show '${upstream_revision}:CONTRIBUTORS.md' | grep '@${env.CHANGE_AUTHOR}'",
+    label: 'Check if change is from a contributor',
+  )
+
+  if (grep_code == 1) {
+    // Any scripts that run on the bare host and not inside a Docker container
+    // (especially those that access secrets) should be checked out here so
+    // only trusted versions are used in CI
+    sh(
+      script: "git checkout ${upstream_revision} ${jenkins_scripts_root}/.",
+      label: 'Check out trusted files',
+    )
+  }
+}
+
+def should_skip_ci(pr_number) {
+  if (env.BRANCH_NAME == null || !env.BRANCH_NAME.startsWith('PR-')) {
+    // never skip CI on builds sourced from a branch
+    return false
+  }
+  glob_skip_ci_code = sh (
+    returnStatus: true,
+    script: "./${jenkins_scripts_root}/git_skip_ci_globs.py",
+    label: 'Check if CI should be skipped due to changed files',
+  )
+  if (glob_skip_ci_code == 0) {
+    return true
+  }
+  withCredentials([string(
+    credentialsId: 'tvm-bot-jenkins-reader',
+    variable: 'GITHUB_TOKEN',
+    )]) {
+    // Exit code of 1 means run full CI (or the script had an error, so run
+    // full CI just in case). Exit code of 0 means skip CI.
+    git_skip_ci_code = sh (
+      returnStatus: true,
+      script: "./${jenkins_scripts_root}/git_skip_ci.py --pr '${pr_number}'",
+      label: 'Check if CI should be skipped',
+    )
+  }
+  return git_skip_ci_code == 0
+}
+
+def check_pr(pr_number) {
+  if (env.BRANCH_NAME == null || !env.BRANCH_NAME.startsWith('PR-')) {
+    // only check PR title/body on PR builds, never on branch builds
+    return false
+  }
+  withCredentials([string(
+    credentialsId: 'tvm-bot-jenkins-reader',
+    variable: 'GITHUB_TOKEN',
+    )]) {
+    sh (
+      script: "python3 ${jenkins_scripts_root}/check_pr.py --pr ${pr_number}",
+      label: 'Check PR title and body',
+    )
+  }
+
+}
+
+def prepare() {
+  stage('Prepare') {
+    node('CPU-SMALL') {
+      ws("workspace/exec_${env.EXECUTOR_NUMBER}/tvm/prepare") {
+        init_git()
+
+        check_pr(env.CHANGE_ID)
+
+        if (env.DETERMINE_DOCKER_IMAGES == 'yes') {
+          sh(
+            script: "./${jenkins_scripts_root}/determine_docker_images.py ci_arm=${ci_arm} ci_cortexm=${ci_cortexm} ci_cpu=${ci_cpu} ci_gpu=${ci_gpu} ci_hexagon=${ci_hexagon} ci_i386=${ci_i386} ci_lint=${ci_lint} ci_minimal=${ci_minimal} ci_riscv=${ci_riscv} ci_wasm=${ci_wasm} ",
+            label: 'Decide whether to use tlcpack or tlcpackstaging for Docker images',
+          )
+          // Pull image names from the results of determine_docker_images.py
+          ci_arm = sh(
+            script: "cat .docker-image-names/ci_arm",
+            label: "Find docker image name for ci_arm",
+            returnStdout: true,
+          ).trim()
+          ci_cortexm = sh(
+            script: "cat .docker-image-names/ci_cortexm",
+            label: "Find docker image name for ci_cortexm",
+            returnStdout: true,
+          ).trim()
+          ci_cpu = sh(
+            script: "cat .docker-image-names/ci_cpu",
+            label: "Find docker image name for ci_cpu",
+            returnStdout: true,
+          ).trim()
+          ci_gpu = sh(
+            script: "cat .docker-image-names/ci_gpu",
+            label: "Find docker image name for ci_gpu",
+            returnStdout: true,
+          ).trim()
+          ci_hexagon = sh(
+            script: "cat .docker-image-names/ci_hexagon",
+            label: "Find docker image name for 
ci_hexagon", + returnStdout: true, + ).trim() + ci_i386 = sh( + script: "cat .docker-image-names/ci_i386", + label: "Find docker image name for ci_i386", + returnStdout: true, + ).trim() + ci_lint = sh( + script: "cat .docker-image-names/ci_lint", + label: "Find docker image name for ci_lint", + returnStdout: true, + ).trim() + ci_minimal = sh( + script: "cat .docker-image-names/ci_minimal", + label: "Find docker image name for ci_minimal", + returnStdout: true, + ).trim() + ci_riscv = sh( + script: "cat .docker-image-names/ci_riscv", + label: "Find docker image name for ci_riscv", + returnStdout: true, + ).trim() + ci_wasm = sh( + script: "cat .docker-image-names/ci_wasm", + label: "Find docker image name for ci_wasm", + returnStdout: true, + ).trim() + } + + ci_arm = params.ci_arm_param ?: ci_arm + ci_cortexm = params.ci_cortexm_param ?: ci_cortexm + ci_cpu = params.ci_cpu_param ?: ci_cpu + ci_gpu = params.ci_gpu_param ?: ci_gpu + ci_hexagon = params.ci_hexagon_param ?: ci_hexagon + ci_i386 = params.ci_i386_param ?: ci_i386 + ci_lint = params.ci_lint_param ?: ci_lint + ci_minimal = params.ci_minimal_param ?: ci_minimal + ci_riscv = params.ci_riscv_param ?: ci_riscv + ci_wasm = params.ci_wasm_param ?: ci_wasm + + sh (script: """ + echo "Docker images being used in this build:" + echo " ci_arm = ${ci_arm}" + echo " ci_cortexm = ${ci_cortexm}" + echo " ci_cpu = ${ci_cpu}" + echo " ci_gpu = ${ci_gpu}" + echo " ci_hexagon = ${ci_hexagon}" + echo " ci_i386 = ${ci_i386}" + echo " ci_lint = ${ci_lint}" + echo " ci_minimal = ${ci_minimal}" + echo " ci_riscv = ${ci_riscv}" + echo " ci_wasm = ${ci_wasm}" + """, label: 'Docker image names') + + is_docs_only_build = sh ( + returnStatus: true, + script: "./${jenkins_scripts_root}/git_change_docs.sh", + label: 'Check for docs only changes', + ) + skip_ci = should_skip_ci(env.CHANGE_ID) + skip_slow_tests = should_skip_slow_tests(env.CHANGE_ID) + rebuild_docker_images = sh ( + returnStatus: true, + script: "./${jenkins_scripts_root}/git_change_docker.sh", + label: 'Check for any docker changes', + ) + + if (skip_ci) { + // Don't rebuild when skipping CI + rebuild_docker_images = false + } + } + } + } +} +def ci_setup(image) { + sh ( + script: "${docker_run} ${image} ./tests/scripts/task_clear_pytest.sh", + label: 'Clean up old workspace', + ) +} + +def python_unittest(image) { + sh ( + script: "${docker_run} ${image} ./tests/scripts/task_python_unittest.sh", + label: 'Run Python unit tests', + ) +} + +def fsim_test(image) { + sh ( + script: "${docker_run} ${image} ./tests/scripts/task_python_vta_fsim.sh", + label: 'Run VTA tests in FSIM', + ) +} + +def make_standalone_crt(image, build_dir) { + sh ( + script: """ + set -eux + ${docker_run} ${image} python3 ./tests/scripts/task_build.py \ + --sccache-bucket tvm-sccache-prod \ + --cmake-target standalone_crt \ + --build-dir build + ${docker_run} ${image} python3 ./tests/scripts/task_build.py \ + --sccache-bucket tvm-sccache-prod \ + --cmake-target crttest \ + --build-dir build + """, + label: 'Make standalone CRT', + ) +} + +def make_cpp_tests(image, build_dir) { + sh ( + script: """ + set -eux + ${docker_run} ${image} python3 ./tests/scripts/task_build.py \ + --sccache-bucket tvm-sccache-prod \ + --cmake-target cpptest \ + --build-dir ${build_dir} + """, + label: 'Make C++ tests', + ) +} + +def cmake_build(image, path, make_flag) { + sh ( + script: "${docker_run} --env CI_NUM_EXECUTORS ${image} ./tests/scripts/task_build.py --sccache-bucket tvm-sccache-prod", + label: 'Run cmake build', + ) +} +def 
cpp_unittest(image) { + sh ( + script: "${docker_run} --env CI_NUM_EXECUTORS ${image} ./tests/scripts/task_cpp_unittest.sh", + label: 'Run C++ tests', + ) +} + +def micro_cpp_unittest(image) { + sh ( + script: "${docker_run} --env CI_NUM_EXECUTORS ${image} ./tests/scripts/task_microtvm_cpp_tests.sh build", + label: 'Run microTVM C++ tests', + ) +} + +cancel_previous_build() + +prepare() +def build() { + stage('Build') { + if (!skip_ci && is_docs_only_build != 1) { + node('CPU-SMALL') { + ws("workspace/exec_${env.EXECUTOR_NUMBER}/tvm/build-wasm") { + init_git() + docker_init(ci_wasm) + timeout(time: max_time, unit: 'MINUTES') { + sh ( + script: "${docker_run} ${ci_wasm} ./tests/scripts/task_config_build_wasm.sh build", + label: 'Create WASM cmake config', + ) + cmake_build(ci_wasm, 'build', '-j2') + make_standalone_crt(ci_wasm, 'build') + make_cpp_tests(ci_wasm, 'build') + cpp_unittest(ci_wasm) + ci_setup(ci_wasm) + sh ( + script: "${docker_run} ${ci_wasm} ./tests/scripts/task_web_wasm.sh", + label: 'Run WASM lint and tests', + ) + } + } + } + } else { + Utils.markStageSkippedForConditional('BUILD: WASM') + } + } +} +build() diff --git a/ci/jenkins/templates/arm_jenkinsfile.groovy.j2 b/ci/jenkins/templates/arm_jenkinsfile.groovy.j2 index 59d0ebc0e621..8840ede5921a 100644 --- a/ci/jenkins/templates/arm_jenkinsfile.groovy.j2 +++ b/ci/jenkins/templates/arm_jenkinsfile.groovy.j2 @@ -1,2 +1,83 @@ {% include "utils/base.groovy.j2" with context %} {% import 'utils/macros.j2' as m with context -%} + +{% call m.invoke_build( + name='BUILD: arm', + node='ARM-SMALL', + condition='!skip_ci && is_docs_only_build != 1', + ws='tvm/build-arm', + docker_image='ci_arm', +) %} + sh ( + script: "${docker_run} ${ci_arm} ./tests/scripts/task_config_build_arm.sh build", + label: 'Create ARM cmake config', + ) + cmake_build(ci_arm, 'build', '-j4') + make_standalone_crt(ci_arm, 'build') + make_cpp_tests(ci_arm, 'build') + {{ m.upload_artifacts(tag='arm', filenames=tvm_multilib + cpptest + crttest + standalone_crt) }} +{% endcall %} + +{% set test_method_names = [] %} + +{% call(shard_index, num_shards) m.sharded_test_step( + name="integration: aarch64", + num_shards=4, + node="ARM-SMALL", + ws="tvm/ut-python-arm", + platform="arm", + docker_image="ci_arm", + test_method_names=test_method_names, +) %} + {{ m.download_artifacts(tag='arm') }} + ci_setup(ci_arm) + python_unittest(ci_arm) + sh ( + script: "${docker_run} ${ci_arm} ./tests/scripts/task_python_integration.sh", + label: 'Run CPU integration tests', + ) +{% endcall %} + +{% call(shard_index, num_shards) m.sharded_test_step( + name="topi: aarch64", + node="ARM-SMALL", + ws="tvm/ut-python-arm", + platform="arm", + docker_image="ci_arm", + num_shards=2, + test_method_names=test_method_names, +) %} + {{ m.download_artifacts(tag='arm') }} + ci_setup(ci_arm) + {% if shard_index == 1 %} + cpp_unittest(ci_arm) + micro_cpp_unittest(ci_arm) + {% endif %} + sh ( + script: "${docker_run} ${ci_arm} ./tests/scripts/task_python_arm_compute_library.sh", + label: 'Run test_arm_compute_lib test', + ) + sh ( + script: "${docker_run} ${ci_arm} ./tests/scripts/task_python_topi.sh", + label: 'Run TOPI tests', + ) +{% endcall %} + +{% call(shard_index, num_shards) m.sharded_test_step( + name="frontend: aarch64", + node="ARM-SMALL", + ws="tvm/frontend-python-arm", + platform="arm", + docker_image="ci_arm", + num_shards=2, + test_method_names=test_method_names, +) %} + {{ m.download_artifacts(tag='arm') }} + ci_setup(ci_arm) + sh ( + script: "${docker_run} ${ci_arm} 
./tests/scripts/task_python_frontend_cpu.sh", + label: 'Run Python frontend tests', + ) +{% endcall %} + +{{ m.invoke_tests(test_method_names) -}} \ No newline at end of file diff --git a/ci/jenkins/templates/cortexm_jenkinsfile.groovy.j2 b/ci/jenkins/templates/cortexm_jenkinsfile.groovy.j2 new file mode 100644 index 000000000000..c982ee8176b7 --- /dev/null +++ b/ci/jenkins/templates/cortexm_jenkinsfile.groovy.j2 @@ -0,0 +1,48 @@ +{% include "utils/base.groovy.j2" with context %} +{% import 'utils/macros.j2' as m with context -%} + +{% call m.invoke_build( + name='BUILD: Cortex-M', + node='CPU-SMALL', + condition='!skip_ci && is_docs_only_build != 1', + ws='tvm/build-cortexm', + docker_image='ci_cortexm', +) %} + sh ( + script: "${docker_run} ${ci_cortexm} ./tests/scripts/task_config_build_cortexm.sh build", + label: 'Create Cortex-M cmake config', + ) + cmake_build(ci_cortexm, 'build', '-j2') + make_standalone_crt(ci_cortexm, 'build') + make_cpp_tests(ci_cortexm, 'build') + {{ m.upload_artifacts(tag='cortexm', filenames=tvm_lib + tvm_allvisible + crttest + standalone_crt + cpptest + microtvm_template_projects) }} +{% endcall %} + +{% set test_method_names = [] %} + +{% call(shard_index, num_shards) m.sharded_test_step( + name="test: Cortex-M", + node="CPU-SMALL", + ws="tvm/test-cortexm", + platform="cortexm", + docker_image="ci_cortexm", + num_shards=12, + test_method_names=test_method_names, +) %} + {{ m.download_artifacts(tag='cortexm') }} + ci_setup(ci_cortexm) + {% if shard_index == 1%} + cpp_unittest(ci_cortexm) + micro_cpp_unittest(ci_cortexm) + sh ( + script: "${docker_run} ${ci_cortexm} ./tests/scripts/task_demo_microtvm.sh", + label: 'Run microTVM demos', + ) + {% endif %} + sh ( + script: "${docker_run} ${ci_cortexm} ./tests/scripts/task_python_microtvm.sh", + label: 'Run microTVM tests', + ) +{% endcall %} + +{{ m.invoke_tests(test_method_names) -}} \ No newline at end of file diff --git a/ci/jenkins/templates/cpu_jenkinsfile.groovy.j2 b/ci/jenkins/templates/cpu_jenkinsfile.groovy.j2 new file mode 100644 index 000000000000..2443ad3a42f0 --- /dev/null +++ b/ci/jenkins/templates/cpu_jenkinsfile.groovy.j2 @@ -0,0 +1,81 @@ +{% include "utils/base.groovy.j2" with context %} +{% import 'utils/macros.j2' as m with context -%} + +{% call m.invoke_build( + name='BUILD: CPU', + node='CPU-SMALL', + condition='!skip_ci && is_docs_only_build != 1', + ws='tvm/build-cpu', + docker_image='ci_cpu', +) %} + sh ( + script: "${docker_run} ${ci_cpu} ./tests/scripts/task_config_build_cpu.sh build", + label: 'Create CPU cmake config', + ) + cmake_build(ci_cpu, 'build', '-j2') + make_standalone_crt(ci_cpu, 'build') + make_cpp_tests(ci_cpu, 'build') + {{ m.upload_artifacts(tag='cpu', filenames=tvm_multilib_tsim + tvm_allvisible + crttest + cpptest + standalone_crt) }} + ci_setup(ci_cpu) + // sh "${docker_run} ${ci_cpu} ./tests/scripts/task_golang.sh" + // TODO(@jroesch): need to resolve CI issue will turn back on in follow up patch + sh (script: "${docker_run} ${ci_cpu} ./tests/scripts/task_rust.sh", label: 'Rust build and test') +{% endcall %} + +{% set test_method_names = [] %} + +{% call(shard_index, num_shards) m.sharded_test_step( + name="integration: CPU", + node="CPU-SMALL", + num_shards=4, + ws="tvm/integration-python-cpu", + platform="cpu", + docker_image="ci_cpu", + test_method_names=test_method_names, +) %} + {{ m.download_artifacts(tag='cpu') }} + ci_setup(ci_cpu) + sh ( + script: "${docker_run} ${ci_cpu} ./tests/scripts/task_python_integration.sh", + label: 'Run CPU integration tests', + ) 
+{% endcall %} + +{% call(shard_index, num_shards) m.sharded_test_step( + name="unittest: CPU", + node="CPU-SMALL", + ws="tvm/ut-python-cpu", + platform="cpu", + num_shards=1, + docker_image="ci_cpu", + test_method_names=test_method_names, +) %} + {{ m.download_artifacts(tag='cpu') }} + ci_setup(ci_cpu) + cpp_unittest(ci_cpu) + micro_cpp_unittest(ci_cpu) + python_unittest(ci_cpu) + fsim_test(ci_cpu) + sh ( + script: "${docker_run} ${ci_cpu} ./tests/scripts/task_python_vta_tsim.sh", + label: 'Run VTA tests in TSIM', + ) +{% endcall %} +{% call(shard_index, num_shards) m.sharded_test_step( + name="frontend: CPU", + node="CPU-SMALL", + ws="tvm/frontend-python-cpu", + platform="cpu", + num_shards=1, + docker_image="ci_cpu", + test_method_names=test_method_names, +) %} + {{ m.download_artifacts(tag='cpu') }} + ci_setup(ci_cpu) + sh ( + script: "${docker_run} ${ci_cpu} ./tests/scripts/task_python_frontend_cpu.sh", + label: 'Run Python frontend tests', + ) +{% endcall %} + +{{ m.invoke_tests(test_method_names) -}} \ No newline at end of file diff --git a/ci/jenkins/Deploy.groovy.j2 b/ci/jenkins/templates/docker_jenkinsfile.groovy.j2 similarity index 51% rename from ci/jenkins/Deploy.groovy.j2 rename to ci/jenkins/templates/docker_jenkinsfile.groovy.j2 index 5cfffc7caef3..21013e546340 100644 --- a/ci/jenkins/Deploy.groovy.j2 +++ b/ci/jenkins/templates/docker_jenkinsfile.groovy.j2 @@ -1,21 +1,103 @@ -/* -stage('Build packages') { - parallel 'conda CPU': { - node('CPU') { - sh "${docker_run} tlcpack/conda-cpu ./conda/build_cpu.sh +{% include "utils/base.groovy.j2" with context %} +{% import 'utils/macros.j2' as m with context -%} + +def ecr_push(full_name) { + aws_account_id = sh( + returnStdout: true, + script: 'aws sts get-caller-identity | grep Account | cut -f4 -d\\"', + label: 'Get AWS ID' + ).trim() + + def ecr_name = "${aws_account_id}.{{ aws_ecr_url }}/${full_name}" + try { + withEnv([ + "AWS_ACCOUNT_ID=${aws_account_id}", + 'AWS_DEFAULT_REGION={{ aws_default_region }}', + "AWS_ECR_REPO=${aws_account_id}.{{ aws_ecr_url }}"]) { + sh( + script: ''' + set -eux + aws ecr get-login-password --region $AWS_DEFAULT_REGION | docker login --username AWS --password-stdin $AWS_ECR_REPO + ''', + label: 'Log in to ECR' + ) + sh( + script: """ + set -x + . ${jenkins_scripts_root}/retry.sh + docker tag ${full_name} \$AWS_ECR_REPO/${full_name} + retry 5 docker push \$AWS_ECR_REPO/${full_name} + """, + label: 'Upload image to ECR' + ) + } + } finally { + withEnv([ + "AWS_ACCOUNT_ID=${aws_account_id}", + 'AWS_DEFAULT_REGION={{ aws_default_region }}', + "AWS_ECR_REPO=${aws_account_id}.{{ aws_ecr_url }}"]) { + sh( + script: 'docker logout $AWS_ECR_REPO', + label: 'Clean up login credentials' + ) + } + } + return ecr_name +} + +def ecr_pull(full_name) { + aws_account_id = sh( + returnStdout: true, + script: 'aws sts get-caller-identity | grep Account | cut -f4 -d\\"', + label: 'Get AWS ID' + ).trim() + + try { + withEnv([ + "AWS_ACCOUNT_ID=${aws_account_id}", + 'AWS_DEFAULT_REGION={{ aws_default_region }}', + "AWS_ECR_REPO=${aws_account_id}.{{ aws_ecr_url }}"]) { + sh( + script: ''' + set -eux + aws ecr get-login-password --region $AWS_DEFAULT_REGION | docker login --username AWS --password-stdin $AWS_ECR_REPO + ''', + label: 'Log in to ECR' + ) + sh( + script: """ + set -eux + . 
ci/scripts/retry.sh + retry 5 docker pull ${full_name} + """, + label: 'Pull image from ECR' + ) } - }, - 'conda cuda': { - node('CPU') { - sh "${docker_run} tlcpack/conda-cuda90 ./conda/build_cuda.sh - sh "${docker_run} tlcpack/conda-cuda100 ./conda/build_cuda.sh + } finally { + withEnv([ + "AWS_ACCOUNT_ID=${aws_account_id}", + 'AWS_DEFAULT_REGION={{ aws_default_region }}', + "AWS_ECR_REPO=${aws_account_id}.{{ aws_ecr_url }}"]) { + sh( + script: 'docker logout $AWS_ECR_REPO', + label: 'Clean up login credentials' + ) } } -// Here we could upload the packages to anaconda for releases -// and/or the main branch } -*/ +def build_image(image_name) { + hash = sh( + returnStdout: true, + script: 'git log -1 --format=\'%h\'' + ).trim() + def full_name = "${image_name}:${env.BRANCH_NAME}-${hash}-${env.BUILD_NUMBER}" + sh( + script: "${docker_build} ${image_name} --spec ${full_name}", + label: 'Build docker image' + ) + return ecr_push(full_name) +} def update_docker(ecr_image, hub_image) { if (ecr_image == null) { @@ -40,63 +122,28 @@ def update_docker(ecr_image, hub_image) { ) } -def deploy_docs() { - // Note: This code must stay in the Jenkinsfile to ensure that it runs - // from a trusted context only - sh( - script: ''' - set -eux - rm -rf tvm-site - git clone -b $DOCS_DEPLOY_BRANCH --depth=1 https://github.com/apache/tvm-site - cd tvm-site - git status - git checkout -B $DOCS_DEPLOY_BRANCH - - git ls-tree HEAD docs/ --name-only | grep -vP '^docs/v\\d' | xargs rm -rf - mkdir -p docs - tar xf ../docs.tgz -C docs - COMMIT=$(cat docs/commit_hash) - git add . - git config user.name tvm-bot - git config user.email 95660001+tvm-bot@users.noreply.github.com - git commit -m"deploying docs (apache/tvm@$COMMIT)" - git status - ''', - label: 'Unpack docs and update tvm-site' +stage('Docker Image Build') { + parallel( + {% for image in images %} + '{{ image.name }}': { + node('{{ image.platform }}') { + timeout(time: max_time, unit: 'MINUTES') { + init_git() + // We're purposefully not setting the built image here since they + // are not yet being uploaded to tlcpack + // {{ image.name }} = build_image('{{ image.name }}') + built_{{ image.name }} = build_image('{{ image.name }}'); + } + } + }, + {% endfor %} ) - - withCredentials([string( - credentialsId: 'docs-push-token', - variable: 'GITHUB_TOKEN', - )]) { - sh( - script: ''' - cd tvm-site - git remote add deploy https://$GITHUB_TOKEN:x-oauth-basic@github.com/apache/tvm-site.git - git push deploy $DOCS_DEPLOY_BRANCH || true - ''', - label: 'Upload docs to apache/tvm-site' - ) - } } - def deploy() { stage('Deploy') { if (env.BRANCH_NAME == 'main') { parallel( - {% call m.deploy_step( - name="Deploy Docs", - feature_flag="env.DOCS_DEPLOY_ENABLED == 'yes'", - ws="tvm/deploy-docs", - ) %} - init_git() - sh( - script: "./${jenkins_scripts_root}/s3.py --action download --bucket ${s3_bucket} --prefix ${s3_prefix}/docs --items docs.tgz", - label: 'Download docs folder from S3', - ) - deploy_docs() - {% endcall %} {% call m.deploy_step( name="Upload built Docker images", feature_flag="env.DEPLOY_DOCKER_IMAGES == 'yes' && rebuild_docker_images && upstream_revision != null", @@ -131,7 +178,7 @@ def deploy() { {% endcall %} {% call m.deploy_step( name="Tag tlcpackstaging to tlcpack", - feature_flag="env.DOCS_DEPLOY_ENABLED == 'yes'", + feature_flag="env.DEPLOY_DOCKER_IMAGES == 'yes'", ws="tvm/tag-images", ) %} init_git() @@ -172,3 +219,5 @@ def deploy() { } } } + +deploy() diff --git a/ci/jenkins/templates/gpu_jenkinsfile.groovy.j2 
b/ci/jenkins/templates/gpu_jenkinsfile.groovy.j2 new file mode 100644 index 000000000000..674936275347 --- /dev/null +++ b/ci/jenkins/templates/gpu_jenkinsfile.groovy.j2 @@ -0,0 +1,190 @@ +{% include "utils/base.groovy.j2" with context %} +{% import 'utils/macros.j2' as m with context -%} + +{% call m.invoke_build( + name='BUILD: GPU', + node='CPU-SMALL', + condition='!skip_ci', + ws='tvm/build-gpu', + docker_image='ci_gpu', +) %} + sh "${docker_run} --no-gpu ${ci_gpu} ./tests/scripts/task_config_build_gpu.sh build" + cmake_build("${ci_gpu} --no-gpu", 'build', '-j2') + make_standalone_crt("${ci_gpu} --no-gpu", 'build') + {{ m.upload_artifacts(tag='gpu', filenames=tvm_multilib + tvm_allvisible + microtvm_template_projects + crttest + standalone_crt) }} + + // compiler test + sh "rm -rf build" + sh "${docker_run} --no-gpu ${ci_gpu} ./tests/scripts/task_config_build_gpu_other.sh build" + cmake_build("${ci_gpu} --no-gpu", 'build', '-j2') + make_standalone_crt("${ci_gpu} --no-gpu", 'build') + {{ m.upload_artifacts(tag='gpu2', filenames=tvm_lib + crttest + standalone_crt) }} +{% endcall %} + +{% set test_method_names = [] %} + +{% call(shard_index, num_shards) m.sharded_test_step( + name="unittest: GPU", + num_shards=3, + node="GPU", + ws="tvm/ut-python-gpu", + platform="gpu", + docker_image="ci_gpu", + test_method_names=test_method_names, +) %} + {% if shard_index == 1 %} + {{ m.download_artifacts(tag='gpu2') }} + sh "${docker_run} --no-gpu ${ci_gpu} ./tests/scripts/task_config_build_gpu_other.sh build" + // These require a GPU to finish the build (i.e. CUDA needs to be load-able) + make_standalone_crt(ci_gpu, 'build') + // make_cpp_tests(ci_gpu, 'build') + // cpp_unittest(ci_gpu) + + sh "rm -rf build" + {{ m.download_artifacts(tag='gpu') }} + ci_setup(ci_gpu) + sh "${docker_run} --no-gpu ${ci_gpu} ./tests/scripts/task_config_build_gpu.sh build" + make_standalone_crt(ci_gpu, 'build') + make_cpp_tests(ci_gpu, 'build') + cpp_unittest(ci_gpu) + micro_cpp_unittest(ci_gpu) + {% else %} + {{ m.download_artifacts(tag='gpu') }} + ci_setup(ci_gpu) + {% endif %} + {% if shard_index == 2 or num_shards < 2 %} + sh ( + script: "${docker_run} ${ci_gpu} ./tests/scripts/task_java_unittest.sh", + label: 'Run Java unit tests', + ) + {% endif %} + sh ( + script: "${docker_run} ${ci_gpu} ./tests/scripts/task_python_unittest_gpuonly.sh", + label: 'Run Python GPU unit tests', + ) + sh ( + script: "${docker_run} ${ci_gpu} ./tests/scripts/task_python_integration_gpuonly.sh", + label: 'Run Python GPU integration tests', + ) +{% endcall %} + +{% call(shard_index, num_shards) m.sharded_test_step( + name="topi: GPU", + node="GPU", + num_shards=3, + ws="tvm/topi-python-gpu", + platform="gpu", + docker_image="ci_gpu", + test_method_names=test_method_names, +) %} + {{ m.download_artifacts(tag='gpu') }} + ci_setup(ci_gpu) + sh ( + script: "${docker_run} ${ci_gpu} ./tests/scripts/task_python_topi.sh", + label: 'Run TOPI tests', + ) +{% endcall %} + +{% call(shard_index, num_shards) m.sharded_test_step( + name="frontend: GPU", + node="GPU", + num_shards=6, + ws="tvm/frontend-python-gpu", + platform="gpu", + docker_image="ci_gpu", + test_method_names=test_method_names, +) %} + {{ m.download_artifacts(tag='gpu') }} + ci_setup(ci_gpu) + sh ( + script: "${docker_run} ${ci_gpu} ./tests/scripts/task_python_frontend.sh", + label: 'Run Python frontend tests', + ) +{% endcall %} + +{% call(shard_index, num_shards) m.sharded_test_step( + name="docs: GPU", + node="GPU", + num_shards=1, + ws="tvm/docs-python-gpu", + platform="gpu", + 
docker_image="ci_gpu", + condition="!skip_ci", + test_method_names=test_method_names, +) %} + {{ m.download_artifacts(tag='gpu') }} + ci_setup(ci_gpu) + sh ( + script: "${docker_run} ${ci_gpu} ./tests/scripts/task_python_docs.sh", + label: 'Build docs', + ) + {{ m.upload_artifacts(tag='docs', filenames=["docs.tgz"]) }} + sh( + script: "aws s3 cp --no-progress _docs s3://${s3_bucket}/${s3_prefix}/docs --recursive", + label: 'Upload docs to S3', + ) +{% endcall %} + + +{{ m.invoke_tests(test_method_names) }} + + +def deploy_docs() { + // Note: This code must stay in the Jenkinsfile to ensure that it runs + // from a trusted context only + sh( + script: ''' + set -eux + rm -rf tvm-site + git clone -b $DOCS_DEPLOY_BRANCH --depth=1 https://github.com/apache/tvm-site + cd tvm-site + git status + git checkout -B $DOCS_DEPLOY_BRANCH + + git ls-tree HEAD docs/ --name-only | grep -vP '^docs/v\\d' | xargs rm -rf + mkdir -p docs + tar xf ../docs.tgz -C docs + COMMIT=$(cat docs/commit_hash) + git add . + git config user.name tvm-bot + git config user.email 95660001+tvm-bot@users.noreply.github.com + git commit -m"deploying docs (apache/tvm@$COMMIT)" + git status + ''', + label: 'Unpack docs and update tvm-site' + ) + + withCredentials([string( + credentialsId: 'docs-push-token', + variable: 'GITHUB_TOKEN', + )]) { + sh( + script: ''' + cd tvm-site + git remote add deploy https://$GITHUB_TOKEN:x-oauth-basic@github.com/apache/tvm-site.git + git push deploy $DOCS_DEPLOY_BRANCH || true + ''', + label: 'Upload docs to apache/tvm-site' + ) + } +} + +def deploy() { + stage('Deploy') { + if (env.BRANCH_NAME == 'main') { + parallel( + {% call m.deploy_step( + name="Deploy Docs", + feature_flag="env.DOCS_DEPLOY_ENABLED == 'yes'", + ws="tvm/deploy-docs", + ) %} + init_git() + {{ m.download_artifacts(tag='docs') }} + deploy_docs() + {% endcall %} + ) + } + } +} + +deploy() diff --git a/ci/jenkins/templates/hexagon_jenkinsfile.groovy.j2 b/ci/jenkins/templates/hexagon_jenkinsfile.groovy.j2 new file mode 100644 index 000000000000..d34a169e2a0f --- /dev/null +++ b/ci/jenkins/templates/hexagon_jenkinsfile.groovy.j2 @@ -0,0 +1,47 @@ +{% include "utils/base.groovy.j2" with context %} +{% import 'utils/macros.j2' as m with context -%} + +{% call m.invoke_build( + name='BUILD: Hexagon', + node='CPU-SMALL', + condition='!skip_ci && is_docs_only_build != 1', + ws='tvm/build-hexagon', + docker_image='ci_hexagon', +) %} + sh ( + script: "${docker_run} ${ci_hexagon} ./tests/scripts/task_config_build_hexagon.sh build", + label: 'Create Hexagon cmake config', + ) + cmake_build(ci_hexagon, 'build', '-j2') + make_cpp_tests(ci_hexagon, 'build') + sh ( + script: "${docker_run} ${ci_hexagon} ./tests/scripts/task_build_hexagon_api.sh", + label: 'Build Hexagon API', + ) + {{ m.upload_artifacts(tag='hexagon', filenames=tvm_lib + cpptest + hexagon_api) }} +{% endcall %} + + +{% set test_method_names = [] %} + +{% call(shard_index, num_shards) m.sharded_test_step( + name="test: Hexagon", + node="CPU-SMALL", + ws="tvm/test-hexagon", + platform="hexagon", + docker_image="ci_hexagon", + test_method_names=test_method_names, + num_shards=8, +) %} + {{ m.download_artifacts(tag='hexagon') }} + ci_setup(ci_hexagon) + {% if shard_index == 1 %} + cpp_unittest(ci_hexagon) + {% endif %} + sh ( + script: "${docker_run} ${ci_hexagon} ./tests/scripts/task_python_hexagon.sh", + label: 'Run Hexagon tests', + ) +{% endcall %} + +{{ m.invoke_tests(test_method_names) -}} diff --git a/ci/jenkins/templates/i386_jenkinsfile.groovy.j2 
b/ci/jenkins/templates/i386_jenkinsfile.groovy.j2 new file mode 100644 index 000000000000..b1921600bf86 --- /dev/null +++ b/ci/jenkins/templates/i386_jenkinsfile.groovy.j2 @@ -0,0 +1,49 @@ +{% include "utils/base.groovy.j2" with context %} +{% import 'utils/macros.j2' as m with context -%} + +{% call m.invoke_build( + name='BUILD: i386', + node='CPU-SMALL', + condition='!skip_ci && is_docs_only_build != 1', + ws='tvm/build-i386', + docker_image='ci_i386', +) %} + sh ( + script: "${docker_run} ${ci_i386} ./tests/scripts/task_config_build_i386.sh build", + label: 'Create i386 cmake config', + ) + cmake_build(ci_i386, 'build', '-j2') + make_standalone_crt(ci_i386, 'build') + make_cpp_tests(ci_i386, 'build') + {{ m.upload_artifacts(tag='i386', filenames=tvm_multilib_tsim + standalone_crt + crttest + cpptest) }} +{% endcall %} + + +{% set test_method_names = [] %} + +{% call(shard_index, num_shards) m.sharded_test_step( + name="python: i386", + node="CPU-SMALL", + num_shards=3, + ws="tvm/integration-python-i386", + platform="i386", + docker_image="ci_i386", + test_method_names=test_method_names, +) %} + {{ m.download_artifacts(tag='i386') }} + ci_setup(ci_i386) + {% if shard_index == 1 %} + cpp_unittest(ci_i386) + micro_cpp_unittest(ci_i386) + {% endif %} + python_unittest(ci_i386) + sh ( + script: "${docker_run} ${ci_i386} ./tests/scripts/task_python_integration_i386only.sh", + label: 'Run i386 integration tests', + ) + {% if shard_index == 2 or num_shards < 2 %} + fsim_test(ci_i386) + {% endif %} +{% endcall %} + +{{ m.invoke_tests(test_method_names) -}} diff --git a/ci/jenkins/templates/lint_jenkinsfile.groovy.j2 b/ci/jenkins/templates/lint_jenkinsfile.groovy.j2 new file mode 100644 index 000000000000..0e2b4e5061b5 --- /dev/null +++ b/ci/jenkins/templates/lint_jenkinsfile.groovy.j2 @@ -0,0 +1,42 @@ +{% include "utils/base.groovy.j2" with context %} +{% import 'utils/macros.j2' as m with context -%} + +{% macro sharded_lint_step(name, num_shards, docker_image, node, ws) %} +{% for shard_index in range(1, num_shards + 1) %} + '{{ name }} {{ shard_index }} of {{ num_shards }}': { + node('{{ node }}') { + ws({{ m.per_exec_ws(ws) }}) { + init_git() + docker_init({{ docker_image }}) + timeout(time: max_time, unit: 'MINUTES') { + withEnv([ + 'TVM_NUM_SHARDS={{ num_shards }}', + 'TEST_STEP_NAME={{ name }}', + 'TVM_SHARD_INDEX={{ shard_index - 1 }}', + "SKIP_SLOW_TESTS=${skip_slow_tests}"], { + {{ caller() | trim | indent(width=6) }} + }) + } + } + } + }, +{% endfor %} +{% endmacro %} + +stage('Lint') { + parallel( + {% call sharded_lint_step( + name='Lint', + num_shards=2, + node='CPU-SMALL', + ws='tvm/lint', + docker_image='ci_lint', + ) + %} + sh ( + script: "${docker_run} ${ci_lint} ./tests/scripts/task_lint.sh", + label: 'Run lint', + ) + {% endcall %} + ) +} diff --git a/ci/jenkins/templates/minimal_jenkinsfile.groovy.j2 b/ci/jenkins/templates/minimal_jenkinsfile.groovy.j2 new file mode 100644 index 000000000000..073a109c8232 --- /dev/null +++ b/ci/jenkins/templates/minimal_jenkinsfile.groovy.j2 @@ -0,0 +1,38 @@ +{% include "utils/base.groovy.j2" with context %} +{% import 'utils/macros.j2' as m with context -%} + +{% call m.invoke_build( + name='BUILD: CPU MINIMAL', + node='CPU-SMALL', + condition='!skip_ci && is_docs_only_build != 1', + ws='tvm/build-cpu-minimal', + docker_image='ci_minimal', +) %} + sh ( + script: "${docker_run} ${ci_minimal} ./tests/scripts/task_config_build_minimal.sh build", + label: 'Create CPU minimal cmake config', + ) + cmake_build(ci_minimal, 'build', '-j2') + 
make_cpp_tests(ci_minimal, 'build') + {{ m.upload_artifacts(tag='cpu-minimal', filenames=tvm_lib + tvm_allvisible + cpptest) }} +{% endcall %} + + +{% set test_method_names = [] %} + +{% call(shard_index, num_shards) m.sharded_test_step( + name="unittest: CPU MINIMAL", + node="CPU-SMALL", + num_shards=1, + ws="tvm/ut-python-cpu-minimal", + platform="minimal", + docker_image="ci_minimal", + test_method_names=test_method_names, +) %} + {{ m.download_artifacts(tag='cpu-minimal') }} + cpp_unittest(ci_minimal) + python_unittest(ci_minimal) +{% endcall %} + + +{{ m.invoke_tests(test_method_names) -}} diff --git a/ci/jenkins/templates/riscv_jenkinsfile.groovy.j2 b/ci/jenkins/templates/riscv_jenkinsfile.groovy.j2 new file mode 100644 index 000000000000..c1655a786e07 --- /dev/null +++ b/ci/jenkins/templates/riscv_jenkinsfile.groovy.j2 @@ -0,0 +1,46 @@ +{% include "utils/base.groovy.j2" with context %} +{% import 'utils/macros.j2' as m with context -%} + +{% call m.invoke_build( + name='BUILD: RISC-V', + node='CPU-SMALL', + condition='!skip_ci && is_docs_only_build != 1', + ws='tvm/build-riscv', + docker_image='ci_riscv', +) %} + sh ( + script: "${docker_run} ${ci_riscv} ./tests/scripts/task_config_build_riscv.sh build", + label: 'Create RISC-V cmake config', + ) + cmake_build(ci_riscv, 'build', '-j2') + make_standalone_crt(ci_riscv, 'build') + make_cpp_tests(ci_riscv, 'build') + {{ m.upload_artifacts(tag='riscv', filenames=tvm_lib + tvm_allvisible + standalone_crt + crttest + cpptest + microtvm_template_projects) }} +{% endcall %} + + + +{% set test_method_names = [] %} + +{% call(shard_index, num_shards) m.sharded_test_step( + name="test: RISC-V", + node="CPU-SMALL", + ws="tvm/test-riscv", + platform="riscv", + docker_image="ci_riscv", + num_shards=1, + test_method_names=test_method_names, +) %} + {{ m.download_artifacts(tag='riscv') }} + ci_setup(ci_riscv) + {% if shard_index == 1%} + cpp_unittest(ci_cortexm) + micro_cpp_unittest(ci_cortexm) + {% endif %} + sh ( + script: "${docker_run} ${ci_riscv} ./tests/scripts/task_riscv_microtvm.sh", + label: 'Run microTVM tests', + ) +{% endcall %} + +{{ m.invoke_tests(test_method_names) -}} diff --git a/ci/jenkins/templates/utils/Build.groovy.j2 b/ci/jenkins/templates/utils/Build.groovy.j2 new file mode 100644 index 000000000000..362e8341350f --- /dev/null +++ b/ci/jenkins/templates/utils/Build.groovy.j2 @@ -0,0 +1,57 @@ +def ci_setup(image) { + sh ( + script: "${docker_run} ${image} ./tests/scripts/task_clear_pytest.sh", + label: 'Clean up old workspace', + ) +} + +def python_unittest(image) { + sh ( + script: "${docker_run} ${image} ./tests/scripts/task_python_unittest.sh", + label: 'Run Python unit tests', + ) +} + +def fsim_test(image) { + sh ( + script: "${docker_run} ${image} ./tests/scripts/task_python_vta_fsim.sh", + label: 'Run VTA tests in FSIM', + ) +} + +def make_standalone_crt(image, build_dir) { + sh ( + script: """ + set -eux + ${docker_run} ${image} python3 ./tests/scripts/task_build.py \ + --sccache-bucket tvm-sccache-prod \ + --cmake-target standalone_crt \ + --build-dir build + ${docker_run} ${image} python3 ./tests/scripts/task_build.py \ + --sccache-bucket tvm-sccache-prod \ + --cmake-target crttest \ + --build-dir build + """, + label: 'Make standalone CRT', + ) +} + +def make_cpp_tests(image, build_dir) { + sh ( + script: """ + set -eux + ${docker_run} ${image} python3 ./tests/scripts/task_build.py \ + --sccache-bucket tvm-sccache-prod \ + --cmake-target cpptest \ + --build-dir ${build_dir} + """, + label: 'Make C++ tests', + ) +} + 
+def cmake_build(image, path, make_flag) { + sh ( + script: "${docker_run} --env CI_NUM_EXECUTORS ${image} ./tests/scripts/task_build.py --sccache-bucket tvm-sccache-prod", + label: 'Run cmake build', + ) +} diff --git a/ci/jenkins/Prepare.groovy.j2 b/ci/jenkins/templates/utils/Prepare.groovy.j2 similarity index 98% rename from ci/jenkins/Prepare.groovy.j2 rename to ci/jenkins/templates/utils/Prepare.groovy.j2 index 6a82a887ede6..099bde5bc770 100644 --- a/ci/jenkins/Prepare.groovy.j2 +++ b/ci/jenkins/templates/utils/Prepare.groovy.j2 @@ -75,7 +75,8 @@ def docker_init(image) { if (image.contains("amazonaws.com")) { // If this string is in the image name it's from ECR and needs to be pulled // with the right credentials - ecr_pull(image) + // ecr_pull(image) + sh "echo Pulling from AWS is not implemented && exit 1" } else { sh( script: """ diff --git a/ci/jenkins/templates/utils/Test.groovy.j2 b/ci/jenkins/templates/utils/Test.groovy.j2 new file mode 100644 index 000000000000..1e70869cfc2c --- /dev/null +++ b/ci/jenkins/templates/utils/Test.groovy.j2 @@ -0,0 +1,13 @@ +def cpp_unittest(image) { + sh ( + script: "${docker_run} --env CI_NUM_EXECUTORS ${image} ./tests/scripts/task_cpp_unittest.sh", + label: 'Run C++ tests', + ) +} + +def micro_cpp_unittest(image) { + sh ( + script: "${docker_run} --env CI_NUM_EXECUTORS ${image} ./tests/scripts/task_microtvm_cpp_tests.sh build", + label: 'Run microTVM C++ tests', + ) +} diff --git a/ci/jenkins/templates/utils/base.groovy.j2 b/ci/jenkins/templates/utils/base.groovy.j2 index 304b6c4b378c..a5c53b385885 100644 --- a/ci/jenkins/templates/utils/base.groovy.j2 +++ b/ci/jenkins/templates/utils/base.groovy.j2 @@ -99,9 +99,9 @@ jenkins_scripts_root = "ci/scripts/jenkins" // General note: Jenkins has limits on the size of a method (or top level code) // that are pretty strict, so most usage of groovy methods in these templates // are purely to satisfy the JVM -{% include "ci/jenkins/Prepare.groovy.j2" %} -{% include "ci/jenkins/Build.groovy.j2" %} -{% include "ci/jenkins/Test.groovy.j2" %} +{% include "utils/Prepare.groovy.j2" %} +{% include "utils/Build.groovy.j2" %} +{% include "utils/Test.groovy.j2" %} cancel_previous_build() diff --git a/ci/jenkins/templates/wasm_jenkinsfile.groovy.j2 b/ci/jenkins/templates/wasm_jenkinsfile.groovy.j2 new file mode 100644 index 000000000000..161375d52d26 --- /dev/null +++ b/ci/jenkins/templates/wasm_jenkinsfile.groovy.j2 @@ -0,0 +1,24 @@ +{% include "utils/base.groovy.j2" with context %} +{% import 'utils/macros.j2' as m with context -%} + +{% call m.invoke_build( + name='BUILD: WASM', + node='CPU-SMALL', + condition='!skip_ci && is_docs_only_build != 1', + ws='tvm/build-wasm', + docker_image='ci_wasm', +) %} + sh ( + script: "${docker_run} ${ci_wasm} ./tests/scripts/task_config_build_wasm.sh build", + label: 'Create WASM cmake config', + ) + cmake_build(ci_wasm, 'build', '-j2') + make_standalone_crt(ci_wasm, 'build') + make_cpp_tests(ci_wasm, 'build') + cpp_unittest(ci_wasm) + ci_setup(ci_wasm) + sh ( + script: "${docker_run} ${ci_wasm} ./tests/scripts/task_web_wasm.sh", + label: 'Run WASM lint and tests', + ) +{% endcall %} From 68104857a5f1f9e6a1ce4d00b0666bc114fc6e45 Mon Sep 17 00:00:00 2001 From: driazati Date: Fri, 2 Dec 2022 14:17:50 -0800 Subject: [PATCH 3/5] Comments --- ci/jenkins/generated/arm_jenkinsfile.groovy | 18 +++++++++++++++++- .../generated/cortexm_jenkinsfile.groovy | 18 +++++++++++++++++- ci/jenkins/generated/cpu_jenkinsfile.groovy | 18 +++++++++++++++++- 
ci/jenkins/generated/docker_jenkinsfile.groovy | 18 +++++++++++++++++- ci/jenkins/generated/gpu_jenkinsfile.groovy | 18 +++++++++++++++++- .../generated/hexagon_jenkinsfile.groovy | 18 +++++++++++++++++- ci/jenkins/generated/i386_jenkinsfile.groovy | 18 +++++++++++++++++- ci/jenkins/generated/lint_jenkinsfile.groovy | 18 +++++++++++++++++- .../generated/minimal_jenkinsfile.groovy | 18 +++++++++++++++++- ci/jenkins/generated/riscv_jenkinsfile.groovy | 18 +++++++++++++++++- ci/jenkins/generated/wasm_jenkinsfile.groovy | 18 +++++++++++++++++- ci/jenkins/templates/arm_jenkinsfile.groovy.j2 | 16 ++++++++++++++++ .../templates/cortexm_jenkinsfile.groovy.j2 | 16 ++++++++++++++++ ci/jenkins/templates/cpu_jenkinsfile.groovy.j2 | 16 ++++++++++++++++ .../templates/docker_jenkinsfile.groovy.j2 | 16 ++++++++++++++++ ci/jenkins/templates/gpu_jenkinsfile.groovy.j2 | 16 ++++++++++++++++ .../templates/hexagon_jenkinsfile.groovy.j2 | 16 ++++++++++++++++ .../templates/i386_jenkinsfile.groovy.j2 | 16 ++++++++++++++++ .../templates/lint_jenkinsfile.groovy.j2 | 16 ++++++++++++++++ .../templates/minimal_jenkinsfile.groovy.j2 | 16 ++++++++++++++++ .../templates/riscv_jenkinsfile.groovy.j2 | 16 ++++++++++++++++ .../templates/wasm_jenkinsfile.groovy.j2 | 16 ++++++++++++++++ docker/dev_common.sh | 5 ++--- 23 files changed, 365 insertions(+), 14 deletions(-) diff --git a/ci/jenkins/generated/arm_jenkinsfile.groovy b/ci/jenkins/generated/arm_jenkinsfile.groovy index cd6df5b208a9..0bb370071270 100644 --- a/ci/jenkins/generated/arm_jenkinsfile.groovy +++ b/ci/jenkins/generated/arm_jenkinsfile.groovy @@ -1,3 +1,19 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. #!groovy // -*- mode: groovy -*- @@ -45,7 +61,7 @@ // 'python3 jenkins/generate.py' // Note: This timestamp is here to ensure that updates to the Jenkinsfile are // always rebased on main before merging: -// Generated at 2022-12-01T16:52:32.888364 +// Generated at 2022-12-02T14:16:27.362930 import org.jenkinsci.plugins.pipeline.modeldefinition.Utils ci_lint = 'tlcpack/ci-lint:20221013-060115-61c9742ea' diff --git a/ci/jenkins/generated/cortexm_jenkinsfile.groovy b/ci/jenkins/generated/cortexm_jenkinsfile.groovy index 36ec0bd38b45..4bbfe723d45c 100644 --- a/ci/jenkins/generated/cortexm_jenkinsfile.groovy +++ b/ci/jenkins/generated/cortexm_jenkinsfile.groovy @@ -1,3 +1,19 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. 
diff --git a/ci/jenkins/generated/cortexm_jenkinsfile.groovy b/ci/jenkins/generated/cortexm_jenkinsfile.groovy
index 36ec0bd38b45..4bbfe723d45c 100644
--- a/ci/jenkins/generated/cortexm_jenkinsfile.groovy
+++ b/ci/jenkins/generated/cortexm_jenkinsfile.groovy
@@ -1,3 +1,19 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
 #!groovy
 // -*- mode: groovy -*-
 
@@ -45,7 +61,7 @@
 // 'python3 jenkins/generate.py'
 // Note: This timestamp is here to ensure that updates to the Jenkinsfile are
 // always rebased on main before merging:
-// Generated at 2022-12-01T17:04:06.689168
+// Generated at 2022-12-02T14:16:27.413674
 import org.jenkinsci.plugins.pipeline.modeldefinition.Utils
 
 ci_lint = 'tlcpack/ci-lint:20221013-060115-61c9742ea'
diff --git a/ci/jenkins/generated/cpu_jenkinsfile.groovy b/ci/jenkins/generated/cpu_jenkinsfile.groovy
index 8aee3c26ab11..fd5b16bd512d 100644
--- a/ci/jenkins/generated/cpu_jenkinsfile.groovy
+++ b/ci/jenkins/generated/cpu_jenkinsfile.groovy
@@ -1,3 +1,19 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
 #!groovy
 // -*- mode: groovy -*-
 
@@ -45,7 +61,7 @@
 // 'python3 jenkins/generate.py'
 // Note: This timestamp is here to ensure that updates to the Jenkinsfile are
 // always rebased on main before merging:
-// Generated at 2022-12-01T17:05:22.995625
+// Generated at 2022-12-02T14:16:27.519764
 import org.jenkinsci.plugins.pipeline.modeldefinition.Utils
 
 ci_lint = 'tlcpack/ci-lint:20221013-060115-61c9742ea'
diff --git a/ci/jenkins/generated/docker_jenkinsfile.groovy b/ci/jenkins/generated/docker_jenkinsfile.groovy
index 593639163cc6..37382f1724bc 100644
--- a/ci/jenkins/generated/docker_jenkinsfile.groovy
+++ b/ci/jenkins/generated/docker_jenkinsfile.groovy
@@ -1,3 +1,19 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
 #!groovy
 // -*- mode: groovy -*-
 
@@ -45,7 +61,7 @@
 // 'python3 jenkins/generate.py'
 // Note: This timestamp is here to ensure that updates to the Jenkinsfile are
 // always rebased on main before merging:
-// Generated at 2022-12-01T17:07:24.754092
+// Generated at 2022-12-02T14:16:27.485098
 import org.jenkinsci.plugins.pipeline.modeldefinition.Utils
 
 ci_lint = 'tlcpack/ci-lint:20221013-060115-61c9742ea'
diff --git a/ci/jenkins/generated/gpu_jenkinsfile.groovy b/ci/jenkins/generated/gpu_jenkinsfile.groovy
index c3f3723d8b53..1e3e8bb7feb4 100644
--- a/ci/jenkins/generated/gpu_jenkinsfile.groovy
+++ b/ci/jenkins/generated/gpu_jenkinsfile.groovy
@@ -1,3 +1,19 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
 #!groovy
 // -*- mode: groovy -*-
 
@@ -45,7 +61,7 @@
 // 'python3 jenkins/generate.py'
 // Note: This timestamp is here to ensure that updates to the Jenkinsfile are
 // always rebased on main before merging:
-// Generated at 2022-12-01T17:07:24.698570
+// Generated at 2022-12-02T14:16:27.431129
 import org.jenkinsci.plugins.pipeline.modeldefinition.Utils
 
 ci_lint = 'tlcpack/ci-lint:20221013-060115-61c9742ea'
diff --git a/ci/jenkins/generated/hexagon_jenkinsfile.groovy b/ci/jenkins/generated/hexagon_jenkinsfile.groovy
index 4496225537c1..84be48d5ae87 100644
--- a/ci/jenkins/generated/hexagon_jenkinsfile.groovy
+++ b/ci/jenkins/generated/hexagon_jenkinsfile.groovy
@@ -1,3 +1,19 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
 #!groovy
 // -*- mode: groovy -*-
 
@@ -45,7 +61,7 @@
 // 'python3 jenkins/generate.py'
 // Note: This timestamp is here to ensure that updates to the Jenkinsfile are
 // always rebased on main before merging:
-// Generated at 2022-12-01T17:07:24.608492
+// Generated at 2022-12-02T14:16:27.345081
 import org.jenkinsci.plugins.pipeline.modeldefinition.Utils
 
 ci_lint = 'tlcpack/ci-lint:20221013-060115-61c9742ea'
diff --git a/ci/jenkins/generated/i386_jenkinsfile.groovy b/ci/jenkins/generated/i386_jenkinsfile.groovy
index 5335b0af4e01..ab6f2666fa83 100644
--- a/ci/jenkins/generated/i386_jenkinsfile.groovy
+++ b/ci/jenkins/generated/i386_jenkinsfile.groovy
@@ -1,3 +1,19 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
 #!groovy
 // -*- mode: groovy -*-
 
@@ -45,7 +61,7 @@
 // 'python3 jenkins/generate.py'
 // Note: This timestamp is here to ensure that updates to the Jenkinsfile are
 // always rebased on main before merging:
-// Generated at 2022-12-01T17:07:24.663072
+// Generated at 2022-12-02T14:16:27.397208
 import org.jenkinsci.plugins.pipeline.modeldefinition.Utils
 
 ci_lint = 'tlcpack/ci-lint:20221013-060115-61c9742ea'
diff --git a/ci/jenkins/generated/lint_jenkinsfile.groovy b/ci/jenkins/generated/lint_jenkinsfile.groovy
index 6173049478d0..966227c057fb 100644
--- a/ci/jenkins/generated/lint_jenkinsfile.groovy
+++ b/ci/jenkins/generated/lint_jenkinsfile.groovy
@@ -1,3 +1,19 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
 #!groovy
 // -*- mode: groovy -*-
 
@@ -45,7 +61,7 @@
 // 'python3 jenkins/generate.py'
 // Note: This timestamp is here to ensure that updates to the Jenkinsfile are
 // always rebased on main before merging:
-// Generated at 2022-12-01T17:07:24.720777
+// Generated at 2022-12-02T14:16:27.452732
 import org.jenkinsci.plugins.pipeline.modeldefinition.Utils
 
 ci_lint = 'tlcpack/ci-lint:20221013-060115-61c9742ea'
diff --git a/ci/jenkins/generated/minimal_jenkinsfile.groovy b/ci/jenkins/generated/minimal_jenkinsfile.groovy
index c5f625248424..c304de203566 100644
--- a/ci/jenkins/generated/minimal_jenkinsfile.groovy
+++ b/ci/jenkins/generated/minimal_jenkinsfile.groovy
@@ -1,3 +1,19 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
 #!groovy
 // -*- mode: groovy -*-
 
@@ -45,7 +61,7 @@
 // 'python3 jenkins/generate.py'
 // Note: This timestamp is here to ensure that updates to the Jenkinsfile are
 // always rebased on main before merging:
-// Generated at 2022-12-01T17:07:24.737352
+// Generated at 2022-12-02T14:16:27.468739
 import org.jenkinsci.plugins.pipeline.modeldefinition.Utils
 
 ci_lint = 'tlcpack/ci-lint:20221013-060115-61c9742ea'
diff --git a/ci/jenkins/generated/riscv_jenkinsfile.groovy b/ci/jenkins/generated/riscv_jenkinsfile.groovy
index 5892cba00e41..d84d8af825e2 100644
--- a/ci/jenkins/generated/riscv_jenkinsfile.groovy
+++ b/ci/jenkins/generated/riscv_jenkinsfile.groovy
@@ -1,3 +1,19 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
 #!groovy
 // -*- mode: groovy -*-
 
@@ -45,7 +61,7 @@
 // 'python3 jenkins/generate.py'
 // Note: This timestamp is here to ensure that updates to the Jenkinsfile are
 // always rebased on main before merging:
-// Generated at 2022-12-01T17:07:24.646306
+// Generated at 2022-12-02T14:16:27.381161
 import org.jenkinsci.plugins.pipeline.modeldefinition.Utils
 
 ci_lint = 'tlcpack/ci-lint:20221013-060115-61c9742ea'
diff --git a/ci/jenkins/generated/wasm_jenkinsfile.groovy b/ci/jenkins/generated/wasm_jenkinsfile.groovy
index 773154010c16..69cfd1868018 100644
--- a/ci/jenkins/generated/wasm_jenkinsfile.groovy
+++ b/ci/jenkins/generated/wasm_jenkinsfile.groovy
@@ -1,3 +1,19 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
 #!groovy
 // -*- mode: groovy -*-
 
@@ -45,7 +61,7 @@
 // 'python3 jenkins/generate.py'
 // Note: This timestamp is here to ensure that updates to the Jenkinsfile are
 // always rebased on main before merging:
-// Generated at 2022-12-01T17:07:24.772820
+// Generated at 2022-12-02T14:16:27.503268
 import org.jenkinsci.plugins.pipeline.modeldefinition.Utils
 
 ci_lint = 'tlcpack/ci-lint:20221013-060115-61c9742ea'
diff --git a/ci/jenkins/templates/arm_jenkinsfile.groovy.j2 b/ci/jenkins/templates/arm_jenkinsfile.groovy.j2
index 8840ede5921a..d6c4b845fce8 100644
--- a/ci/jenkins/templates/arm_jenkinsfile.groovy.j2
+++ b/ci/jenkins/templates/arm_jenkinsfile.groovy.j2
@@ -1,3 +1,19 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
 {% include "utils/base.groovy.j2" with context %}
 {% import 'utils/macros.j2' as m with context -%}
 
diff --git a/ci/jenkins/templates/cortexm_jenkinsfile.groovy.j2 b/ci/jenkins/templates/cortexm_jenkinsfile.groovy.j2
index c982ee8176b7..c731fdbe8cdf 100644
--- a/ci/jenkins/templates/cortexm_jenkinsfile.groovy.j2
+++ b/ci/jenkins/templates/cortexm_jenkinsfile.groovy.j2
@@ -1,3 +1,19 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
 {% include "utils/base.groovy.j2" with context %}
 {% import 'utils/macros.j2' as m with context -%}
 
diff --git a/ci/jenkins/templates/cpu_jenkinsfile.groovy.j2 b/ci/jenkins/templates/cpu_jenkinsfile.groovy.j2
index 2443ad3a42f0..61c86109d54c 100644
--- a/ci/jenkins/templates/cpu_jenkinsfile.groovy.j2
+++ b/ci/jenkins/templates/cpu_jenkinsfile.groovy.j2
@@ -1,3 +1,19 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
 {% include "utils/base.groovy.j2" with context %}
 {% import 'utils/macros.j2' as m with context -%}
 
diff --git a/ci/jenkins/templates/docker_jenkinsfile.groovy.j2 b/ci/jenkins/templates/docker_jenkinsfile.groovy.j2
index 21013e546340..016a1c7bc8e9 100644
--- a/ci/jenkins/templates/docker_jenkinsfile.groovy.j2
+++ b/ci/jenkins/templates/docker_jenkinsfile.groovy.j2
@@ -1,3 +1,19 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
 {% include "utils/base.groovy.j2" with context %}
 {% import 'utils/macros.j2' as m with context -%}
 
diff --git a/ci/jenkins/templates/gpu_jenkinsfile.groovy.j2 b/ci/jenkins/templates/gpu_jenkinsfile.groovy.j2
index 674936275347..4a11a1bc427a 100644
--- a/ci/jenkins/templates/gpu_jenkinsfile.groovy.j2
+++ b/ci/jenkins/templates/gpu_jenkinsfile.groovy.j2
@@ -1,3 +1,19 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
 {% include "utils/base.groovy.j2" with context %}
 {% import 'utils/macros.j2' as m with context -%}
 
diff --git a/ci/jenkins/templates/hexagon_jenkinsfile.groovy.j2 b/ci/jenkins/templates/hexagon_jenkinsfile.groovy.j2
index d34a169e2a0f..140c227154a1 100644
--- a/ci/jenkins/templates/hexagon_jenkinsfile.groovy.j2
+++ b/ci/jenkins/templates/hexagon_jenkinsfile.groovy.j2
@@ -1,3 +1,19 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
 {% include "utils/base.groovy.j2" with context %}
 {% import 'utils/macros.j2' as m with context -%}
 
diff --git a/ci/jenkins/templates/i386_jenkinsfile.groovy.j2 b/ci/jenkins/templates/i386_jenkinsfile.groovy.j2
index b1921600bf86..1825e0cbd6bd 100644
--- a/ci/jenkins/templates/i386_jenkinsfile.groovy.j2
+++ b/ci/jenkins/templates/i386_jenkinsfile.groovy.j2
@@ -1,3 +1,19 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
 {% include "utils/base.groovy.j2" with context %}
 {% import 'utils/macros.j2' as m with context -%}
 
diff --git a/ci/jenkins/templates/lint_jenkinsfile.groovy.j2 b/ci/jenkins/templates/lint_jenkinsfile.groovy.j2
index 0e2b4e5061b5..7423ee171007 100644
--- a/ci/jenkins/templates/lint_jenkinsfile.groovy.j2
+++ b/ci/jenkins/templates/lint_jenkinsfile.groovy.j2
@@ -1,3 +1,19 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
 {% include "utils/base.groovy.j2" with context %}
 {% import 'utils/macros.j2' as m with context -%}
 
diff --git a/ci/jenkins/templates/minimal_jenkinsfile.groovy.j2 b/ci/jenkins/templates/minimal_jenkinsfile.groovy.j2
index 073a109c8232..f6734f060993 100644
--- a/ci/jenkins/templates/minimal_jenkinsfile.groovy.j2
+++ b/ci/jenkins/templates/minimal_jenkinsfile.groovy.j2
@@ -1,3 +1,19 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
 {% include "utils/base.groovy.j2" with context %}
 {% import 'utils/macros.j2' as m with context -%}
 
diff --git a/ci/jenkins/templates/riscv_jenkinsfile.groovy.j2 b/ci/jenkins/templates/riscv_jenkinsfile.groovy.j2
index c1655a786e07..35bb84cdf7e5 100644
--- a/ci/jenkins/templates/riscv_jenkinsfile.groovy.j2
+++ b/ci/jenkins/templates/riscv_jenkinsfile.groovy.j2
@@ -1,3 +1,19 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
 {% include "utils/base.groovy.j2" with context %}
 {% import 'utils/macros.j2' as m with context -%}
 
diff --git a/ci/jenkins/templates/wasm_jenkinsfile.groovy.j2 b/ci/jenkins/templates/wasm_jenkinsfile.groovy.j2
index 161375d52d26..085fc12220b5 100644
--- a/ci/jenkins/templates/wasm_jenkinsfile.groovy.j2
+++ b/ci/jenkins/templates/wasm_jenkinsfile.groovy.j2
@@ -1,3 +1,19 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
 {% include "utils/base.groovy.j2" with context %}
 {% import 'utils/macros.j2' as m with context -%}
 
diff --git a/docker/dev_common.sh b/docker/dev_common.sh
index 016d49e9f792..1ec04ecc90d8 100644
--- a/docker/dev_common.sh
+++ b/docker/dev_common.sh
@@ -29,9 +29,8 @@
 GIT_TOPLEVEL=$(cd $(dirname ${BASH_SOURCE[0]}) && git rev-parse --show-toplevel)
 
 function lookup_image_spec() {
-    img_line=$(python3 "${GIT_TOPLEVEL}/ci/jenkins/data.py" "$1")
-    if [ -n "${img_line}" ]; then
-        img_spec=$(echo "${img_line}" | sed -E "s/${1} = \"([^\"]*)\"/\1/")
+    img_spec=$(python3 "${GIT_TOPLEVEL}/ci/jenkins/data.py" "$1")
+    if [ -n "${img_spec}" ]; then
         has_similar_docker_image=1
         docker inspect "${1}" &>/dev/null || has_similar_docker_image=0
         if [ ${has_similar_docker_image} -ne 0 ]; then
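Note how the new lookup_image_spec above takes the tag straight from data.py's stdout instead of sed-parsing a `ci_xxx = "..."` assignment out of the script's output. A rough Python rendering of the same flow, for illustration (the subprocess invocation mirrors the shell above; the error handling is an assumption):

    # Rough Python equivalent of the simplified shell lookup above; data.py
    # prints the bare image tag for a known name and exits non-zero otherwise.
    import subprocess
    from typing import Optional

    def lookup_image_spec(name: str) -> Optional[str]:
        proc = subprocess.run(
            ["python3", "ci/jenkins/data.py", name],
            capture_output=True,
            text=True,
        )
        if proc.returncode != 0:
            return None  # unknown image name
        # stdout is already the full tag, e.g. "tlcpack/ci-arm:2022...",
        # so there is no longer any sed post-processing to do.
        return proc.stdout.strip()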

From c31aba8aed9bf8a0854c341cc28fe344e319ffd6 Mon Sep 17 00:00:00 2001
From: driazati
Date: Fri, 2 Dec 2022 17:41:09 -0800
Subject: [PATCH 4/5] Fix lint

---
 ci/jenkins/templates/arm_jenkinsfile.groovy.j2 | 2 +-
 ci/jenkins/templates/cortexm_jenkinsfile.groovy.j2 | 2 +-
 ci/jenkins/templates/cpu_jenkinsfile.groovy.j2 | 2 +-
 3 files changed, 3 insertions(+), 3 deletions(-)

diff --git a/ci/jenkins/templates/arm_jenkinsfile.groovy.j2 b/ci/jenkins/templates/arm_jenkinsfile.groovy.j2
index d6c4b845fce8..6cffd5cbbe66 100644
--- a/ci/jenkins/templates/arm_jenkinsfile.groovy.j2
+++ b/ci/jenkins/templates/arm_jenkinsfile.groovy.j2
@@ -96,4 +96,4 @@
 )
 {% endcall %}
 
-{{ m.invoke_tests(test_method_names) -}}
\ No newline at end of file
+{{ m.invoke_tests(test_method_names) -}}
diff --git a/ci/jenkins/templates/cortexm_jenkinsfile.groovy.j2 b/ci/jenkins/templates/cortexm_jenkinsfile.groovy.j2
index c731fdbe8cdf..acbc147f408e 100644
--- a/ci/jenkins/templates/cortexm_jenkinsfile.groovy.j2
+++ b/ci/jenkins/templates/cortexm_jenkinsfile.groovy.j2
@@ -61,4 +61,4 @@
 )
 {% endcall %}
 
-{{ m.invoke_tests(test_method_names) -}}
\ No newline at end of file
+{{ m.invoke_tests(test_method_names) -}}
diff --git a/ci/jenkins/templates/cpu_jenkinsfile.groovy.j2 b/ci/jenkins/templates/cpu_jenkinsfile.groovy.j2
index 61c86109d54c..fa2be6584ff0 100644
--- a/ci/jenkins/templates/cpu_jenkinsfile.groovy.j2
+++ b/ci/jenkins/templates/cpu_jenkinsfile.groovy.j2
@@ -94,4 +94,4 @@
 )
 {% endcall %}
 
-{{ m.invoke_tests(test_method_names) -}}
\ No newline at end of file
+{{ m.invoke_tests(test_method_names) -}}
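The "\ No newline at end of file" markers removed in the lint fix above are git's indicator that a file's last line is unterminated; the fix simply appends the missing newline. For reference, a check of this kind can be as small as the following sketch (not TVM's actual lint rule):

    # Sketch of a trailing-newline check; TVM's real linter may differ.
    from pathlib import Path

    def has_trailing_newline(path: Path) -> bool:
        data = path.read_bytes()
        return len(data) == 0 or data.endswith(b"\n")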

From 90ce20555703d10fe8b644c0096bd6f6bb5bf4fd Mon Sep 17 00:00:00 2001
From: driazati
Date: Mon, 5 Dec 2022 09:37:43 -0800
Subject: [PATCH 5/5] Fix tests

---
 ci/jenkins/generated/arm_jenkinsfile.groovy | 3 +--
 ci/jenkins/generated/cortexm_jenkinsfile.groovy | 3 +--
 ci/jenkins/generated/cpu_jenkinsfile.groovy | 3 +--
 ci/jenkins/generated/docker_jenkinsfile.groovy | 3 +--
 ci/jenkins/generated/gpu_jenkinsfile.groovy | 3 +--
 ci/jenkins/generated/hexagon_jenkinsfile.groovy | 3 +--
 ci/jenkins/generated/i386_jenkinsfile.groovy | 3 +--
 ci/jenkins/generated/lint_jenkinsfile.groovy | 3 +--
 ci/jenkins/generated/minimal_jenkinsfile.groovy | 6 +++---
 ci/jenkins/generated/riscv_jenkinsfile.groovy | 3 +--
 ci/jenkins/generated/wasm_jenkinsfile.groovy | 3 +--
 ci/jenkins/templates/minimal_jenkinsfile.groovy.j2 | 3 ++-
 ci/jenkins/templates/utils/base.groovy.j2 | 1 -
 ci/scripts/jenkins/open_docker_update_pr.py | 8 ++++----
 tests/python/ci/test_ci.py | 4 ++--
 15 files changed, 21 insertions(+), 31 deletions(-)

diff --git a/ci/jenkins/generated/arm_jenkinsfile.groovy b/ci/jenkins/generated/arm_jenkinsfile.groovy
index 0bb370071270..f387687528c0 100644
--- a/ci/jenkins/generated/arm_jenkinsfile.groovy
+++ b/ci/jenkins/generated/arm_jenkinsfile.groovy
@@ -14,7 +14,6 @@
 // KIND, either express or implied.  See the License for the
 // specific language governing permissions and limitations
 // under the License.
-#!groovy
 // -*- mode: groovy -*-
 
 // Licensed to the Apache Software Foundation (ASF) under one
@@ -61,7 +60,7 @@
 // 'python3 jenkins/generate.py'
 // Note: This timestamp is here to ensure that updates to the Jenkinsfile are
 // always rebased on main before merging:
-// Generated at 2022-12-02T14:16:27.362930
+// Generated at 2022-12-05T14:48:42.092397
 import org.jenkinsci.plugins.pipeline.modeldefinition.Utils
 
 ci_lint = 'tlcpack/ci-lint:20221013-060115-61c9742ea'
diff --git a/ci/jenkins/generated/cortexm_jenkinsfile.groovy b/ci/jenkins/generated/cortexm_jenkinsfile.groovy
index 4bbfe723d45c..76dbbbb7a3d8 100644
--- a/ci/jenkins/generated/cortexm_jenkinsfile.groovy
+++ b/ci/jenkins/generated/cortexm_jenkinsfile.groovy
@@ -14,7 +14,6 @@
 // KIND, either express or implied.  See the License for the
 // specific language governing permissions and limitations
 // under the License.
-#!groovy
 // -*- mode: groovy -*-
 
 // Licensed to the Apache Software Foundation (ASF) under one
@@ -61,7 +60,7 @@
 // 'python3 jenkins/generate.py'
 // Note: This timestamp is here to ensure that updates to the Jenkinsfile are
 // always rebased on main before merging:
-// Generated at 2022-12-02T14:16:27.413674
+// Generated at 2022-12-05T14:48:41.929980
 import org.jenkinsci.plugins.pipeline.modeldefinition.Utils
 
 ci_lint = 'tlcpack/ci-lint:20221013-060115-61c9742ea'
diff --git a/ci/jenkins/generated/cpu_jenkinsfile.groovy b/ci/jenkins/generated/cpu_jenkinsfile.groovy
index fd5b16bd512d..ad168c591872 100644
--- a/ci/jenkins/generated/cpu_jenkinsfile.groovy
+++ b/ci/jenkins/generated/cpu_jenkinsfile.groovy
@@ -14,7 +14,6 @@
 // KIND, either express or implied.  See the License for the
 // specific language governing permissions and limitations
 // under the License.
-#!groovy
 // -*- mode: groovy -*-
 
 // Licensed to the Apache Software Foundation (ASF) under one
@@ -61,7 +60,7 @@
 // 'python3 jenkins/generate.py'
 // Note: This timestamp is here to ensure that updates to the Jenkinsfile are
 // always rebased on main before merging:
-// Generated at 2022-12-02T14:16:27.519764
+// Generated at 2022-12-05T14:48:42.120032
 import org.jenkinsci.plugins.pipeline.modeldefinition.Utils
 
 ci_lint = 'tlcpack/ci-lint:20221013-060115-61c9742ea'
diff --git a/ci/jenkins/generated/docker_jenkinsfile.groovy b/ci/jenkins/generated/docker_jenkinsfile.groovy
index 37382f1724bc..28e81efb7bf0 100644
--- a/ci/jenkins/generated/docker_jenkinsfile.groovy
+++ b/ci/jenkins/generated/docker_jenkinsfile.groovy
@@ -14,7 +14,6 @@
 // KIND, either express or implied.  See the License for the
 // specific language governing permissions and limitations
 // under the License.
-#!groovy
 // -*- mode: groovy -*-
 
 // Licensed to the Apache Software Foundation (ASF) under one
@@ -61,7 +60,7 @@
 // 'python3 jenkins/generate.py'
 // Note: This timestamp is here to ensure that updates to the Jenkinsfile are
 // always rebased on main before merging:
-// Generated at 2022-12-02T14:16:27.485098
+// Generated at 2022-12-05T14:48:41.987490
 import org.jenkinsci.plugins.pipeline.modeldefinition.Utils
 
 ci_lint = 'tlcpack/ci-lint:20221013-060115-61c9742ea'
diff --git a/ci/jenkins/generated/gpu_jenkinsfile.groovy b/ci/jenkins/generated/gpu_jenkinsfile.groovy
index 1e3e8bb7feb4..c226255e0e6e 100644
--- a/ci/jenkins/generated/gpu_jenkinsfile.groovy
+++ b/ci/jenkins/generated/gpu_jenkinsfile.groovy
@@ -14,7 +14,6 @@
 // KIND, either express or implied.  See the License for the
 // specific language governing permissions and limitations
 // under the License.
-#!groovy
 // -*- mode: groovy -*-
 
 // Licensed to the Apache Software Foundation (ASF) under one
@@ -61,7 +60,7 @@
 // 'python3 jenkins/generate.py'
 // Note: This timestamp is here to ensure that updates to the Jenkinsfile are
 // always rebased on main before merging:
-// Generated at 2022-12-02T14:16:27.431129
+// Generated at 2022-12-05T14:48:42.195581
 import org.jenkinsci.plugins.pipeline.modeldefinition.Utils
 
 ci_lint = 'tlcpack/ci-lint:20221013-060115-61c9742ea'
diff --git a/ci/jenkins/generated/hexagon_jenkinsfile.groovy b/ci/jenkins/generated/hexagon_jenkinsfile.groovy
index 84be48d5ae87..6296d0c5c868 100644
--- a/ci/jenkins/generated/hexagon_jenkinsfile.groovy
+++ b/ci/jenkins/generated/hexagon_jenkinsfile.groovy
@@ -14,7 +14,6 @@
 // KIND, either express or implied.  See the License for the
 // specific language governing permissions and limitations
 // under the License.
-#!groovy
 // -*- mode: groovy -*-
 
 // Licensed to the Apache Software Foundation (ASF) under one
@@ -61,7 +60,7 @@
 // 'python3 jenkins/generate.py'
 // Note: This timestamp is here to ensure that updates to the Jenkinsfile are
 // always rebased on main before merging:
-// Generated at 2022-12-02T14:16:27.345081
+// Generated at 2022-12-05T14:48:42.065368
 import org.jenkinsci.plugins.pipeline.modeldefinition.Utils
 
 ci_lint = 'tlcpack/ci-lint:20221013-060115-61c9742ea'
diff --git a/ci/jenkins/generated/i386_jenkinsfile.groovy b/ci/jenkins/generated/i386_jenkinsfile.groovy
index ab6f2666fa83..f0170f586721 100644
--- a/ci/jenkins/generated/i386_jenkinsfile.groovy
+++ b/ci/jenkins/generated/i386_jenkinsfile.groovy
@@ -14,7 +14,6 @@
 // KIND, either express or implied.  See the License for the
 // specific language governing permissions and limitations
 // under the License.
-#!groovy
 // -*- mode: groovy -*-
 
 // Licensed to the Apache Software Foundation (ASF) under one
@@ -61,7 +60,7 @@
 // 'python3 jenkins/generate.py'
 // Note: This timestamp is here to ensure that updates to the Jenkinsfile are
 // always rebased on main before merging:
-// Generated at 2022-12-02T14:16:27.397208
+// Generated at 2022-12-05T14:48:42.016799
 import org.jenkinsci.plugins.pipeline.modeldefinition.Utils
 
 ci_lint = 'tlcpack/ci-lint:20221013-060115-61c9742ea'
diff --git a/ci/jenkins/generated/lint_jenkinsfile.groovy b/ci/jenkins/generated/lint_jenkinsfile.groovy
index 966227c057fb..ee63a1008b13 100644
--- a/ci/jenkins/generated/lint_jenkinsfile.groovy
+++ b/ci/jenkins/generated/lint_jenkinsfile.groovy
@@ -14,7 +14,6 @@
 // KIND, either express or implied.  See the License for the
 // specific language governing permissions and limitations
 // under the License.
-#!groovy
 // -*- mode: groovy -*-
 
 // Licensed to the Apache Software Foundation (ASF) under one
@@ -61,7 +60,7 @@
 // 'python3 jenkins/generate.py'
 // Note: This timestamp is here to ensure that updates to the Jenkinsfile are
 // always rebased on main before merging:
-// Generated at 2022-12-02T14:16:27.452732
+// Generated at 2022-12-05T14:48:42.041376
 import org.jenkinsci.plugins.pipeline.modeldefinition.Utils
 
 ci_lint = 'tlcpack/ci-lint:20221013-060115-61c9742ea'
diff --git a/ci/jenkins/generated/minimal_jenkinsfile.groovy b/ci/jenkins/generated/minimal_jenkinsfile.groovy
index c304de203566..4c9f469b3bb6 100644
--- a/ci/jenkins/generated/minimal_jenkinsfile.groovy
+++ b/ci/jenkins/generated/minimal_jenkinsfile.groovy
@@ -14,7 +14,6 @@
 // KIND, either express or implied.  See the License for the
 // specific language governing permissions and limitations
 // under the License.
-#!groovy
 // -*- mode: groovy -*-
 
 // Licensed to the Apache Software Foundation (ASF) under one
@@ -61,7 +60,7 @@
 // 'python3 jenkins/generate.py'
 // Note: This timestamp is here to ensure that updates to the Jenkinsfile are
 // always rebased on main before merging:
-// Generated at 2022-12-02T14:16:27.468739
+// Generated at 2022-12-05T23:21:03.010229
 import org.jenkinsci.plugins.pipeline.modeldefinition.Utils
 
 ci_lint = 'tlcpack/ci-lint:20221013-060115-61c9742ea'
@@ -512,9 +511,10 @@ def build() {
         label: 'Create CPU minimal cmake config',
       )
       cmake_build(ci_minimal, 'build', '-j2')
+      make_standalone_crt(ci_minimal, 'build')
       make_cpp_tests(ci_minimal, 'build')
       sh(
-        script: "./${jenkins_scripts_root}/s3.py --action upload --bucket ${s3_bucket} --prefix ${s3_prefix}/cpu-minimal --items build/libtvm.so build/libtvm_runtime.so build/config.cmake build/libtvm_allvisible.so build/cpptest build/build.ninja build/CMakeFiles/rules.ninja",
+        script: "./${jenkins_scripts_root}/s3.py --action upload --bucket ${s3_bucket} --prefix ${s3_prefix}/cpu-minimal --items build/libtvm.so build/libtvm_runtime.so build/config.cmake build/libtvm_allvisible.so build/crttest build/cpptest build/build.ninja build/CMakeFiles/rules.ninja build/standalone_crt build/build.ninja",
         label: 'Upload artifacts to S3',
       )
     }
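In the minimal_jenkinsfile hunk above, the longer --items list in the generated Groovy is produced from the template-side change that appears later in this patch: filename lists such as crttest and standalone_crt are concatenated and joined into a single s3.py invocation. A simplified sketch of that expansion follows; the list contents are taken from the diff, but the real expansion lives in the upload_artifacts macro in ci/jenkins/templates/utils/macros.j2, so treat this as illustrative only.

    # Simplified sketch of how per-target filename lists expand into the
    # single s3.py upload command seen in the generated Jenkinsfile above.
    crttest = ["build/crttest"]
    standalone_crt = ["build/standalone_crt", "build/build.ninja"]

    def upload_artifacts_command(prefix: str, tag: str, filenames: list) -> str:
        items = " ".join(filenames)
        return (
            f"./ci/scripts/jenkins/s3.py --action upload "
            f"--prefix {prefix}/{tag} --items {items}"
        )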
diff --git a/ci/jenkins/generated/riscv_jenkinsfile.groovy b/ci/jenkins/generated/riscv_jenkinsfile.groovy
index d84d8af825e2..b485e9906f4c 100644
--- a/ci/jenkins/generated/riscv_jenkinsfile.groovy
+++ b/ci/jenkins/generated/riscv_jenkinsfile.groovy
@@ -14,7 +14,6 @@
 // KIND, either express or implied.  See the License for the
 // specific language governing permissions and limitations
 // under the License.
-#!groovy
 // -*- mode: groovy -*-
 
 // Licensed to the Apache Software Foundation (ASF) under one
@@ -61,7 +60,7 @@
 // 'python3 jenkins/generate.py'
 // Note: This timestamp is here to ensure that updates to the Jenkinsfile are
 // always rebased on main before merging:
-// Generated at 2022-12-02T14:16:27.381161
+// Generated at 2022-12-05T14:48:42.170796
 import org.jenkinsci.plugins.pipeline.modeldefinition.Utils
 
 ci_lint = 'tlcpack/ci-lint:20221013-060115-61c9742ea'
diff --git a/ci/jenkins/generated/wasm_jenkinsfile.groovy b/ci/jenkins/generated/wasm_jenkinsfile.groovy
index 69cfd1868018..0c7c2ccf2aaa 100644
--- a/ci/jenkins/generated/wasm_jenkinsfile.groovy
+++ b/ci/jenkins/generated/wasm_jenkinsfile.groovy
@@ -14,7 +14,6 @@
 // KIND, either express or implied.  See the License for the
 // specific language governing permissions and limitations
 // under the License.
-#!groovy
 // -*- mode: groovy -*-
 
 // Licensed to the Apache Software Foundation (ASF) under one
@@ -61,7 +60,7 @@
 // 'python3 jenkins/generate.py'
 // Note: This timestamp is here to ensure that updates to the Jenkinsfile are
 // always rebased on main before merging:
-// Generated at 2022-12-02T14:16:27.503268
+// Generated at 2022-12-05T14:48:42.147157
 import org.jenkinsci.plugins.pipeline.modeldefinition.Utils
 
 ci_lint = 'tlcpack/ci-lint:20221013-060115-61c9742ea'
diff --git a/ci/jenkins/templates/minimal_jenkinsfile.groovy.j2 b/ci/jenkins/templates/minimal_jenkinsfile.groovy.j2
index f6734f060993..87db883745cc 100644
--- a/ci/jenkins/templates/minimal_jenkinsfile.groovy.j2
+++ b/ci/jenkins/templates/minimal_jenkinsfile.groovy.j2
@@ -29,8 +29,9 @@
         label: 'Create CPU minimal cmake config',
       )
       cmake_build(ci_minimal, 'build', '-j2')
+      make_standalone_crt(ci_minimal, 'build')
       make_cpp_tests(ci_minimal, 'build')
-      {{ m.upload_artifacts(tag='cpu-minimal', filenames=tvm_lib + tvm_allvisible + cpptest) }}
+      {{ m.upload_artifacts(tag='cpu-minimal', filenames=tvm_lib + tvm_allvisible + crttest + cpptest + standalone_crt) }}
     {% endcall %}
 
 
diff --git a/ci/jenkins/templates/utils/base.groovy.j2 b/ci/jenkins/templates/utils/base.groovy.j2
index a5c53b385885..0854091c7a65 100644
--- a/ci/jenkins/templates/utils/base.groovy.j2
+++ b/ci/jenkins/templates/utils/base.groovy.j2
@@ -1,4 +1,3 @@
-#!groovy
 // -*- mode: groovy -*-
 
 // Licensed to the Apache Software Foundation (ASF) under one
diff --git a/ci/scripts/jenkins/open_docker_update_pr.py b/ci/scripts/jenkins/open_docker_update_pr.py
index f11d00f765df..0531bdb6780b 100755
--- a/ci/scripts/jenkins/open_docker_update_pr.py
+++ b/ci/scripts/jenkins/open_docker_update_pr.py
@@ -134,16 +134,16 @@ def latest_tlcpackstaging_image(source: str) -> Optional[str]:
 
     replacements = {}
     for line in content:
-        m = re.match(r"^(ci_[a-zA-Z0-9]+) = \"(.*)\"", line.strip())
+        m = re.match(r'"tag": "(.*)",', line.strip())
         if m is not None:
+            image_spec = m.groups()[0]
             logging.info(f"Found match on line {line.strip()}")
-            groups = m.groups()
-            new_image = latest_tlcpackstaging_image(groups[1])
+            new_image = latest_tlcpackstaging_image(image_spec)
             if new_image is None:
                 logging.info(f"No new image found")
             else:
                 logging.info(f"Using new image {new_image}")
-                new_line = f"{groups[0]} = '{new_image}'\n"
+                new_line = f' "tag": "{new_image}",'
                 replacements[line] = new_line
 
     # Re-generate the Jenkinsfiles
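Since the image tags now live in data.py as "tag": "..." entries rather than ci_xxx = "..." assignments, the updater's regex and replacement line change to match, as shown in the hunk above. A standalone demonstration of the new match-and-rewrite cycle, using only the pattern visible in the diff (the sample tags and indentation are illustrative):

    # Demonstrates the data.py-oriented pattern from the diff above.
    import re

    line = '        "tag": "tlcpack/ci-arm:20221013-060115-61c9742ea",'
    m = re.match(r'"tag": "(.*)",', line.strip())
    assert m is not None
    image_spec = m.groups()[0]  # "tlcpack/ci-arm:20221013-060115-61c9742ea"

    # After resolving a newer upstream tag, the whole line is rewritten:
    new_image = "tlcpack/ci-arm:20221019-060125-0b4836739"  # example value
    new_line = f' "tag": "{new_image}",'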
diff --git a/tests/python/ci/test_ci.py b/tests/python/ci/test_ci.py
index 710f152c9b1e..cf31b50b63ec 100644
--- a/tests/python/ci/test_ci.py
+++ b/tests/python/ci/test_ci.py
@@ -1213,7 +1213,7 @@ def test_github_tag_teams(tmpdir_factory, source_type, data, check):
         },
         expected="Using tlcpackstaging tag on tlcpack",
         expected_images=[
-            "ci_arm = 'tlcpack/ci-arm:456-456-abc'",
+            '"tag": "tlcpack/ci-arm:456-456-abc"',
         ],
     ),
     tlcpack_update=dict(
@@ -1235,7 +1235,7 @@ def test_github_tag_teams(tmpdir_factory, source_type, data, check):
         },
         expected="Found newer image, using: tlcpack",
         expected_images=[
-            "ci_arm = 'tlcpack/ci-arm:234-234-abc'",
+            '"tag": "tlcpack/ci-arm:234-234-abc",',
         ],
     ),
 )