From e1efb85ca9ac9e32e84e89c9a351b27ccc83e83d Mon Sep 17 00:00:00 2001 From: Han Ning Date: Thu, 11 Jul 2024 09:51:02 +0800 Subject: [PATCH] introduce shared workflow for pull request checking Signed-off-by: Han Ning --- .github/actions/build-kernel/action.yml | 53 +++ .github/actions/fetch-refs/action.yml | 15 + .github/actions/prepare-ci-code/action.yml | 17 + .github/actions/test-kernel/action.yml | 55 +++ .github/code/configs/ktest-requirements.txt | 12 + .github/code/configs/ltp-whitelist | 5 + .../code/dockerfiles/x86_64-buster.dockerfile | 47 +++ .github/code/ktest/basic_func.py | 82 +++++ .github/code/ktest/kernel_test.py | 196 +++++++++++ .github/code/ktest/kvm_manager.py | 285 +++++++++++++++ .github/code/ktest/machine_manager.py | 310 ++++++++++++++++ .github/code/ktest/ssh_client.py | 206 +++++++++++ .github/code/ktest/sys_commands.py | 46 +++ .github/code/ktest/sys_pkgpool_api.py | 153 ++++++++ .github/code/ktest/unit_test.py | 331 ++++++++++++++++++ .github/code/scripts/build.sh | 108 ++++++ .github/code/scripts/checkpatch | 21 ++ .github/code/scripts/git-check-backports | 185 ++++++++++ .github/code/tests/ltp.yml | 48 +++ .github/workflows/build-and-test.yml | 37 ++ .github/workflows/static.yml | 59 ++++ 21 files changed, 2271 insertions(+) create mode 100644 .github/actions/build-kernel/action.yml create mode 100644 .github/actions/fetch-refs/action.yml create mode 100644 .github/actions/prepare-ci-code/action.yml create mode 100644 .github/actions/test-kernel/action.yml create mode 100644 .github/code/configs/ktest-requirements.txt create mode 100644 .github/code/configs/ltp-whitelist create mode 100644 .github/code/dockerfiles/x86_64-buster.dockerfile create mode 100644 .github/code/ktest/basic_func.py create mode 100755 .github/code/ktest/kernel_test.py create mode 100644 .github/code/ktest/kvm_manager.py create mode 100644 .github/code/ktest/machine_manager.py create mode 100644 .github/code/ktest/ssh_client.py create mode 100644 
.github/code/ktest/sys_commands.py create mode 100644 .github/code/ktest/sys_pkgpool_api.py create mode 100644 .github/code/ktest/unit_test.py create mode 100755 .github/code/scripts/build.sh create mode 100755 .github/code/scripts/checkpatch create mode 100755 .github/code/scripts/git-check-backports create mode 100644 .github/code/tests/ltp.yml create mode 100644 .github/workflows/build-and-test.yml create mode 100644 .github/workflows/static.yml diff --git a/.github/actions/build-kernel/action.yml b/.github/actions/build-kernel/action.yml new file mode 100644 index 0000000000000..5cd539aecf5c5 --- /dev/null +++ b/.github/actions/build-kernel/action.yml @@ -0,0 +1,53 @@ +# SPDX-License-Identifier: GPL-2.0-only +# Copyright (c) 2024 ByteDance. + +name: "Build Kernel" +description: build kernel from source code +inputs: + arch: + description: "architecture for Docker image" + required: true + distro-code: + description: "distro code for the Docker image" + required: true +runs: + using: "composite" + steps: + - name: BuildOSImageWithKernel + run: | + set -e + + # configurations + code_path=${{ github.workspace }}/tmp/.github/code + docker_image=kernel_builder:${{ inputs.arch }}-${{ inputs.distro-code }} + artifacts_root=$(jq .artifacts < /etc/kci.conf | tr -d \") + artifacts_relpath=${{ inputs.arch }}/${{ inputs.distro-code }}/${{ github.event.pull_request.head.sha }} + os_image_root=$(jq .images < /etc/kci.conf | tr -d \") + os_image=${{ inputs.arch }}-${{ inputs.distro-code }}.qcow2 + os_image_size=100G + + # build Docker image + dockerfile="${{ inputs.arch }}-${{ inputs.distro-code }}.dockerfile" + docker build --network=host -t $docker_image -f $code_path/dockerfiles/$dockerfile . 
+ + # build kernel + artifacts_dir=$artifacts_root/$artifacts_relpath + mkdir -p $artifacts_dir + docker run --network=host --rm \ + -v ${{ github.workspace }}:/linux/src \ + -v $artifacts_dir:/linux/output \ + -v $code_path/scripts/build.sh:/bin/build.sh \ + $docker_image \ + build.sh -j32 + + # create OS image + source_image_path=$os_image_root/$os_image + target_image_path=$artifacts_root/$artifacts_relpath/$os_image + cp $source_image_path $target_image_path + qemu-img resize $target_image_path $os_image_size + virt-customize \ + -a $target_image_path \ + --root-password password:kernelci \ + --ssh-inject root \ + --copy-in $code_path/configs/ltp-whitelist:/opt + shell: bash diff --git a/.github/actions/fetch-refs/action.yml b/.github/actions/fetch-refs/action.yml new file mode 100644 index 0000000000000..6290822e16199 --- /dev/null +++ b/.github/actions/fetch-refs/action.yml @@ -0,0 +1,15 @@ +# SPDX-License-Identifier: GPL-2.0-only +# Copyright (c) 2024 ByteDance. + +name: "Fetch Code" +description: "fetch code from GitHub" +inputs: + depth: + description: depth for git fetch command + default: "2000" +runs: + using: "composite" + steps: + - name: FetchRefs + run: git fetch --no-tags --prune --depth=${{ inputs.depth }} origin +refs/heads/*:refs/remotes/origin/* + shell: bash diff --git a/.github/actions/prepare-ci-code/action.yml b/.github/actions/prepare-ci-code/action.yml new file mode 100644 index 0000000000000..d0ed95b05a098 --- /dev/null +++ b/.github/actions/prepare-ci-code/action.yml @@ -0,0 +1,17 @@ +# SPDX-License-Identifier: GPL-2.0-only +# Copyright (c) 2024 ByteDance. 
+ +name: "Prepare CI Code" +description: download ci code from 5.4.143-velinux branch +runs: + using: "composite" + steps: + - name: PrepareCICode + run: | + set -e + + target=tmp + branch=5.4.143-velinux + + git clone -b $branch ${{ github.server_url }}/${{ github.repository }}.git $target + shell: bash diff --git a/.github/actions/test-kernel/action.yml b/.github/actions/test-kernel/action.yml new file mode 100644 index 0000000000000..182d4ec87b135 --- /dev/null +++ b/.github/actions/test-kernel/action.yml @@ -0,0 +1,55 @@ +# SPDX-License-Identifier: GPL-2.0-only +# Copyright (c) 2024 ByteDance. + +name: "Test Kernel" +description: test the Kernel +inputs: + arch: + description: "Architecture for the Docker image" + required: true + distro-code: + description: "Distro code for the Docker image" + required: true +runs: + using: "composite" + steps: + - name: PrepareTestEnvironment + run: | + set -e + + artifacts_root=$(jq .artifacts < /etc/kci.conf | tr -d \") + artifacts_dir=$artifacts_root/${{ inputs.arch }}/${{ inputs.distro-code }}/${{ github.event.pull_request.head.sha }} + code_path=${{ github.workspace }}/tmp/.github/code + + python3 -m pip install virtualenv + python3 -m virtualenv $artifacts_dir/venv + source $artifacts_dir/venv/bin/activate + + pip3 install -r $code_path/configs/ktest-requirements.txt + mkdir -p $artifacts_dir/log + shell: bash + - name: RunKernelTest + run: | + set -e + + code_path=${{ github.workspace }}/tmp/.github/code + artifacts_root=$(jq .artifacts < /etc/kci.conf | tr -d \") + artifacts_relpath=${{ inputs.arch }}/${{ inputs.distro-code }}/${{ github.event.pull_request.head.sha }} + artifacts_dir=$artifacts_root/$artifacts_relpath + image=$artifacts_dir/${{ inputs.arch }}-${{ inputs.distro-code }}.qcow2 + domain_name=${{ inputs.arch }}-${{ inputs.distro-code }}-${{ github.event.pull_request.head.sha }} + test_file=$code_path/tests/ltp.yml + + cp -r $code_path/ktest ktest + cd ktest + source $artifacts_dir/venv/bin/activate + + 
./kernel_test.py kvm \ + --image $image \ + --test-kvm $domain_name \ + --ssh-auth-method sshkey \ + --ssh-pubkey-file ~/.ssh/id_rsa.pub \ + --kernel-package-dir $artifacts_dir \ + --log-dir $artifacts_dir/log \ + --tests-yml $test_file + shell: bash diff --git a/.github/code/configs/ktest-requirements.txt b/.github/code/configs/ktest-requirements.txt new file mode 100644 index 0000000000000..3548906f65a18 --- /dev/null +++ b/.github/code/configs/ktest-requirements.txt @@ -0,0 +1,12 @@ +bcrypt==4.1.3 +certifi==2024.7.4 +cffi==1.15.1 +charset-normalizer==3.3.2 +cryptography==42.0.8 +idna==3.7 +paramiko==3.4.0 +pycparser==2.21 +PyNaCl==1.5.0 +PyYAML==6.0.1 +requests==2.31.0 +schema==0.7.7 diff --git a/.github/code/configs/ltp-whitelist b/.github/code/configs/ltp-whitelist new file mode 100644 index 0000000000000..8448667d1a58b --- /dev/null +++ b/.github/code/configs/ltp-whitelist @@ -0,0 +1,5 @@ +ioctl03 +fanotify22 +ioprio_set03 +setsockopt08 +statx04 diff --git a/.github/code/dockerfiles/x86_64-buster.dockerfile b/.github/code/dockerfiles/x86_64-buster.dockerfile new file mode 100644 index 0000000000000..27d40b3bb3cdf --- /dev/null +++ b/.github/code/dockerfiles/x86_64-buster.dockerfile @@ -0,0 +1,47 @@ +# SPDX-License-Identifier: GPL-2.0-only +# Copyright (c) 2024 ByteDance. 
+ +FROM --platform=linux/amd64 debian:10 + +RUN mkdir /linux +RUN mkdir /linux/src +RUN mkdir /linux/output + +RUN apt update +RUN apt install -y bc +RUN apt install -y bison +RUN apt install -y ccache +RUN apt install -y cpio +RUN apt install -y dpkg-dev +RUN apt install -y flex +RUN apt install -y gcc +RUN apt install -y git +RUN apt install -y kmod +RUN apt install -y libaudit-dev +RUN apt install -y libbfd-dev +RUN apt install -y libdw-dev +RUN apt install -y libelf-dev +RUN apt install -y libiberty-dev +RUN apt install -y liblzma-dev +RUN apt install -y libnuma-dev +RUN apt install -y libperl-dev +RUN apt install -y libslang2-dev +RUN apt install -y libssl-dev +RUN apt install -y libunwind-dev +RUN apt install -y libunwind8-dev +RUN apt install -y make +RUN apt install -y pkg-config +RUN apt install -y python +RUN apt install -y python3 +RUN apt install -y python3-pip +RUN apt install -y python3-requests +RUN apt install -y rsync +RUN apt install -y lsb-release +RUN apt install -y python-dev +RUN apt install -y python3-dev +RUN apt install -y sshpass +RUN apt install -y debhelper + +# WA for install pahole>=1.13 +RUN echo "deb http://deb.debian.org/debian bullseye main" >> /etc/apt/sources.list +RUN apt update && apt install -y dwarves diff --git a/.github/code/ktest/basic_func.py b/.github/code/ktest/basic_func.py new file mode 100644 index 0000000000000..50643840ce5b8 --- /dev/null +++ b/.github/code/ktest/basic_func.py @@ -0,0 +1,82 @@ +#!/usr/bin/python3 +# SPDX-License-Identifier: GPL-2.0-only +# Copyright (c) 2024 ByteDance. 
+ +import os +import sys +import json +try: + # requests module doesn't come with the default Python3 package + import requests +except ImportError: + print("ERROR: Please install 'requests' module for python3:") + print(" sudo apt-get install python3-pip") + print(" sudo pip3 install requests") + raise +import time + + +def http_get(url, token): + headers = { + 'Accept': 'application/json', + 'Authorization': token, + } + response = requests.get(url=url, headers=headers) + + if response.status_code != requests.codes.ok: + raise RuntimeError("Failed request external API, GET request URL: {url}, error: {response}, " + "status code: {status_code}". + format(url=url, response=response.json(), status_code=response.status_code)) + + response_data = response.json() + return response_data + + +def http_post(url, post_data_dict, token): + + post_data = json.dumps(post_data_dict) + # content_length = str(len(post_data)) + headers = { + 'Accept': 'application/json', + 'Content-Type': 'application/json', + 'Authorization': token, + } + response = requests.post(url=url, headers=headers, data=post_data) + + if response.status_code != requests.codes.ok: + raise RuntimeError("Failed request external API, POST request URL: {url} with data: {data}, error: " + "{response}, status code: {status_code}". + format(url=url, data=post_data, response=response.json(), status_code=response.status_code)) + response_data = response.json() + return response_data + + +def http_put(url, post_data_dict, token): + + headers = { + 'Accept': 'application/json', + 'Content-Type': 'application/json', + 'Authorization': token, + } + response = requests.put(url=url, headers=headers, json=post_data_dict) + + if response.status_code != requests.codes.ok: + raise RuntimeError("Failed request external API, PUT request URL: {url} with data: {data}, error: " + "{response}, status code: {status_code}". 
+ format(url=url, data=post_data_dict, response=response.json(), + status_code=response.status_code)) + response_data = response.json() + return response_data + + +def local_print(message, begin_newline=False): + if begin_newline: + print() + timestamp = time.asctime(time.localtime(time.time())) + print("{timestamp} {msg}".format(timestamp=timestamp, msg=message)) + sys.stdout.flush() + + +# for local debug +if __name__ == "__main__": + print(http_put("http://localhost:8080/api/jobs/cancel", {"job_ids": []}, "")) diff --git a/.github/code/ktest/kernel_test.py b/.github/code/ktest/kernel_test.py new file mode 100755 index 0000000000000..3856e297d65ef --- /dev/null +++ b/.github/code/ktest/kernel_test.py @@ -0,0 +1,196 @@ +#!/usr/bin/env python3 +# SPDX-License-Identifier: GPL-2.0-only +# Copyright (c) 2024 ByteDance. + +""" +{script} tests the kernel packages (in the specified directory) on either KVM or +Physical machines. {script} interacts with the machine under test (KVM or +Physical) through SSH to install the kernel packages and conduct various kernel +tests. + +For KVM, the testing environment is set up by cloning the already prepared base +KVM domain (specified through --base-kvm) to +(specified through --test-kvm), and the tests are run in . When +tests finish, the cloned will be undefined and deleted. + +For Physical machine, two disk partitions need to be specified: --base-partition +and --test-partition. The base partition is where the root file system is mounted +when the test starts, and the test environment is set up by extracting the +prepared testing root file system gzip to the test partition. Then boot into the +test partition, and the tests are run in the test partition. When tests finish, +the machine will be reboot into the base partition to restore to the state where +the test starts. Following are the steps of this process: + +1. (in base partition) extract test_root.gzip to the test partition +2. 
(in base partition) set up grub and reboot to test partition +3. (in test partition) install kernel and run tests +4. (in test partition) set up grub and reboot to base partition +5. (in base partition) finish + +""" + +import argparse +import os +import sys +import traceback +from concurrent.futures import ThreadPoolExecutor + +import sys_commands +from kvm_manager import kvm_manager +from ssh_client import ssh_client +from unit_test import unit_test + +multi_executor = ThreadPoolExecutor(max_workers=40) + + +def local_print(message, color=sys_commands.PCOLOR_NONE, begin_newline=False): + if begin_newline: + print() + print(color + "{script}: {msg}".format( + script=os.path.basename(__file__), msg=message), + sys_commands.PCOLOR_END, flush=True) + sys.stdout.flush() + + +def check_opts(opts): + err = False + + # Check the existence of the sshkey file if specified + if opts.ssh_pubkey_file is not None and \ + not os.path.isfile(opts.ssh_pubkey_file): + print("ERROR. the specified --ssh-pubkey-file '{file}' ". + format(file=opts.ssh_pubkey_file) + "doesn't exist.") + err = True + + if err: + raise RuntimeError("ERROR. 
error detected in the argument options.") + +def get_opts(): + parser = argparse.ArgumentParser( + description=__doc__.format(script=os.path.basename(__file__)), + formatter_class=argparse.RawTextHelpFormatter) + + subparsers = \ + parser.add_subparsers(help="select machine type to run kernel tests", + dest="machine_type") + subparsers.required = True + + # arguments for kernel tests on KVM + parser_kvm = subparsers.add_parser('kvm', help='kvm virtual machine') + + parser_kvm.add_argument('-i', '--image', type=str, required=True, + metavar="", + help="Image to create guest") + + parser_kvm.add_argument('-t', '--test-kvm', type=str, required=True, + metavar="", + help="name of the test KVM domain, which will be " + "cloned from for kernel " + "tests") + + # arguments for kernel tests on physical machine + parser_phy = subparsers.add_parser('phy', help='physical machine') + + parser_phy.add_argument('-n', '--host-name', type=str, required=True, + metavar="", + help="host name of the physical machine to run " + "kernel tests on (either ip or hostname)") + + parser_phy.add_argument('-b', '--base-partition-uuid', type=str, required=True, + metavar="", + help="the base system partition uuid of the physical" + "machine, where the test partition is " + "prepared from and recovered to") + + parser_phy.add_argument('-t', '--test-partition-uuid', type=str, + required=True, metavar="", + help="the test partition UUID (should match " + "what's in /etc/fstab for the test image " + "partition)") + + # Add the common arguments for both kvm and physical machine. 
+ for p in [parser_kvm]: + p.add_argument('-y', '--tests-yml', type=str, + required=True, metavar="", + help="the yml file that lists all the test yml files") + + p.add_argument('-l', "--log-dir", type=str, + required=True, metavar="", + help="the directory to store the test logs") + + p.add_argument('-k', '--kernel-package-dir', type=str, + required=True, metavar="", + help="the directory that has the kernel package " + "files (e.g. linux-*.deb) to be tested") + + p.add_argument('-s', '--ssh-auth-method', type=str, + required=True, choices=ssh_client.AUTH_METHODS, + help="authentication method used for ssh connection") + + p.add_argument('-p', '--ssh-pubkey-file', type=str, + help="ssh public key file for sshkey authentication " + "(only needed when '-s/--ssh-auth-method sshkey' " + "is set") + + return parser.parse_args() + +def get_test_machine(opts): + machine = kvm_manager(kvm_name=opts.test_kvm, + default_timeout=10) + machine.set_image(opts.image) + return machine + +def run_kernel_test(machine, kernel_package_dir, u_tests): + _, orig_uname = machine.ssh_exec_command("uname -a", capture_stdout=True) + + local_print("Install kernel packages in '{machine}' and verify...". + format(machine=machine.get_machine_name()), begin_newline=True) + + # Install the kernel packages + machine.install_and_boot_to_kernel(kernel_package_dir) + + # Verify the new kernel is being used. + _, new_uname = machine.ssh_exec_command("uname -a", capture_stdout=True) + local_print("Verify the new kernel is being used. orig_uname:'{orig_uname}, " + "new_uname:'{new_uname}'". + format(orig_uname=orig_uname, new_uname=new_uname), + begin_newline=True) + + ## Verify the new kernel is being used by checking with orig_uname + #if orig_uname == new_uname: + # raise RuntimeError("ERROR. 
old kernel is still being used in " + # "'{machine}' ".format(machine=machine.get_machine_name()) + \ + # "(by checking 'uname -a')") + #else: + # local_print("The new kernel is correctly installed in {machine}". + # format(machine=machine.get_machine_name())) + + out_put_result, print_color, success = u_tests.run_tests(machine) + return out_put_result, print_color, success + + +def main(): + opts = get_opts() + check_opts(opts) + u_tests = unit_test(opts.tests_yml, opts.log_dir) + + machine = None + + try: + machine = get_test_machine(opts) + machine.ssh_username_and_auth("root", opts.ssh_auth_method, + opts.ssh_pubkey_file) + machine.init_test_environment() + run_kernel_test(machine, opts.kernel_package_dir, u_tests) + except BaseException: + traceback.print_exc() + sys.stdout.flush() + sys.exit(1) + finally: + if machine is not None: + machine.test_cleanup() + + +if __name__ == "__main__": + main() + sys.exit(0) diff --git a/.github/code/ktest/kvm_manager.py b/.github/code/ktest/kvm_manager.py new file mode 100644 index 0000000000000..e5edb5f49343d --- /dev/null +++ b/.github/code/ktest/kvm_manager.py @@ -0,0 +1,285 @@ +#!/usr/bin/python3 +# SPDX-License-Identifier: GPL-2.0-only +# Copyright (c) 2024 ByteDance. + +import atexit +import os +import random +import re +import subprocess +import tempfile +import time +import uuid + +from machine_manager import machine_manager, overrides +from sys_commands import run_subprocess_cmd + +DOM_XML_TEMPLATE = ''' + + {name} + {uuid} + {mem_gb} + {mem_gb} + {nr_cpus} + + hvm + + {os_opts} + + + + + + destroy + restart + destroy + + {qemu_bin} + + + + +
+ + + + + + +
+ + + + + +
+ + + + + +
+ + + + + +
+ + + + + +
+ + + + + +
+ + + + + +
+ + + + + + +
+ + +
+ + + /dev/urandom + +
+ + + +''' + +class kvm_manager(machine_manager): + possible_kvm_status = ["running", "idle", "paused", "in shutdown", + "shut off", "crashed", "pmsuspended"] + + def __init__(self, kvm_name, default_timeout): + machine_manager.__init__(self, machine_name=kvm_name, + default_timeout=default_timeout) + self._kvm_name = kvm_name + self._image = None + + @overrides(machine_manager) + def is_phy_machine(self): + return False + + @overrides(machine_manager) + def init_test_environment(self): + if self._image is None: + raise RuntimeError("Please 'set_image' for cloning before " + "'init_test_environment'.") + + self.create_guest(self._image, self._kvm_name) + + # Connect SSH + self.ssh_connect() + + def create_guest(self, image, name): + self.mac = self.gen_mac() + arch = os.uname().machine + qemu_bin = "/usr/bin/qemu-system-" + arch + os_opts = '' + if arch == 'aarch64': + machine_type = 'virt' + os_opts = ''' + /usr/share/AAVMF/AAVMF_CODE.fd + /tmp/kernel_test_kvm_buster-aarch64_11138735_VARS.fd + ''' + else: + machine_type = 'q35' + xml = DOM_XML_TEMPLATE.format( + qemu_bin=qemu_bin, + machine_type=machine_type, + os_opts=os_opts, + name=name, + mem_gb=20, + nr_cpus=20, + uuid=str(uuid.uuid4()), + image=os.path.abspath(image), + mac=self.mac, + arch=arch, + ) + domf = tempfile.NamedTemporaryFile(suffix=".xml", mode='w') + domf.write(xml + "\n") + domf.flush() + print("Creating VM...") + cmd = ['virsh', 'define', domf.name] + subprocess.check_call(cmd) + self._virsh_kvm_control("start") + atexit.register(self.cleanup) + + def gen_mac(self): + r1 = random.randint(1, 255) + r2 = random.randint(1, 255) + r3 = random.randint(1, 255) + return "52:54:00:%02x:%02x:%02x" % (r1, r2, r3) + + def cleanup(self): + cmd = ['virsh', 'destroy', self._kvm_name] + subprocess.call(cmd) + cmd = ['virsh', 'undefine', self._kvm_name] + subprocess.call(cmd) + + @overrides(machine_manager) + def wait_for_online(self): + # The ip address will be available when KVM is started and online. 
+ # Poll ip address once every second, and set timeout to 30 seconds, + # which should be enough for KVM to boot. + self._local_print("Polling IP address...") + start_time = time.time() + while time.time() - start_time < 1800: + cmd = "virsh domifaddr {} | grep ipv4".format(self._kvm_name) + res = run_subprocess_cmd(cmd, timeout=self._default_timeout, + no_print=True, capture_stdout=True, + shell=True, allow_nonzero=True) + if len(res) != 0: + ip_addr = re.split(r'\s{2,}', res)[-1].split('/')[0] + self._local_print("Found '{kvm}' IP address: {ip}". + format(kvm=self._kvm_name, ip=ip_addr)) + if not machine_manager.machine_ssh_open(ip_addr): + self._local_print("SSH not ready, try again...") + time.sleep(5) + continue + self._machine_ip = ip_addr + return + + time.sleep(15) + + raise RuntimeError("Cannot get IP address for ssh for '{kvm}' in 300s". + format(kvm=self._kvm_name)) + + @overrides(machine_manager) + def reboot(self): + self.ssh_close() + self._virsh_kvm_control("shutdown") + self._wait_for_kvm_status("shut off") + self._virsh_kvm_control("start") + + @overrides(machine_manager) + def test_cleanup(self): + self._local_print("Perform the test cleanup...", begin_newline=True) + machine_manager.test_cleanup(self) + + # Destroy and undefine KVM (if exist). + kvm_exist, _ = \ + kvm_manager.check_kvm_status(self._kvm_name, + timeout=self._default_timeout) + if kvm_exist: + # Force KVM to be destroyed before undefine it. + self._virsh_kvm_control("destroy", allow_nonzero=True, timeout=100) + self._virsh_kvm_control("undefine", args=["--remove-all-storage", "--nvram"], timeout=500) + # Sanity check + kvm_exist, _ = \ + kvm_manager.check_kvm_status(self._kvm_name, + timeout=self._default_timeout) + if kvm_exist: + raise RuntimeError("ERROR: {kvm} should have been undefined". 
+ format(kvm=self._kvm_name)) + + @overrides(machine_manager) + def install_and_boot_to_kernel(self, kernel_package_dir): + # Install the kernel package + self._install_kernel_package(kernel_package_dir) + # Reboot to enter the new kernel + self.reboot() + # Reconnect the SSH + self.ssh_connect() + + def set_image(self, image): + self._image = image + + def _virsh_kvm_control(self, ctrl, args=[], capture_stdout=False, + allow_nonzero=False, timeout=None): + destroy_cmd = ["virsh", ctrl, self._kvm_name] + args + timeout = self._default_timeout if timeout is None else timeout + return run_subprocess_cmd(destroy_cmd, timeout=timeout, + capture_stdout=capture_stdout, + allow_nonzero=allow_nonzero) + + def _wait_for_kvm_status(self, expect_status, timeout=None): + if expect_status not in self.possible_kvm_status: + raise RuntimeError("ERROR. Invalid KVM expect_status: '{status}'". + format(status=expect_status)) + + start_time = time.time() + timeout = self._default_timeout if timeout is None else timeout + while True: + kvm_exist, kvm_status = \ + kvm_manager.check_kvm_status(self._kvm_name, timeout) + if not kvm_exist: + raise RuntimeError("ERROR. KVM '{kvm}' doesn't exist". + format(kvm=self._kvm_name)) + if kvm_status == expect_status: + return + time.sleep(1) + if time.time() - start_time > timeout: + raise RuntimeError("Timeout waiting for '{k}' to status '{s}'". 
+ format(k=self._kvm_name, s=expect_status)) + + @staticmethod + def check_kvm_status(kvm_domain, timeout): + cmd = "virsh list --all | grep \" {kvm} \"".format(kvm=kvm_domain) + res = run_subprocess_cmd(cmd, timeout, no_print=True, + capture_stdout=True, shell=True, + allow_nonzero=True) + + kvm_exist, kvm_status = False, None + if len(res) != 0: + kvm_exist = True + kvm_status = re.split(r'\s{2,}', res.replace(kvm_domain, ''))[-1] + + return kvm_exist, kvm_status diff --git a/.github/code/ktest/machine_manager.py b/.github/code/ktest/machine_manager.py new file mode 100644 index 0000000000000..f69116f31cfcd --- /dev/null +++ b/.github/code/ktest/machine_manager.py @@ -0,0 +1,310 @@ +#!/usr/bin/python3 +# SPDX-License-Identifier: GPL-2.0-only +# Copyright (c) 2024 ByteDance. + +import abc +import glob +import os +import socket +import stat +import sys +import time + +import sys_commands +from ssh_client import ssh_client + +kernel_packages = ["linux-headers*.deb", + "linux-image-*-a[m\|r][d\|m]64_*.deb", + "linux-libc*.deb"] + +def overrides(interface_class): + def overrider(method): + assert method.__name__ in dir(interface_class), \ + "ERROR: the function '{}' marked as override doesn't exist ". \ + format(method.__name__) + "in the parent class '{}'". \ + format(interface_class.__name__) + return method + return overrider + +class machine_manager(metaclass=abc.ABCMeta): + SSH_PORT = 22 + + def __init__(self, machine_name, default_timeout): + self._default_timeout = default_timeout + self._ssh_client = ssh_client(default_timeout=default_timeout) + # self._machine_name is the name of the machine. For KVM machine, this + # is the KVM domain name. For physical machine, this can be either the + # machine's hostname or ip address. 
+ self._machine_name = machine_name + # the machine's IP address, set in self.wait_for_online + self._machine_ip = None + # self._username is the username used to ssh connect to the machine, + # whichi will be set in self.ssh_username_and_auth function. + self._username = None + + @abc.abstractmethod + def is_phy_machine(self): + pass + + @abc.abstractmethod + def init_test_environment(self): + # Initialize the test environment. When returns, the machine should be + # connected by SSH and ready to accept self.ssh_exec_command for tests. + pass + + @abc.abstractmethod + def wait_for_online(self): + # Wait for the machine to be online (ready for ssh connection) and + # set self._machine_ip for ssh connection. + pass + + @abc.abstractmethod + def reboot(self): + # subclass to implement the reboot process + pass + + @abc.abstractmethod + def install_and_boot_to_kernel(self, kernel_package_dir): + # Install kernel_packages in the specified directory and reboot into + # the installed kernel. When returns, the machine should be connected + # by SSH and ready to accept self.ssh_exec_command in the newly- + # installed kernel environment. + pass + + @abc.abstractmethod + def test_cleanup(self): + self.ssh_close() + # subclass to implement the extra cleanup + + @staticmethod + def check_kernel_dir_err(kernel_package_dir): + err = False + + # Sanity check: make sure exists + if not os.path.isdir(kernel_package_dir): + print("ERROR: the specified --kernel-package-dir '{dir}' ". + format(dir=kernel_package_dir) + "doesn't exist.") + err = True + + # Sanity check: make sure the kernel packages exist in + for package in kernel_packages: + if not glob.glob(os.path.join(kernel_package_dir, package)): + print("ERROR: the kernel package '{ker}' doesn't exist in ". + format(ker=package) + "specified --kernel-package-dir '{dir}'". 
+ format(dir=kernel_package_dir)) + err = True + + return err + + @classmethod + def machine_ssh_open(cls, machine_ip): + af = socket.AF_INET + if len(machine_ip.split('.')) != 4: + af = socket.AF_INET6 + sock = socket.socket(af, socket.SOCK_STREAM) + res = sock.connect_ex((machine_ip, cls.SSH_PORT)) + sock.close() + return res == 0 + + def get_machine_name(self): + return self._machine_name + + def get_machine_ip(self): + return self._machine_ip + + def ssh_username_and_auth(self, username, auth_method, + ssh_pubkey_file=None): + self._username = username + self._ssh_client.set_username_and_auth( + username=username, auth_method=auth_method, + ssh_pubkey_file=ssh_pubkey_file) + + def ssh_connect(self): + self.wait_for_online() + self._ssh_client.connect(host_addr=self._machine_ip) + # Send a pwd command to verify the machine is ready to take ssh command. + # For the physical machine immediately after reboot, even though the SSH + # port is open and connected, it may still take a while for the machine + # to fully come up and execute the first SSH command, so set the timeout + # to 180 seconds (3 min). 
+ self._local_print("SSH connected, waiting for machine up to take SSH " + "command (timeout = 3 minutes)") + self.ssh_exec_command("pwd", timeout=180, out=None) + + def ssh_close(self): + self._ssh_client.close() + + def ssh_exec_command(self, cmd, timeout=None, capture_stdout=False, + allow_nonzero=False, out=sys.stdout, + nonblock=False, retries=0, retry_wait_secs=60): + return self._ssh_client.exec_command( + cmd=cmd, timeout=timeout, capture_stdout=capture_stdout, + allow_nonzero=allow_nonzero, out=out, nonblock=nonblock, + retries=retries, retry_wait_secs=retry_wait_secs) + + def download_machine_dir_files(self, machine_dir, local_dir): + try: + file_attr_list = self._ssh_client.get_dir_file_attrs(machine_dir) + except FileNotFoundError: + self._local_print("Directory '{dir}' not found on the machine " + "'{machine}', ignore download.".format( + dir=machine_dir, machine=self.get_machine_name()), + color=sys_commands.PCOLOR_PINK) + return + + if not os.path.isdir(local_dir): + self._local_print("Local direcotry '{dir}' not found, ignore " + "download".format(dir=local_dir), + color=sys_commands.PCOLOR_PINK) + return + + for file_attr in file_attr_list: + mf = os.path.join(machine_dir, file_attr.filename) + lf = os.path.join(local_dir, file_attr.filename) + if stat.S_ISDIR(file_attr.st_mode): + # Create local subdirectory + os.mkdir(lf) + self._local_print("Recursively download files in {mf} to {lf}". 
+ format(mf=mf, lf=lf), color=sys_commands.PCOLOR_BLUE) + self.download_machine_dir_files(mf, lf) + else: + self._ssh_client.scp_file_from_client(src_file=mf, dst_file=lf) + + def put_file(self, src, dst): + return self._ssh_client.scp_file_to_client(src_file=src, dst_file=dst) + + def _ssh_apt_update(self): + # We need --allow-releaseinfo-change option because we may see + # following errors when debian release a new stable version on latest + # debian version: + # + # Repository + # 'http://security.debian.org/debian-security buster/updates InRelease' + # changed its 'Suite' value from 'stable' to 'oldstable' + # + # On the other hand, old debian versions (like version 8) does not + # support --allow-releaseinfo-change option, and they will not have + # above error too, so the first apt-get update -y will always work on + # old versions. + self.ssh_exec_command("sudo apt-get update -y || sudo apt-get update --allow-releaseinfo-change -y", + timeout=300, retries=3, retry_wait_secs=60) + def _install_kernel_package(self, kernel_package_dir): + self._install_depends_package() + + for package in kernel_packages: + package_path = \ + glob.glob(os.path.join(kernel_package_dir, package))[0] + package_name = os.path.basename(package_path) + + if self._username == "root": + ssh_package_path = '/root/{name}'.format( + usr=self._username, name=os.path.basename(package_name)) + else: + ssh_package_path = '/home/{usr}/{name}'.format( + usr=self._username, name=os.path.basename(package_name)) + self._ssh_client.scp_file_to_client(src_file=package_path, + dst_file=ssh_package_path) + try: + self._dpkg_install(ssh_package_path) + except: + ret, stdout_content = self.ssh_exec_command( + "sudo lsof /var/lib/dpkg/lock*", allow_nonzero=True) + if ret == 0: + self.ssh_exec_command( + "sudo rm -f /var/lib/dpkg/lock*", allow_nonzero=True) + self._dpkg_install(ssh_package_path) + + def _update_grub(self): + ret, installed_kernel_version = self.ssh_exec_command( + "dpkg -I 
/root/linux-image-*-a[m\|r][d\|m]64_*.deb | grep Description | awk '{print $5}'", + capture_stdout=True, allow_nonzero=True) + installed_kernel_version = str(installed_kernel_version).replace('\n', '').replace('\r', '').strip() + self._local_print("installed_kernel_version: {installed_kernel_version}".format( + installed_kernel_version=installed_kernel_version)) + + if ret != 0: + raise RuntimeError("failed to get newly installed kernel version") + + get_grub_order = 'index=0 && cat /boot/grub/grub.cfg | grep "Debian GNU/Linux, with Linux " | while read ' \ + 'line; do if [[ $line =~ "{installed_kernel_version}" ]]; then echo $index && exit 0 ; fi ' \ + '&& let index+=1 ;done'.format(installed_kernel_version=installed_kernel_version) + ret, grub_order = self.ssh_exec_command(get_grub_order, capture_stdout=True, allow_nonzero=True) + grub_order = str(grub_order).replace('\n', '').replace('\r', '').strip() + self._local_print("grub_order: {grub_order}".format(grub_order=grub_order)) + if grub_order is None or "" == grub_order: + raise RuntimeError("failed to get newly installed kernel in grub file") + + update_grub = "sed -i 's/GRUB_DEFAULT=.*/GRUB_DEFAULT=\"Advanced options for Debian GNU\/Linux>Debian " \ + "GNU\/Linux, with Linux {installed_kernel_version}\"/g' /etc/default/grub && " \ + "/usr/sbin/update-grub && " \ + "/usr/sbin/update-grub2".format(installed_kernel_version=installed_kernel_version) + self.ssh_exec_command(update_grub, allow_nonzero=True) + + def _dpkg_install(self, path): + # Install the kerne package + # Ideally the machine should have all the dependencies installed + # already (libraries for linux-tools, for example), but in order + # for the CI procedure to be a bit more robust, we try 'apt-get + # install -f' first if we need some packages from the apt source, + # before calling the install a failure. 
+ self.ssh_exec_command( + "sudo apt-get install -f -y", timeout=300) + self.ssh_exec_command( + "sudo dpkg -i {}".format(path), timeout=900) + + def _local_print(self, message, color=sys_commands.PCOLOR_NONE, + begin_newline=False): + if begin_newline: + print() + print(color + "{machine}: {msg}".format(machine=self._machine_name, + msg=message), + sys_commands.PCOLOR_END, flush=True) + + def trigger_crash(self): + ret, _ = self.ssh_exec_command("dpkg -l | grep kdump-tools", allow_nonzero=True) + if ret != 0: + raise RuntimeError("ERROR. kdump-tools is not installed, could not test crash") + + self.ssh_exec_command("sync") + self.ssh_exec_command("echo c > /proc/sysrq-trigger", nonblock=True) + self.ssh_close() + # Wait for ssh port to be closed to make sure the reboot has started. + # Set timeout to 30 seconds. + self._local_print("crash command sent and wait for SSH port close " + "(timeout 30 minutes)...") + start_time = time.time() + while time.time() - start_time < 300: + if not self.machine_ssh_open(machine_ip=self._machine_ip): + # server trigger crash and shutdown successfully + return + time.sleep(1) + self._local_print("ERROR. trigger crash and shutdown failed") + raise RuntimeError("Time out waiting for SSH connection close " + "'{machine}' in 300 seconds".format(machine=self._machine_name)) + + def test_kdump(self): + self.ssh_exec_command("ls /var/crash | grep bak-", allow_nonzero=True) + _, orig_crash_num = self.ssh_exec_command("ls /var/crash | grep bak- | wc -l", capture_stdout=True) + + self.trigger_crash() + self.ssh_connect() + + _, new_crash_num = self.ssh_exec_command("ls /var/crash | grep bak- | wc -l", capture_stdout=True) + + if new_crash_num == orig_crash_num: + self._local_print("ERROR. 
Test kdump-tools failed, NO new crash file generated", + color=sys_commands.PCOLOR_RED) + return False + else: + self.ssh_exec_command("ls /var/crash | grep bak-", allow_nonzero=True) + self._local_print("Test kdump-tools successfully, new crash file generated", + color=sys_commands.PCOLOR_GREEN) + return True + + def _install_depends_package(self): + # depends for 'dpkg -i {kernel packages}' + self._ssh_apt_update() + self.ssh_exec_command("apt-get install -y libdw1 libnuma1 libunwind8", + timeout=300, retries=3, retry_wait_secs=60) + # self.ssh_exec_command("export DEBIAN_FRONTEND=noninteractive;if [[ $(grep 'bookworm' /etc/os-release) ]]; then apt-get install -y kdump-tools -t lyra-private;else apt-get install -y kdump-tools -t $(lsb_release -c|awk '{print $NF}')-private;fi", + # timeout=300, retries=3, retry_wait_secs=60) diff --git a/.github/code/ktest/ssh_client.py b/.github/code/ktest/ssh_client.py new file mode 100644 index 0000000000000..370fcf4744f88 --- /dev/null +++ b/.github/code/ktest/ssh_client.py @@ -0,0 +1,206 @@ +#!/usr/bin/python3 +# SPDX-License-Identifier: GPL-2.0-only +# Copyright (c) 2024 ByteDance. 

import os
import sys
import time
import socket
import sys_commands
try:
    # paramiko module doesn't come with the default Python3 package
    import paramiko
except ImportError:
    print("ERROR: Please install 'paramiko' module for python3:")
    print(" sudo apt-get install python3-pip")
    print(" sudo pip3 install paramiko")
    print(" sudo pip3 install --upgrade paramiko")
    print(" sudo pip3 install paramiko[gssapi]")
    raise

class ssh_client:
    """Thin wrapper around paramiko: connect with ssh-key or kerberos
    authentication, run remote commands with timeout/retry, and move
    files over SFTP."""

    # The return value of the 'timeout' command when timeout occurs
    CMD_TIMEOUT_RET_VALUE = 124

    AUTH_METHOD_SSHKEY = "sshkey"
    AUTH_METHOD_KERBEROS = "kerberos"
    AUTH_METHODS = [AUTH_METHOD_SSHKEY, AUTH_METHOD_KERBEROS]

    def __init__(self, default_timeout):
        # default_timeout: fallback timeout (seconds) used by connect()
        # and exec_command() when the caller does not pass one.
        self._default_timeout = default_timeout
        self._ssh = None
        self._host_addr = None
        self._username = None
        self._auth_method = None
        self._ssh_pubkey_file = None

    def __del__(self):
        # Best-effort cleanup; close() is safe even on a partially
        # constructed instance (see the getattr() guard there).
        self.close()

    def set_username_and_auth(self, username, auth_method,
                              ssh_pubkey_file=None):
        """Select the login user and authentication method.

        Must be called before connect().  For 'sshkey' a public key file
        is mandatory; for 'kerberos' any given key file is discarded.
        """
        self._username = username

        if auth_method not in self.AUTH_METHODS:
            raise RuntimeError("ERROR. Invalid ssh authentication method. "
                "Please select from: {}".format('/'.join(self.AUTH_METHODS)))

        if auth_method == self.AUTH_METHOD_SSHKEY:
            if ssh_pubkey_file is None:
                raise RuntimeError("ERROR. Must set the ssh public key file "
                    "for the selected authentication method: {}".
                    format(auth_method))
        elif auth_method == self.AUTH_METHOD_KERBEROS:
            # Should NOT set the ssh public key file for the selected
            # authentication method: kerberos
            ssh_pubkey_file = None

        self._auth_method = auth_method
        self._ssh_pubkey_file = ssh_pubkey_file

    def connect(self, host_addr, timeout=None):
        """Open the SSH connection, retrying once per second until
        *timeout* (default: the instance default) is exceeded.

        NOTE(review): every SSHException -- including permanent
        authentication failures -- is retried until the deadline; confirm
        whether auth errors should abort early.
        """
        if self._username is None or self._auth_method is None:
            raise RuntimeError("ERROR. Please set the username and the ssh "
                "authentication method in 'set_username_and_auth' before the "
                "ssh connection")

        self._host_addr = host_addr

        self._ssh = paramiko.SSHClient()
        self._ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())

        timeout = self._default_timeout if timeout is None else timeout
        start_time = time.time()
        while True:
            if time.time() - start_time > timeout:
                raise RuntimeError("ERROR. SSH connection to {client} time out".
                    format(client=self._get_client_name()))
            try:
                if self._auth_method == self.AUTH_METHOD_SSHKEY:
                    self._ssh.connect(host_addr, username=self._username,
                                      key_filename=self._ssh_pubkey_file,
                                      timeout=timeout)
                elif self._auth_method == self.AUTH_METHOD_KERBEROS:
                    self._ssh.connect(host_addr, username=self._username,
                                      gss_auth=True,
                                      gss_trust_dns=False,
                                      timeout=timeout)

                break
            except (paramiko.ssh_exception.NoValidConnectionsError,
                    paramiko.ssh_exception.SSHException) as e:
                # SSH transport is not ready. Try again in 1 second.
                self._local_print("retry for ssh {host_addr}, due to ssh_exception: "
                    "{ssh_exception}".format(host_addr=host_addr, ssh_exception=e))
                time.sleep(1)
                continue

    def close(self):
        """Close the connection if one is open.

        Fix: use getattr() so that __del__ does not raise AttributeError
        when __init__ failed before self._ssh was assigned.
        """
        ssh = getattr(self, "_ssh", None)
        if ssh is not None:
            ssh.close()

    def _local_print(self, message, color=sys_commands.PCOLOR_NONE,
                     out=sys.stdout):
        """Print a message prefixed with 'SSH user@host'; out=None is
        a mute switch used by callers that want silent execution."""
        if out is None:
            return
        # No color if not output to stdout
        c = color if out == sys.stdout else ''
        c_end = sys_commands.PCOLOR_END if out == sys.stdout else ''
        print(c + "SSH {client}: {msg}".format(
            client=self._get_client_name(), msg=message),
            c_end, file=out, flush=True)

    def _get_client_name(self):
        return "{usr}@{host}".format(usr=self._username, host=self._host_addr)

    def exec_command(self, cmd, timeout=None, capture_stdout=False,
                     allow_nonzero=False, out=sys.stdout, nonblock=False,
                     retries=0, retry_wait_secs=60):
        """Run *cmd* on the remote side.

        Returns (exit_status, stdout_content); stdout_content is only
        collected when capture_stdout is True.  With nonblock=True the
        command is fired and (None, None) returned immediately.  A remote
        timeout is reported as exit status 124 ('timeout' convention);
        non-zero exit raises RuntimeError unless allow_nonzero is set.
        Each failed attempt is retried up to *retries* times, waiting
        *retry_wait_secs* between attempts.
        """
        if self._ssh is None:
            raise RuntimeError("ERROR. SSH connection to {} is not established".
                format(self._get_client_name()))

        timeout = self._default_timeout if timeout is None else timeout

        for i in range(retries + 1):
            if i > 0:
                # Give the remote side some breathing room between retries.
                time.sleep(retry_wait_secs)

            if out is not None:
                self._local_print("executing '{cmd}' in {clt} with timeout={tm}...".
                    format(cmd=cmd, clt=self._get_client_name(), tm=timeout),
                    color=sys_commands.PCOLOR_YELLOW, out=out)
            _, ssh_stdout, ssh_stderr = self._ssh.exec_command(command=cmd,
                                                               timeout=timeout)
            if nonblock:
                return None, None

            stdout_content = None
            try:
                if capture_stdout:
                    stdout_content = ""
                stdout_line = ssh_stdout.readline()
                while stdout_line:
                    # Strip the newline at the end, as print will add newline.
                    self._local_print("(stdout) " + stdout_line.strip(), out=out)
                    if capture_stdout:
                        stdout_content += stdout_line
                    stdout_line = ssh_stdout.readline()

                stderr_line = ssh_stderr.readline()
                while stderr_line:
                    self._local_print("(stderr) " + stderr_line.strip(), out=out,
                                      color=sys_commands.PCOLOR_PINK)
                    stderr_line = ssh_stderr.readline()

                # Check the command exit value
                ret = ssh_stdout.channel.recv_exit_status()

            except socket.timeout:
                ret = self.CMD_TIMEOUT_RET_VALUE

            if ret == 0:
                return ret, stdout_content

        if not allow_nonzero and ret != 0:
            # Raise runtime error if the command doesn't return 0
            is_timeout = (ret == self.CMD_TIMEOUT_RET_VALUE)
            raise RuntimeError("ERROR. SSH command '{cmd}' in {client} returns "
                "{ret} (timeout: {tm})".
                format(cmd=cmd, client=self._get_client_name(), ret=ret,
                       tm=is_timeout))

        return ret, stdout_content

    def scp_file_to_client(self, src_file, dst_file):
        """Upload local *src_file* to remote *dst_file* over SFTP."""
        if self._ssh is None:
            raise RuntimeError("ERROR. SSH connection to {} is not established".
                format(self._get_client_name()))

        self._local_print("SCP file '{src}' to '{dst}' in {client}...".format(
            src=src_file, dst=dst_file, client=self._get_client_name()),
            color=sys_commands.PCOLOR_BLUE)
        ftp_client = self._ssh.open_sftp()
        ftp_client.put(src_file, dst_file)
        ftp_client.close()

    def scp_file_from_client(self, src_file, dst_file):
        """Download remote *src_file* to local *dst_file* over SFTP."""
        if self._ssh is None:
            raise RuntimeError("ERROR. SSH connection to {} is not established".
                format(self._get_client_name()))

        self._local_print("SCP file '{src}' in {client} to '{dst}'...".format(
            src=src_file, dst=dst_file, client=self._get_client_name()),
            color=sys_commands.PCOLOR_BLUE)
        ftp_client = self._ssh.open_sftp()
        ftp_client.get(src_file, dst_file)
        ftp_client.close()

    def get_dir_file_attrs(self, dir_path):
        """Return the SFTP attribute list for *dir_path* on the remote."""
        if self._ssh is None:
            raise RuntimeError("ERROR. SSH connection to {} is not established".
                format(self._get_client_name()))

        ftp_client = self._ssh.open_sftp()
        ret = ftp_client.listdir_attr(dir_path)
        ftp_client.close()
        return ret

# ---- .github/code/ktest/sys_commands.py ----
#!/usr/bin/python3
# SPDX-License-Identifier: GPL-2.0-only
# Copyright (c) 2024 ByteDance.

import os
import re
import subprocess
import sys

# ANSI color escapes shared by all the ktest helpers.
PCOLOR_NONE = ''
PCOLOR_RED = '\033[31m'
PCOLOR_GREEN = '\033[32m'
PCOLOR_YELLOW = '\033[33m'
PCOLOR_BLUE = '\033[34m'
PCOLOR_PINK = '\033[35m'
PCOLOR_END = '\033[m'

def local_print(message, color=PCOLOR_NONE):
    """Print *message* prefixed with this script's file name."""
    print(color + "{script}: {msg}".format(
        script=os.path.basename(__file__), msg=message),
        PCOLOR_END, flush=True)

def run_subprocess_cmd(cmd, timeout, no_print=False, capture_stdout=False,
                       shell=False, allow_nonzero=False):
    """Run *cmd* locally through subprocess.run.

    Returns the stripped stdout text when capture_stdout is True,
    otherwise None.  Raises RuntimeError on a non-zero exit unless
    allow_nonzero is set (in which case stderr is also suppressed).
    """
    if not no_print:
        local_print("executing '{cmd}' with timeout={tm}...".
                    format(cmd=cmd if shell else ' '.join(cmd), tm=timeout),
                    color=PCOLOR_YELLOW)

    stdout = subprocess.PIPE if capture_stdout else None
    # when nonzero return value is allowed, suppress the stderr.
    stderr = subprocess.DEVNULL if allow_nonzero else None
    res = subprocess.run(cmd, timeout=timeout, stdout=stdout,
                         stderr=stderr, shell=shell)

    if not allow_nonzero and res.returncode != 0:
        # Raise runtime error if the command doesn't return 0
        raise RuntimeError("Command '{cmd}' returns {ret}".
                           format(cmd=cmd, ret=res.returncode))

    if capture_stdout:
        # The stdout value is byte sequence, so decode it to convert to string
        # and strip the newline at the end.
        return res.stdout.decode('utf-8').strip()
    else:
        return None

# ---- .github/code/ktest/sys_pkgpool_api.py ----
#!/usr/bin/python3
# SPDX-License-Identifier: GPL-2.0-only
# Copyright (c) 2024 ByteDance.
+ +""" +Call sys-pkgpool api to apply or return physical servers + +API referenc: + http://sys-pkgpool.byted.org/swagger/ + +Requirements: + None +""" +import basic_func +import random +from concurrent.futures import ThreadPoolExecutor + +BASE_URL = "http://sys-pkgpool.byted.org/api/" +URL_APPLY_MACHINES = BASE_URL + "applyMachines" +URL_RETURN_MACHINES = BASE_URL + "returnMachines" +URL_SUBMIT_JOB = BASE_URL + "job" +URL_LIST_JOB = BASE_URL + "jobs" +URL_GET_JOB = BASE_URL + "job" + "/{job_id}" +URL_CANCEL_JOBS = BASE_URL + "jobs/cancel" +URL_GET_Machine_List = BASE_URL + "machineList?arch={arch}" + +executor = ThreadPoolExecutor(max_workers=20) +TOKEN = "" +lock_prefix = "kernel_ci_cd_" + +CI_PRIORITY = 200 +JOB_DESCRIPTION = "apply_only" +JOB_WAITING = "waiting" +JOB_PROCESSING = "processing" + + +def apply_server(server_ip, job_id, expiration): + """ + :param server_ip: + :param job_id: + :param expiration: + :return: + """ + post_data_dict = { + "user": lock_prefix + str(job_id), + "expiration": expiration, + "ips": [ + server_ip + ], + "next_server": "10.129.65.104" + } + response = basic_func.http_post(URL_APPLY_MACHINES, post_data_dict, TOKEN) + basic_func.local_print("Install servers by post {url} with data: {post_data}, response: {response}". 
+ format(url=URL_APPLY_MACHINES, post_data=post_data_dict, response=response)) + + return job_id if response["success_list"] is not None \ + and len(response["success_list"]) > 0 \ + and server_ip == response["success_list"][0] \ + else False + + +def release_server(server_ip, job_id, key=None): + """ + :param server_ip: + :param job_id: + :param key: + :return: + """ + user = lock_prefix + str(job_id) if key is None else key + return_ip = [server_ip] if type(server_ip) == str else server_ip + post_data_dict = { + "user": user, + "ips": return_ip + } + response = basic_func.http_post(URL_RETURN_MACHINES, post_data_dict, TOKEN) + basic_func.local_print("return servers by post {url} with data: {post_data}, response: {response}". + format(url=URL_RETURN_MACHINES, post_data=post_data_dict, response=response)) + + return True if response["success_list"] is not None \ + and len(response["success_list"]) > 0 \ + else False + + +def submit_job(job_id, cpu_arch, strategy="random_2"): + """ + :param job_id: + :param cpu_arch: "all" "x86_64" "aarch64" + :param strategy: + :return: job_id in pkgpool + """ + user = lock_prefix + str(job_id) + post_data_dict = { + "description": JOB_DESCRIPTION, + "priority": CI_PRIORITY, + "submitter_email": user, + "exec_expire": 24 * 5, # hours + "test_scheduler_strategy": strategy, + "test_arch": cpu_arch, + "dhcp_server": "10.129.65.104", + + } + response = basic_func.http_post(URL_SUBMIT_JOB, post_data_dict, TOKEN) + basic_func.local_print("submit job by post {url} with data: {post_data}, response: {response}". + format(url=URL_SUBMIT_JOB, post_data=post_data_dict, response=response)) + + if response["data"] is not None \ + and response["data"]["job_id"] is not None: + return response["data"]["job_id"] + + return 0 + + +def get_job_status(job_id): + url = URL_GET_JOB.format(job_id=job_id) + job_status = basic_func.http_get(url, TOKEN) + basic_func.local_print("Get job status from {url}, job status: {status} ". 
+ format(url=url, status=job_status)) + return job_status + + +def get_machine_list(arch): + if arch is None or arch == "": + arch = "all" + url = URL_GET_Machine_List.format(arch=arch) + machine_list = basic_func.http_get(url, TOKEN) + basic_func.local_print("Get machine_list from {url}, machine list: {machine_list} ". + format(url=url, machine_list=machine_list)) + return machine_list["machine_list"] + + +def list_job(job_status): + url = URL_LIST_JOB + if job_status is not None: + url = url + "?job_status=" + job_status + job_list = basic_func.http_get(url, TOKEN) + basic_func.local_print("list job from {url}, job list: {job_list} ". + format(url=url, job_list=job_list)) + return job_list + + +def cancel_jobs(job_ids): + """ + :param job_ids: type: list + :return: job_id in pkgpool + """ + data_dict = { + "job_ids": job_ids + } + response = basic_func.http_put(URL_CANCEL_JOBS, data_dict, TOKEN) + basic_func.local_print("cancel job by post {url} with data: {data_dict}, response: {response}". + format(url=URL_CANCEL_JOBS, data_dict=data_dict, response=response)) + + return diff --git a/.github/code/ktest/unit_test.py b/.github/code/ktest/unit_test.py new file mode 100644 index 0000000000000..4fd9029a3f537 --- /dev/null +++ b/.github/code/ktest/unit_test.py @@ -0,0 +1,331 @@ +#!/usr/bin/python3 +# SPDX-License-Identifier: GPL-2.0-only +# Copyright (c) 2024 ByteDance. 
+ +import os +import shutil +import sys +import time + +import sys_commands + +try: + from schema import And, Optional, Schema, SchemaError, Use +except ImportError: + print("ERROR: Please install 'schema' module for python3:") + print(" sudo apt-get install python3-pip") + print(" sudo pip3 install schema") + raise +try: + import yaml +except ImportError: + print("ERROR: Please install 'yaml' module for python3:") + print(" sudo apt-get install python3-pip") + print(" sudo pip3 install pyyaml") + raise + +test_schema = Schema({ + 'name': And(Use(str)), + 'repo': { + 'url': And(Use(str)), + 'branch': And(Use(str)), + 'timeout': And(Use(str)), + }, + 'before_test': { + 'script': And(Use(list)), + 'timeout': And(Use(int)), + 'repeat_on_fail': And(Use(int)), + }, + 'test': { + 'script': And(Use(list)), + 'timeout': And(Use(int)), + 'repeat': And(Use(int)), + }, + Optional('log_dir'): And(Use(list)), + Optional('test_on_kvm'): And(Use(bool)), + Optional('copy_kernel_source'): And(Use(bool)), + Optional('skip_phy_machine'): And(Use(list)), +}) + +def local_print(message, color=sys_commands.PCOLOR_NONE, begin_newline=False): + if begin_newline: + print() + print(color + "{ts:.3f} {script}: {msg}".format( + ts=time.time(), + script=os.path.basename(__file__), msg=message), + sys_commands.PCOLOR_END, flush=True) + +class unit_test: + def __init__(self, tests_yml, log_dir): + tests = yaml.load(open(tests_yml, 'r'), Loader=yaml.Loader) + self._test_list = [] + self._log_dir = log_dir + + # Create a fresh log directory + if os.path.exists(log_dir): + shutil.rmtree(log_dir) + os.mkdir(log_dir) + + # No test specified + if tests is None: + return + + t = self._load_and_verify_test_yml(tests_yml) + # Insert the test yml file name + t['_yml_file_name'] = os.path.basename(tests_yml) + self._show_test_config(t) + # Insert the result dict, which will be filled later in 'run_tests' + t['_result'] = { + 'skip': False, + 'before_test_fail': True, + 'test_success_num': 0 + } + 
self._test_list.append(t) + + + + def run_tests(self, machine): + success = True + machine.ssh_exec_command("apt-get install -y git", out=None, timeout=500) + for test in self._test_list: + # Check if this machine should be skipped for this test + if self._test_skip_machine(machine, test): + test['_result']['skip'] = True + local_print("{machine} Skip unit test '{test}' ({file})". + format(test=test['name'], file=test['_yml_file_name'], + machine=machine.get_machine_name()), + color=sys_commands.PCOLOR_GREEN) + continue + + # Create directory inside the specified log_dir for this test + local_log_dir = os.path.join(self._log_dir, test['name'] + "-" + machine.get_machine_ip()) + os.mkdir(local_log_dir) + # Run test + if self._run_single_test(machine, test, local_log_dir): + local_print( + "{machine} Unit test '{test}' ({file}) SUCCEEDED". + format(test=test['name'], file=test['_yml_file_name'], + machine=machine.get_machine_name()), + color=sys_commands.PCOLOR_GREEN) + else: + success = False + local_print( + "{machine} Unit test '{test}' ({file}) FAILED". + format(test=test['name'], file=test['_yml_file_name'], + machine=machine.get_machine_name()), + color=sys_commands.PCOLOR_RED) + + out_put_result, print_color = self._print_test_result(machine) + + # Raise error if any test failed + + if not success and machine.__class__.__name__ != "idc_phy_manager": + raise RuntimeError("ERROR. 
Some unit test failed.") + + return out_put_result, print_color, success + + def _show_test_config(self, test): + local_print("Got unit test '{test}' ({file}):".format( + test=test['name'], file=test['_yml_file_name']), + color=sys_commands.PCOLOR_GREEN) + print(yaml.dump(test)) + + def _print_test_result(self, machine): + out_put_result = [] + result_title = "{machine} Unit Test Result:".format(machine=machine.get_machine_name()) + local_print(result_title, color=sys_commands.PCOLOR_GREEN, begin_newline=True) + out_put_result.append(result_title) + + print_color = sys_commands.PCOLOR_GREEN + for test in self._test_list: + r = test['_result'] + result_str = "{machine} {test}: ".format(machine=machine.get_machine_name(), test=test['name']) + if r['skip']: + result_str += "skipped" + elif r['before_test_fail']: + result_str += "failed in before_test" + print_color = sys_commands.PCOLOR_RED + else: + result_str += "{p}/{t} success".format( + p=r['test_success_num'], t=test['test']['repeat']) + if r['test_success_num'] < test['test']['repeat']: + print_color = sys_commands.PCOLOR_RED + + local_print(result_str, color=print_color) + out_put_result.append(result_str) + return out_put_result, print_color + + def _load_and_verify_test_yml(self, t_yml_file): + t = yaml.load(open(t_yml_file, 'r'), Loader=yaml.Loader) + try: + test_schema.validate(t) + except SchemaError: + print("ERROR. Schema error in test yml file: {}".format(t_yml_file)) + raise + return t + + def _test_skip_machine(self, machine, test): + if machine.is_phy_machine(): + # machine is physicla machine + if 'skip_phy_machine' not in test: + # 'skip_phy_machine' not specified in the test yml, assume not + # skip any physical machine. 
+ return False + return 'all' in test['skip_phy_machine'] or \ + machine.get_machine_ip() in test['skip_phy_machine'] or \ + machine.get_machine_name() in test['skip_phy_machine'] + else: + # machine is KVM + if 'test_on_kvm' not in test: + # 'test_on_kvm' not specified in the test yml, assume skip KVM + return True + return not test['test_on_kvm'] + + def _run_single_test(self, machine, test, local_log_dir): + test_name = test['name'] + + if "KDUMP-TOOLS-TEST" == test_name: + # run kdump test + local_print("==================== run kdump test of kdump-tools ==================== ", + color=sys_commands.PCOLOR_GREEN) + ret = machine.test_kdump() + + test['_result']['before_test_fail'] = False + + test_itr_log_dir = os.path.join(local_log_dir, test_name + ".test") + os.mkdir(test_itr_log_dir) + self._copy_test_log(machine, test, test_itr_log_dir) + + if ret: + test['_result']['test_success_num'] += 1 + return ret + + local_print("{machine} Running unit test '{test}' ({file})". + format(test=test_name, file=test['_yml_file_name'], + machine=machine.get_machine_name()), + color=sys_commands.PCOLOR_GREEN) + + # Check out the test repo + self._clone_test_repo(machine, test) + + # Run the before test script (return immediately when any command fails) + build_log = os.path.join(local_log_dir, test_name+".before_test.out") + local_print("{machine} Running before test tasks for '{t}' (log: {l})...".format( + machine=machine.get_machine_name(), t=test_name, l=build_log)) + with open(build_log, "a", newline='\n') as log_file: + for i in range(test['before_test']['repeat_on_fail']): + local_print("{machine} Running before_test script (iteration {itr}, log: {l})...". 
+ format(machine=machine.get_machine_name(), itr=i+1, l=build_log)) + ret = self._machine_exec_test_command( + machine, test['before_test']['script'], + test['before_test']['timeout'], log_file, "before test script") + if ret == 0: + test['_result']['before_test_fail'] = False + break + time.sleep(60) + if test['_result']['before_test_fail']: + return False + + if 'copy_kernel_source' in test: + if test['copy_kernel_source']: + self._copy_kernel_source(machine) + # Run the test (return immediately when any command fails) + success = True + for i in range(test['test']['repeat']): + # Create log directory for this test iteration + test_itr_log_dir = os.path.join( + local_log_dir, test_name + ".test.{itr}".format(itr=i+1)) + os.mkdir(test_itr_log_dir) + test_log = os.path.join(test_itr_log_dir, test_name+".test.out") + local_print("{machine} Running '{t}' test (iteration {itr}, log: {l})...". + format(machine=machine.get_machine_name(), t=test_name, itr=i+1, l=test_log)) + with open(test_log, "a", newline='\n') as log_file: + ret = self._machine_exec_test_command( + machine, test['test']['script'], test['test']['timeout'], + log_file, "test script (iteration {itr})".format(itr=i+1)) + # Copy log from the specified test log directories + self._copy_test_log(machine, test, test_itr_log_dir) + if ret == 0: + test['_result']['test_success_num'] += 1 + else: + success = False + + return success + + def _copy_kernel_source(self, machine): + local_print("====== copy kernel source") + tarf = "/var/tmp/kernel_source.tar" + sys_commands.run_subprocess_cmd("git archive HEAD >" + tarf, shell=True, timeout=1000) + local_print("put file " + tarf) + machine.put_file(tarf, tarf) + machine.ssh_exec_command("mkdir -p /var/tmp/kernel_source; tar -C /var/tmp/kernel_source -xf " + tarf) + + def _clone_test_repo(self, machine, test): + local_print("{machine} Cloning '{test}' test repo...".format(machine=machine.get_machine_name(), + test=test['name'])) + + if test['repo']['url'] is None: + 
local_print("repo is NULL, skipping") + return + + git_cmd = "git clone --depth 1 --branch {branch} {git}".format( + branch=test['repo']['branch'], git=test['repo']['url']) + timeout = test['repo']['timeout'] + + if not machine.is_phy_machine(): + local_print("Add 60 seconds to the specified clone timeout in KVM, " + "as it takes longer to establish connection in KVM (for some " + "reason)") + timeout += 60 + + # Try to clone 3 times to avoid failures due to unstable connection + try_num = 3 + for i in range(1, try_num+1): + # If there exists a directory with the same name as the test repo, + # remove it before cloning. + test_dir = test['repo']['url'].split('/')[-1].split('.git')[0] + r, _ = machine.ssh_exec_command("ls {}".format(test_dir), out=None, + allow_nonzero=True) + if r == 0: + machine.ssh_exec_command("rm -rf {}".format(test_dir)) + + local_print("{machine} Clone try {i}...".format(machine=machine.get_machine_name(), i=i)) + start_time = time.time() + ret, _ = machine.ssh_exec_command(git_cmd, timeout=timeout, + allow_nonzero=(i/dev/null; then + continue + fi + + if ! ./scripts/checkpatch.pl -g "$line" --no-summary; then + ret=1 + fi + +done < <(eval git log --oneline --no-color --no-merges "$@" --no-abbrev-commit | grep -E -o "[0-9a-f]{40}") + +exit $ret diff --git a/.github/code/scripts/git-check-backports b/.github/code/scripts/git-check-backports new file mode 100755 index 0000000000000..50bf2241a5d73 --- /dev/null +++ b/.github/code/scripts/git-check-backports @@ -0,0 +1,185 @@ +#!/usr/bin/env python3 +# SPDX-License-Identifier: GPL-2.0-only +# Copyright (c) 2024 ByteDance. 
+ +import sys +import argparse +import subprocess +import re +from collections import defaultdict +import logging + +''' +Check backport commits in range: + +- Have upstream ID +- Fixes are also backported +- Fixes to fixes are also backported +''' +upstream_patterns = [ + re.compile(r"commit\s+([0-9a-f]{12,40})\s+upstream"), + re.compile(r"upstream\s+commit\s+([0-9a-f]{12,40})"), + re.compile(r"cherry picked from commit ([0-9a-f]{12,40})"), +] + +class Commit(object): + + def __init__(self, commit_id, subject, message): + self.commit_id = commit_id + self.subject = subject + self.message = message + self.fixes = self.parse_fixes(message) + self.upstream_commit_ids = self.parse_upstreams(message) + + def parse_fixes(self, message): + for line in message.splitlines(): + line = line.strip() + if line.lower().startswith("fixes:"): + fs = line.split() + if len(fs) > 1: + return fs[1][:12] + + def parse_upstreams(self, message): + ret = [] + for line in message.splitlines(): + line = line.strip().lower() + for pt in upstream_patterns: + m = pt.search(line) + if m: + ret.append(m.group(1)) + return set(ret) + + def __str__(self): + return "%s (%s)" % (self.commit_id[:12], self.subject) + +class BackportChecker(object): + def __init__(self, check_range, lookback, upstream_head, allow_missing_upstream): + self.commits = {} + self.check_range = check_range + self.lookback = lookback + self.upstream_head = upstream_head + self.allow_missing_upstream = allow_missing_upstream + self.failed_commits = [] + + def check(self): + # These are commit ids in the MR + self.downstream_commits = self.collect_commits(self.check_range) + if not self.downstream_commits: + logging.info("no downstream commit found") + return True + logging.debug("checking %d downstream commits" % len(self.downstream_commits)) + # These are upstream commit ids being backported + self.lookback_commits = self.collect_commits("{first}~{n}..{first}".format(first=self.downstream_commits[0], n=self.lookback)) + 
logging.debug("as well as %d downstream existing commits" % len(self.lookback_commits)) + self.all_backported_upstream_commits = set() + upstream_commits_in_this_backport = set() + for x in self.downstream_commits: + for i in self.commits[x].upstream_commit_ids: + upstream_commits_in_this_backport.add(i) + self.all_backported_upstream_commits.add(i) + for x in self.lookback_commits: + self.all_backported_upstream_commits.add(x) + logging.debug("which comes from %d upstream commits" % len(self.all_backported_upstream_commits)) + upstream_range_begin = self.get_merge_base(list(upstream_commits_in_this_backport) + [self.upstream_head]) + upstream_range = "{}~1..{}".format(upstream_range_begin, self.upstream_head) + upstream_commits = self.collect_commits(upstream_range) + logging.debug("upstream earliest commit being checked: %s" % self.commits[upstream_range_begin].subject) + logging.debug("which consists of %d total commits" % len(upstream_commits)) + self.upstream_fixes = defaultdict(set) + for c in upstream_commits: + co = self.commits[c] + if co.fixes: + self.upstream_fixes[co.fixes].add(co.commit_id) + logging.debug("from there, we found %d upstream fixes to cross-check in the range %s" % ( + len(self.upstream_fixes), + upstream_range)) + ok = True + for i, c in enumerate(self.downstream_commits): + co = self.commits[c] + ucs = co.upstream_commit_ids + logging.info("[%03d/%03d] %s" % (i + 1, len(self.downstream_commits), co.subject)) + if not ucs: + logging.debug("downstream commit %s has no upstream id" % c) + if self.allow_missing_upstream: + continue + else: + self.failed_commits.append( + (c, "no upstream id") + ) + ok = False + continue + logging.debug("checking downstream commit %s (backport of %s)" % (c, " and " .join(ucs))) + for uc in ucs: + if not self.check_upstream_fix(c, uc): + ok = False + if self.failed_commits: + logging.error("These backport commits need more attention:") + for cid, msg in self.failed_commits: + logging.error("%s %s" % (cid, 
msg))
+        return ok
+
+    def check_upstream_fix(self, downstream_commit, upstream_commit):
+        logging.debug("  searching for fix to commit %s in upstream..." % upstream_commit)
+        ufcs = self.upstream_fixes.get(upstream_commit[:12])
+        if not ufcs:
+            logging.debug("  commit %s has no upstream fix commit" % upstream_commit)
+            return True
+        logging.debug("  downstream commit %s has upstream fix commit %s" % (downstream_commit, ufcs))
+        ok = True
+        for ufc in ufcs:
+            if not any(ufc.startswith(b[:12]) for b in self.all_backported_upstream_commits):
+                logging.debug("  upstream commit %s is NOT backported" % ufc)
+                self.failed_commits.append(
+                    (downstream_commit, "Downstream commit {} needs upstream fix {} which is not backported".format(
+                        self.commits[downstream_commit],
+                        self.commits[ufc]))
+                )
+                ok = False
+            ok = self.check_upstream_fix(downstream_commit, ufc) and ok
+        return ok
+
+    def get_merge_base(self, commits):
+        if len(commits) == 1:
+            cmd = ['git', 'rev-parse'] + commits
+            return subprocess.check_output(cmd).decode().strip()
+        cmd = ['git', 'merge-base'] + commits
+        return subprocess.check_output(cmd).decode().strip()
+
+    def collect_commits(self, r):
+        fmt = '--- COMMIT ---%n%H%n%s%n%b'
+        cmd = ['git', 'log', '--reverse', '--format=' + fmt, r]
+        out = subprocess.check_output(cmd).decode()
+        ret = []
+        for x in out.split('--- COMMIT ---\n'):
+            if not x.strip():
+                continue
+            try:
+                h, s, b = (x.strip() + "\n\n").split('\n', maxsplit=2)
+                c = Commit(h, s, b)
+                self.commits[h] = c
+                ret.append(h)
+            except Exception as e:
+                logging.error("Failed to parse commit: %s:%s\n" % (e, x))
+        return ret
+
+def parse_args():
+    parser = argparse.ArgumentParser()
+    parser.add_argument("--debug", "-D", action="store_true", help="Enable debug output")
+    parser.add_argument("--range", "-r", required=True, help="backport commit range to check")
+    parser.add_argument("--lookback", "-B", type=int, default=1000, help="how many previous backports to inspect")
+    parser.add_argument("--upstream", "-u", 
default="upstream/master", help="upstream branch name")
+    parser.add_argument("--allow-missing-upstream", "-s", action="store_true")
+    return parser.parse_args()
+
+def main():
+    args = parse_args()
+    if args.debug:
+        logging.basicConfig(level=logging.DEBUG)
+    else:
+        logging.basicConfig(level=logging.INFO)
+    checker = BackportChecker(args.range, args.lookback, args.upstream,
+                              args.allow_missing_upstream)
+    if not checker.check():
+        sys.exit(1)
+
+if __name__ == "__main__": main()
diff --git a/.github/code/tests/ltp.yml b/.github/code/tests/ltp.yml
new file mode 100644
index 0000000000000..c9e86f1567458
--- /dev/null
+++ b/.github/code/tests/ltp.yml
@@ -0,0 +1,48 @@
+# SPDX-License-Identifier: GPL-2.0-only
+# Copyright (c) 2024 ByteDance.
+
+name: LTP
+repo:
+  url: https://github.com/linux-test-project/ltp.git
+  branch: master
+  timeout: 120
+before_test:
+  script:
+    - apt-get update -y
+    - apt-get install -y gcc git make pkgconf autoconf automake bison flex m4 libc6-dev ntpdate ntp ethtool
+    - hwclock -w
+    - cd ltp && make autotools && ./configure && make -j$(nproc) && make install && mkdir -p /ltp_log
+  timeout: 720
+  repeat_on_fail: 3
+test:
+  script:
+    - |
+      if uname -r | grep -q "6\.6"; then
+        /opt/ltp/runltp -p -d /ltp_log -l /ltp_log/result -o /ltp_log/output -C /ltp_log/failed -T /ltp_log/conf -f syscalls -S /opt/ltp_whitelist || true
+      else
+        /opt/ltp/runltp -p -d /ltp_log -l /ltp_log/result -o /ltp_log/output -C /ltp_log/failed -T /ltp_log/conf -f syscalls || true
+      fi
+    - |
+      if [ ! -f /ltp_log/failed ]; then
+        exit 0
+      fi
+      for i in {1..5}; do
+        succeed=1
+        while read cmd; do
+          echo Retry command $cmd for $i times
+          if ! 
eval /opt/ltp/testcases/bin/$cmd; then + echo Retry failed + succeed=0 + break + fi + done < <(cat /ltp_log/failed | cut -d' ' -f2-) + if [ $succeed == 1 ]; then + exit 0 + fi + done + exit 1 + timeout: 7200 + repeat: 1 +log_dir: + - /ltp_log +test_on_kvm: true diff --git a/.github/workflows/build-and-test.yml b/.github/workflows/build-and-test.yml new file mode 100644 index 0000000000000..10c42a842ff7d --- /dev/null +++ b/.github/workflows/build-and-test.yml @@ -0,0 +1,37 @@ +# SPDX-License-Identifier: GPL-2.0-only +# Copyright (c) 2024 ByteDance. + +name: Kernel Build/Test Check + +on: + workflow_call: + secrets: + ssh_key: + required: true + +jobs: + build-and-test-kernel: + runs-on: [self-hosted] + strategy: + matrix: + arch: [x86_64] + distro_code: [buster] + steps: + - name: Checkout + uses: actions/checkout@v4 + with: + fetch-depth: 1 + ssh-key: ${{ secrets.ssh_key }} + set-safe-directory: true + - name: PrepareCICode + uses: bytedance/kernel/.github/actions/prepare-ci-code@5.4.143-velinux + - name: BuildKernel + uses: bytedance/kernel/.github/actions/build-kernel@5.4.143-velinux + with: + arch: ${{ matrix.arch }} + distro-code: ${{ matrix.distro_code }} + - name: TestKernel + uses: bytedance/kernel/.github/actions/test-kernel@5.4.143-velinux + with: + arch: ${{ matrix.arch }} + distro-code: ${{ matrix.distro_code }} diff --git a/.github/workflows/static.yml b/.github/workflows/static.yml new file mode 100644 index 0000000000000..ab1cfe84c1c21 --- /dev/null +++ b/.github/workflows/static.yml @@ -0,0 +1,59 @@ +# SPDX-License-Identifier: GPL-2.0-only +# Copyright (c) 2024 ByteDance. 
+ +name: Static Check + +on: + workflow_call: + secrets: + ssh_key: + required: true + +env: + COMMIT_RANGE: "origin/${{ github.base_ref }}..HEAD" + +jobs: + check-format: + runs-on: [self-hosted] + steps: + - name: Checkout + uses: actions/checkout@v4 + with: + fetch-depth: 2000 + ssh-key: ${{ secrets.ssh_key }} + set-safe-directory: true + - name: FetchRefs + uses: bytedance/kernel/.github/actions/fetch-refs@5.4.143-velinux + with: + depth: 2000 + - name: PrepareCICode + uses: bytedance/kernel/.github/actions/prepare-ci-code@5.4.143-velinux + - name: CheckFormat + run: | + set -e + code_path=${{ github.workspace }}/tmp/.github/code + $code_path/scripts/checkpatch ${{ env.COMMIT_RANGE }} + search-fixes: + runs-on: [self-hosted] + steps: + - name: Checkout + uses: actions/checkout@v4 + with: + fetch-depth: 2000 + ssh-key: ${{ secrets.ssh_key }} + set-safe-directory: true + - name: FetchRefs + uses: bytedance/kernel/.github/actions/fetch-refs@5.4.143-velinux + with: + depth: 2000 + - name: PrepareCICode + uses: bytedance/kernel/.github/actions/prepare-ci-code@5.4.143-velinux + - name: SearchFixes + run: | + set -e + code_path=${{ github.workspace }}/tmp/.github/code + if ! git remote -v | grep -q upstream; then + git remote add upstream https://github.com/torvalds/linux.git || true + fi + git fetch upstream master -ap + $code_path/scripts/git-check-backports -s -r ${{ env.COMMIT_RANGE }} -u upstream/master