From 0904ede3029f336f6e65be43a18c69f7bf7d8a42 Mon Sep 17 00:00:00 2001 From: strtgbb <146047128+strtgbb@users.noreply.github.com> Date: Thu, 25 Sep 2025 10:06:44 -0400 Subject: [PATCH 001/362] Rebase CICD on v25.8.7.3-lts --- .../10_project-antalya-bug-report.md | 36 + .github/ISSUE_TEMPLATE/10_question.yaml | 20 - .../ISSUE_TEMPLATE/20_feature-request.yaml | 38 - .../20_project-antalya-feature-request.md | 20 + .../30_project-antalya-question.md | 16 + .../30_unexpected-behaviour.yaml | 55 - .../35_incomplete_implementation.yaml | 50 - .../40_altinity-stable-bug-report.md | 50 + .../ISSUE_TEMPLATE/45_usability-issue.yaml | 48 - .../50_altinity-stable-question.md | 16 + .github/ISSUE_TEMPLATE/50_build-issue.yaml | 50 - .../60_documentation-issue.yaml | 26 - .../ISSUE_TEMPLATE/70_performance-issue.yaml | 48 - .../80_backward-compatibility.yaml | 48 - .github/ISSUE_TEMPLATE/85_bug-report.yaml | 76 - .github/ISSUE_TEMPLATE/90_fuzzing-report.yaml | 26 - .../ISSUE_TEMPLATE/95_sanitizer-report.yaml | 26 - .../96_installation-issues.yaml | 46 - .github/PULL_REQUEST_TEMPLATE.md | 31 +- .github/actionlint.yml | 10 +- .../actions/create_workflow_report/action.yml | 52 + .../ci_run_report.html.jinja | 272 ++ .../create_workflow_report.py | 864 +++++ .../workflow_report_hook.sh | 7 + .github/actions/docker_setup/action.yml | 32 + .github/actions/runner_setup/action.yml | 19 + .github/grype/parse_vulnerabilities_grype.py | 32 + .github/grype/run_grype_scan.sh | 18 + .../grype/transform_and_upload_results_s3.sh | 20 + .github/retry.sh | 22 + .github/workflows/README.md | 13 + .github/workflows/backport_branches.yml | 447 ++- .github/workflows/cancel.yml | 19 + .github/workflows/cherry_pick.yml | 2 +- .github/workflows/compare_fails.yml | 110 + .github/workflows/create_release.yml | 2 +- .github/workflows/docker_publish.yml | 150 + .github/workflows/grype_scan.yml | 154 + .github/workflows/init_praktika.yml | 27 + .github/workflows/master.yml | 3092 ++++++++--------- .github/workflows/merge_queue.yml | 121 +- .github/workflows/nightly_fuzzers.yml | 67 +- .github/workflows/nightly_jepsen.yml | 67 +- .github/workflows/nightly_statistics.yml | 31 +- .github/workflows/pull_request.yml | 3007 ++++++++-------- .github/workflows/regression.yml | 1040 ++++++ .github/workflows/release_branches.yml | 574 ++- .github/workflows/repo-sanity-checks.yml | 150 + .github/workflows/reusable_sign.yml | 166 + .github/workflows/reusable_simple_job.yml | 2 +- .github/workflows/scheduled_runs.yml | 55 + .github/workflows/sign_and_release.yml | 567 +++ .github/workflows/vectorsearchstress.yml | 40 +- ci/defs/defs.py | 203 +- ci/defs/job_configs.py | 119 +- ci/docker/binary-builder/Dockerfile | 8 +- ci/docker/cctools/Dockerfile | 4 +- ci/docker/compatibility/centos/Dockerfile | 2 +- ci/docker/compatibility/ubuntu/Dockerfile | 2 +- ci/docker/docs-builder/Dockerfile | 2 +- ci/docker/fasttest/Dockerfile | 4 +- ci/docker/fuzzer/Dockerfile | 2 +- ci/docker/integration/arrowflight/Dockerfile | 2 +- ci/docker/integration/base/Dockerfile | 8 +- .../clickhouse_with_hms_catalog/Dockerfile | 2 +- .../clickhouse_with_unity_catalog/Dockerfile | 2 +- .../integration/helper_container/Dockerfile | 2 +- ci/docker/integration/kerberos_kdc/Dockerfile | 2 +- .../mysql_dotnet_client/Dockerfile | 2 +- .../mysql_golang_client/Dockerfile | 2 +- .../integration/mysql_java_client/Dockerfile | 2 +- .../integration/mysql_js_client/Dockerfile | 2 +- .../integration/mysql_php_client/Dockerfile | 2 +- .../postgresql_java_client/Dockerfile | 2 +- 
ci/docker/integration/resolver/Dockerfile | 2 +- ci/docker/integration/runner/Dockerfile | 8 +- .../integration/runner/dockerd-entrypoint.sh | 4 +- ci/docker/integration/s3_proxy/Dockerfile | 2 +- ci/docker/keeper-jepsen-test/Dockerfile | 2 +- ci/docker/libfuzzer/Dockerfile | 2 +- ci/docker/performance-comparison/Dockerfile | 4 +- ci/docker/server-jepsen-test/Dockerfile | 2 +- ci/docker/sqlancer-test/Dockerfile | 2 +- ci/docker/stateless-test/Dockerfile | 8 +- ci/docker/stress-test/Dockerfile | 2 +- ci/docker/style-test/Dockerfile | 2 +- ci/docker/test-base/Dockerfile | 6 +- ci/jobs/build_clickhouse.py | 21 +- ci/jobs/clickbench.py | 4 +- ci/jobs/functional_tests.py | 32 +- ci/jobs/install_check.py | 9 +- ci/jobs/integration_test_check.py | 6 +- ci/jobs/scripts/clickhouse_proc.py | 25 +- ci/jobs/scripts/clickhouse_version.py | 73 +- ci/jobs/scripts/functional_tests_results.py | 53 +- ci/jobs/scripts/fuzzer/run-fuzzer.sh | 7 +- ci/jobs/scripts/integration_tests_runner.py | 136 +- ci/jobs/scripts/workflow_hooks/filter_job.py | 17 + ci/praktika/_environment.py | 39 +- ci/praktika/execution/__main__.py | 4 + ci/praktika/execution/execution_settings.py | 2 +- ci/praktika/gh.py | 4 +- ci/praktika/hook_cache.py | 2 +- ci/praktika/job.py | 2 +- ci/praktika/native_jobs.py | 21 +- ci/praktika/result.py | 2 + ci/praktika/runner.py | 12 + ci/praktika/s3.py | 41 + ci/praktika/yaml_additional_templates.py | 131 + ci/praktika/yaml_generator.py | 51 +- ci/settings/altinity_overrides.py | 55 + ci/settings/settings.py | 8 +- ci/workflows/VectorSearchStress.py | 2 +- ci/workflows/backport_branches.py | 1 + ci/workflows/master.py | 33 +- ci/workflows/merge_queue.py | 4 +- ci/workflows/pull_request.py | 63 +- ci/workflows/release_branches.py | 1 + cmake/autogenerated_versions.txt | 9 +- cmake/version.cmake | 9 +- docker/keeper/Dockerfile | 2 +- docker/server/README.md | 2 +- docker/server/README.src/github-repo | 2 +- docker/server/README.src/license.md | 2 +- docker/server/README.src/logo.svg | 56 +- docker/server/README.src/maintainer.md | 2 +- docker/test/upgrade/Dockerfile | 29 + packages/clickhouse-client.yaml | 6 +- packages/clickhouse-common-static-dbg.yaml | 6 +- packages/clickhouse-common-static.yaml | 6 +- packages/clickhouse-keeper-dbg.yaml | 6 +- packages/clickhouse-keeper.yaml | 6 +- packages/clickhouse-server.yaml | 6 +- programs/server/binary.html | 5 +- programs/server/dashboard.html | 2 +- programs/server/index.html | 109 +- programs/server/merges.html | 2 +- programs/server/play.html | 46 +- src/Common/SignalHandlers.cpp | 6 +- tests/broken_tests.json | 420 +++ tests/ci/changelog.py | 56 +- tests/ci/ci.py | 6 +- tests/ci/ci_buddy.py | 4 +- tests/ci/ci_cache.py | 3 +- tests/ci/ci_config.py | 24 +- tests/ci/ci_definitions.py | 45 +- tests/ci/ci_fuzzer_check.py | 3 +- tests/ci/clickhouse_helper.py | 18 +- tests/ci/commit_status_helper.py | 9 +- tests/ci/compatibility_check.py | 4 +- tests/ci/create_release.py | 2 +- tests/ci/docker_images_helper.py | 7 +- tests/ci/docker_server.py | 13 +- tests/ci/env_helper.py | 17 +- tests/ci/get_robot_token.py | 14 +- tests/ci/git_helper.py | 44 +- tests/ci/jepsen_check.py | 4 +- tests/ci/libfuzzer_test_check.py | 2 +- tests/ci/pr_info.py | 26 +- .../packaging/ansible/inventory/localhost.yml | 73 + .../roles/get_cloudfront_info/tasks/main.yml | 34 + .../ansible/roles/publish_pkgs/tasks/main.yml | 98 + .../roles/update_bin_repo/tasks/main.yml | 52 + .../roles/update_deb_repo/tasks/main.yml | 61 + .../templates/apt-ftparchive-stable.conf | 6 + 
.../templates/apt-ftparchive.conf | 17 + .../roles/update_rpm_repo/tasks/main.yml | 51 + .../roles/update_rpm_repo/templates/repo.j2 | 7 + .../update_rpm_repo/templates/rpmmacros.j2 | 1 + .../roles/update_tar_repo/tasks/main.yml | 61 + .../packaging/ansible/sign-and-release.yml | 8 + .../release/packaging/dirindex/dirindexgen.py | 122 + .../packaging/static/bootstrap.bundle.min.js | 7 + tests/ci/report.py | 5 +- tests/ci/s3_helper.py | 41 + tests/ci/sign_release.py | 97 + tests/ci/sqltest.py | 2 +- tests/ci/stress_check.py | 30 +- tests/ci/version_helper.py | 140 +- tests/config/config.d/azure_storage_conf.xml | 8 +- .../compose/docker_compose_arrowflight.yml | 2 +- .../compose/docker_compose_azurite.yml | 2 +- .../compose/docker_compose_clickhouse.yml | 2 +- .../compose/docker_compose_dotnet_client.yml | 2 +- .../compose/docker_compose_jdbc_bridge.yml | 1 + .../compose/docker_compose_keeper.yml | 6 +- .../docker_compose_kerberized_kafka.yml | 2 +- .../compose/docker_compose_kerberos_kdc.yml | 2 +- .../compose/docker_compose_minio.yml | 6 +- .../docker_compose_mysql_dotnet_client.yml | 2 +- .../docker_compose_mysql_golang_client.yml | 2 +- .../docker_compose_mysql_java_client.yml | 2 +- .../docker_compose_mysql_js_client.yml | 2 +- .../docker_compose_mysql_php_client.yml | 2 +- .../compose/docker_compose_nginx.yml | 2 +- .../docker_compose_postgresql_java_client.yml | 2 +- tests/integration/helpers/cluster.py | 11 +- tests/integration/helpers/network.py | 2 +- tests/integration/integration_test_images.py | 32 +- tests/integration/runner | 2 +- .../test_attach_partition_using_copy/test.py | 4 +- .../test_backward_compatibility/test.py | 2 +- .../test_aggregate_fixed_key.py | 2 +- .../test_aggregate_function_state.py | 4 +- .../test_convert_ordinary.py | 2 +- .../test_cte_distributed.py | 2 +- .../test_functions.py | 2 +- .../test_insert_profile_events.py | 2 +- .../test_ip_types_binary_compatibility.py | 2 +- .../test_memory_bound_aggregation.py | 4 +- .../test_normalized_count_comparison.py | 2 +- .../test_rocksdb_upgrade.py | 2 +- .../test_select_aggregate_alias_column.py | 2 +- .../test_short_strings_aggregation.py | 12 +- ...test_vertical_merges_from_compact_parts.py | 2 +- tests/integration/test_cow_policy/test.py | 4 +- tests/integration/test_database_delta/test.py | 2 +- .../test_disk_over_web_server/test.py | 2 +- .../test.py | 2 +- .../test_lightweight_updates/test.py | 2 +- tests/integration/test_old_versions/test.py | 2 +- .../test_polymorphic_parts/test.py | 2 +- .../test.py | 4 +- .../test_replicating_constants/test.py | 4 +- .../test_trace_log_build_id/test.py | 2 +- tests/integration/test_ttl_replicated/test.py | 6 +- tests/integration/test_version_update/test.py | 2 +- .../test.py | 6 +- .../queries/0_stateless/01528_play.reference | 2 +- tests/queries/0_stateless/01528_play.sh | 2 +- utils/tests-visualizer/index.html | 2 +- 231 files changed, 10565 insertions(+), 4834 deletions(-) create mode 100644 .github/ISSUE_TEMPLATE/10_project-antalya-bug-report.md delete mode 100644 .github/ISSUE_TEMPLATE/10_question.yaml delete mode 100644 .github/ISSUE_TEMPLATE/20_feature-request.yaml create mode 100644 .github/ISSUE_TEMPLATE/20_project-antalya-feature-request.md create mode 100644 .github/ISSUE_TEMPLATE/30_project-antalya-question.md delete mode 100644 .github/ISSUE_TEMPLATE/30_unexpected-behaviour.yaml delete mode 100644 .github/ISSUE_TEMPLATE/35_incomplete_implementation.yaml create mode 100644 .github/ISSUE_TEMPLATE/40_altinity-stable-bug-report.md delete mode 100644 
.github/ISSUE_TEMPLATE/45_usability-issue.yaml create mode 100644 .github/ISSUE_TEMPLATE/50_altinity-stable-question.md delete mode 100644 .github/ISSUE_TEMPLATE/50_build-issue.yaml delete mode 100644 .github/ISSUE_TEMPLATE/60_documentation-issue.yaml delete mode 100644 .github/ISSUE_TEMPLATE/70_performance-issue.yaml delete mode 100644 .github/ISSUE_TEMPLATE/80_backward-compatibility.yaml delete mode 100644 .github/ISSUE_TEMPLATE/85_bug-report.yaml delete mode 100644 .github/ISSUE_TEMPLATE/90_fuzzing-report.yaml delete mode 100644 .github/ISSUE_TEMPLATE/95_sanitizer-report.yaml delete mode 100644 .github/ISSUE_TEMPLATE/96_installation-issues.yaml create mode 100644 .github/actions/create_workflow_report/action.yml create mode 100644 .github/actions/create_workflow_report/ci_run_report.html.jinja create mode 100755 .github/actions/create_workflow_report/create_workflow_report.py create mode 100755 .github/actions/create_workflow_report/workflow_report_hook.sh create mode 100644 .github/actions/docker_setup/action.yml create mode 100644 .github/actions/runner_setup/action.yml create mode 100644 .github/grype/parse_vulnerabilities_grype.py create mode 100755 .github/grype/run_grype_scan.sh create mode 100755 .github/grype/transform_and_upload_results_s3.sh create mode 100755 .github/retry.sh create mode 100644 .github/workflows/README.md create mode 100644 .github/workflows/cancel.yml create mode 100644 .github/workflows/compare_fails.yml create mode 100644 .github/workflows/docker_publish.yml create mode 100644 .github/workflows/grype_scan.yml create mode 100644 .github/workflows/init_praktika.yml create mode 100644 .github/workflows/regression.yml create mode 100644 .github/workflows/repo-sanity-checks.yml create mode 100644 .github/workflows/reusable_sign.yml create mode 100644 .github/workflows/scheduled_runs.yml create mode 100644 .github/workflows/sign_and_release.yml create mode 100644 ci/praktika/yaml_additional_templates.py create mode 100644 ci/settings/altinity_overrides.py create mode 100644 docker/test/upgrade/Dockerfile create mode 100644 tests/broken_tests.json create mode 100644 tests/ci/release/packaging/ansible/inventory/localhost.yml create mode 100644 tests/ci/release/packaging/ansible/roles/get_cloudfront_info/tasks/main.yml create mode 100644 tests/ci/release/packaging/ansible/roles/publish_pkgs/tasks/main.yml create mode 100644 tests/ci/release/packaging/ansible/roles/update_bin_repo/tasks/main.yml create mode 100644 tests/ci/release/packaging/ansible/roles/update_deb_repo/tasks/main.yml create mode 100644 tests/ci/release/packaging/ansible/roles/update_deb_repo/templates/apt-ftparchive-stable.conf create mode 100644 tests/ci/release/packaging/ansible/roles/update_deb_repo/templates/apt-ftparchive.conf create mode 100644 tests/ci/release/packaging/ansible/roles/update_rpm_repo/tasks/main.yml create mode 100644 tests/ci/release/packaging/ansible/roles/update_rpm_repo/templates/repo.j2 create mode 100644 tests/ci/release/packaging/ansible/roles/update_rpm_repo/templates/rpmmacros.j2 create mode 100644 tests/ci/release/packaging/ansible/roles/update_tar_repo/tasks/main.yml create mode 100644 tests/ci/release/packaging/ansible/sign-and-release.yml create mode 100755 tests/ci/release/packaging/dirindex/dirindexgen.py create mode 100644 tests/ci/release/packaging/static/bootstrap.bundle.min.js create mode 100644 tests/ci/sign_release.py diff --git a/.github/ISSUE_TEMPLATE/10_project-antalya-bug-report.md b/.github/ISSUE_TEMPLATE/10_project-antalya-bug-report.md new file mode 
100644 index 000000000000..0c8c15a05eaf --- /dev/null +++ b/.github/ISSUE_TEMPLATE/10_project-antalya-bug-report.md @@ -0,0 +1,36 @@ +--- +name: Project Antalya Bug Report +about: Help us improve Project Antalya +title: '' +labels: antalya +assignees: '' + +--- + +**Describe the bug** +A clear and concise description of what the bug is. + + +**To Reproduce** +Steps to reproduce the behavior: +1. Go to '...' +2. Click on '....' +3. Scroll down to '....' +4. See error + +**Expected behavior** +A clear and concise description of what you expected to happen. + +**Screenshots** +If applicable, add screenshots to help explain your problem. + +**Key information** +Provide relevant runtime details. + - Project Antalya Build Version + - Cloud provider, e.g., AWS + - Kubernetes provider, e.g., GKE or Minikube + - Object storage, e.g., AWS S3 or Minio + - Iceberg catalog, e.g., Glue with REST Proxy + +**Additional context** +Add any other context about the problem here. diff --git a/.github/ISSUE_TEMPLATE/10_question.yaml b/.github/ISSUE_TEMPLATE/10_question.yaml deleted file mode 100644 index 71a3d3da6425..000000000000 --- a/.github/ISSUE_TEMPLATE/10_question.yaml +++ /dev/null @@ -1,20 +0,0 @@ -name: Question -description: Ask a question about ClickHouse -labels: ["question"] -body: - - type: markdown - attributes: - value: | - > Make sure to check documentation https://clickhouse.com/docs/ first. If the question is concise and probably has a short answer, asking it in [community Slack](https://join.slack.com/t/clickhousedb/shared_invite/zt-1gh9ds7f4-PgDhJAaF8ad5RbWBAAjzFg) is probably the fastest way to find the answer. For more complicated questions, consider asking them on StackOverflow with "clickhouse" tag https://stackoverflow.com/questions/tagged/clickhouse - - type: textarea - attributes: - label: Company or project name - description: Put your company name or project description here. - validations: - required: false - - type: textarea - attributes: - label: Question - description: Please put your question here. - validations: - required: true diff --git a/.github/ISSUE_TEMPLATE/20_feature-request.yaml b/.github/ISSUE_TEMPLATE/20_feature-request.yaml deleted file mode 100644 index 054efc2d61ee..000000000000 --- a/.github/ISSUE_TEMPLATE/20_feature-request.yaml +++ /dev/null @@ -1,38 +0,0 @@ -name: Feature request -description: Suggest an idea for ClickHouse -labels: ["feature"] -body: - - type: markdown - attributes: - value: | - > (you don't have to strictly follow this form) - - type: textarea - attributes: - label: Company or project name - description: Put your company name or project description here. - validations: - required: false - - type: textarea - attributes: - label: Use case - description: A clear and concise description of what the intended usage scenario is. - validations: - required: true - - type: textarea - attributes: - label: Describe the solution you'd like - description: A clear and concise description of what you want to happen. - validations: - required: true - - type: textarea - attributes: - label: Describe alternatives you've considered - description: A clear and concise description of any alternative solutions or features you've considered. - validations: - required: false - - type: textarea - attributes: - label: Additional context - description: Add any other context or screenshots about the feature request here. 
- validations: - required: false diff --git a/.github/ISSUE_TEMPLATE/20_project-antalya-feature-request.md b/.github/ISSUE_TEMPLATE/20_project-antalya-feature-request.md new file mode 100644 index 000000000000..603584bf4428 --- /dev/null +++ b/.github/ISSUE_TEMPLATE/20_project-antalya-feature-request.md @@ -0,0 +1,20 @@ +--- +name: Project Antalya Feature request +about: Suggest an idea for Project Antalya +title: '' +labels: antalya, enhancement +assignees: '' + +--- + +**Is your feature request related to a problem? Please describe.** +A clear and concise description of what the problem is. Ex. I'm always frustrated when [...] + +**Describe the solution you'd like** +A clear and concise description of what you want to happen. + +**Describe alternatives you've considered** +A clear and concise description of any alternative solutions or features you've considered. + +**Additional context** +Add any other context or screenshots about the feature request here. diff --git a/.github/ISSUE_TEMPLATE/30_project-antalya-question.md b/.github/ISSUE_TEMPLATE/30_project-antalya-question.md new file mode 100644 index 000000000000..c77cee4a916b --- /dev/null +++ b/.github/ISSUE_TEMPLATE/30_project-antalya-question.md @@ -0,0 +1,16 @@ +--- +name: Project Antalya Question +about: Ask a question about Project Antalya +title: '' +labels: '' +assignees: '' + +--- + +Make sure to check the [Altinity documentation](https://docs.altinity.com/) and the [Altinity Knowledge Base](https://kb.altinity.com/) first. + +If your question is concise and probably has a short answer, asking it in the [the Altinity Slack workspace](https://altinity.com/slack) is probably the fastest way to find the answer. Use the #antalya channel. + +If you'd rather file a GitHub issue, remove all this text and ask your question here. + +Please include relevant environment information as applicable. diff --git a/.github/ISSUE_TEMPLATE/30_unexpected-behaviour.yaml b/.github/ISSUE_TEMPLATE/30_unexpected-behaviour.yaml deleted file mode 100644 index 7a34c4bb7ba8..000000000000 --- a/.github/ISSUE_TEMPLATE/30_unexpected-behaviour.yaml +++ /dev/null @@ -1,55 +0,0 @@ -name: Unexpected behaviour -description: Some feature is working in non-obvious way -labels: ["unexpected behaviour"] -body: - - type: markdown - attributes: - value: | - > (you don't have to strictly follow this form) - - type: textarea - attributes: - label: Company or project name - description: Put your company name or project description here. - validations: - required: false - - type: textarea - attributes: - label: Describe the unexpected behaviour - description: A clear and concise description of what doesn't work as it is supposed to. - validations: - required: true - - type: textarea - attributes: - label: Which ClickHouse versions are affected? - validations: - required: true - - type: textarea - attributes: - label: How to reproduce - description: | - * Which ClickHouse server version to use - * Which interface to use, if matters - * Non-default settings, if any - * `CREATE TABLE` statements for all tables involved - * Sample data for all these tables, use [clickhouse-obfuscator](https://github.com/ClickHouse/ClickHouse/blob/c81bec37a58757be1e2b1ac6f20a62b3f14a31f1/programs/obfuscator/Obfuscator.cpp#L55-L95) if necessary - * Queries to run that lead to unexpected result - validations: - required: true - - type: textarea - attributes: - label: Expected behavior - description: A clear and concise description of what you expected to happen. 
- validations: - required: false - - type: textarea - attributes: - label: Error message and/or stacktrace - description: If applicable, add screenshots to help explain your problem. - validations: - required: false - - type: textarea - attributes: - label: Additional context - description: Add any other context about the problem here. - validations: - required: false diff --git a/.github/ISSUE_TEMPLATE/35_incomplete_implementation.yaml b/.github/ISSUE_TEMPLATE/35_incomplete_implementation.yaml deleted file mode 100644 index 969c1893e6f5..000000000000 --- a/.github/ISSUE_TEMPLATE/35_incomplete_implementation.yaml +++ /dev/null @@ -1,50 +0,0 @@ -name: Incomplete implementation -description: Implementation of existing feature is not finished -labels: ["unfinished code"] -body: - - type: markdown - attributes: - value: | - > (you don't have to strictly follow this form) - - type: textarea - attributes: - label: Company or project name - description: Put your company name or project description here. - validations: - required: false - - type: textarea - attributes: - label: Describe the unexpected behaviour - description: A clear and concise description of what works not as it is supposed to. - validations: - required: true - - type: textarea - attributes: - label: How to reproduce - description: | - * Which ClickHouse server version to use - * Which interface to use, if matters - * Non-default settings, if any - * `CREATE TABLE` statements for all tables involved - * Sample data for all these tables, use [clickhouse-obfuscator](https://github.com/ClickHouse/ClickHouse/blob/c81bec37a58757be1e2b1ac6f20a62b3f14a31f1/programs/obfuscator/Obfuscator.cpp#L55-L95) if necessary - * Queries to run that lead to unexpected result - validations: - required: true - - type: textarea - attributes: - label: Expected behavior - description: A clear and concise description of what you expected to happen. - validations: - required: false - - type: textarea - attributes: - label: Error message and/or stacktrace - description: If applicable, add screenshots to help explain your problem. - validations: - required: false - - type: textarea - attributes: - label: Additional context - description: Add any other context about the problem here. 
- validations: - required: false diff --git a/.github/ISSUE_TEMPLATE/40_altinity-stable-bug-report.md b/.github/ISSUE_TEMPLATE/40_altinity-stable-bug-report.md new file mode 100644 index 000000000000..90bf241dc195 --- /dev/null +++ b/.github/ISSUE_TEMPLATE/40_altinity-stable-bug-report.md @@ -0,0 +1,50 @@ +--- +name: Altinity Stable Bug report +about: Report something broken in an Altinity Stable Build +title: '' +labels: stable +assignees: '' + +--- + +✅ *I checked [the Altinity Stable Builds lifecycle table](https://docs.altinity.com/altinitystablebuilds/#altinity-stable-builds-life-cycle-table), and the Altinity Stable Build version I'm using is still supported.* + +## Type of problem +Choose one of the following items, then delete the others: + +**Bug report** - something's broken + +**Incomplete implementation** - something's not quite right + +**Performance issue** - something works, just not as quickly as it should + +**Backwards compatibility issue** - something used to work, but now it doesn't + +**Unexpected behavior** - something surprising happened, but it wasn't the good kind of surprise + +**Installation issue** - something doesn't install the way it should + +**Usability issue** - something works, but it could be a lot easier + +**Documentation issue** - something in the docs is wrong, incomplete, or confusing + +## Describe the situation +A clear, concise description of what's happening. Can you reproduce it in a ClickHouse Official build of the same version? + +## How to reproduce the behavior + +* Which Altinity Stable Build version to use +* Which interface to use, if it matters +* Non-default settings, if any +* `CREATE TABLE` statements for all tables involved +* Sample data for all these tables, use the [clickhouse-obfuscator](https://github.com/ClickHouse/ClickHouse/blob/31fd4f5eb41d5ec26724fc645c11fe4d62eae07f/programs/obfuscator/README.md) if necessary +* Queries to run that lead to an unexpected result + +## Expected behavior +A clear, concise description of what you expected to happen. + +## Logs, error messages, stacktraces, screenshots... +Add any details that might explain the issue. + +## Additional context +Add any other context about the issue here. diff --git a/.github/ISSUE_TEMPLATE/45_usability-issue.yaml b/.github/ISSUE_TEMPLATE/45_usability-issue.yaml deleted file mode 100644 index 0d2ae1a580e5..000000000000 --- a/.github/ISSUE_TEMPLATE/45_usability-issue.yaml +++ /dev/null @@ -1,48 +0,0 @@ -name: Usability improvement request -description: Report something can be made more convenient to use -labels: ["usability"] -body: - - type: markdown - attributes: - value: | - > (you don't have to strictly follow this form) - - type: textarea - attributes: - label: Company or project name - description: Put your company name or project description here. - validations: - required: false - - type: textarea - attributes: - label: Describe the improvement - description: A clear and concise description of what you want to happen - validations: - required: true - - type: textarea - attributes: - label: Which ClickHouse versions are affected? 
- validations: - required: true - - type: textarea - attributes: - label: How to reproduce - description: | - * Which interface to use, if matters - * Non-default settings, if any - * `CREATE TABLE` statements for all tables involved - * Sample data for all these tables, use [clickhouse-obfuscator](https://github.com/ClickHouse/ClickHouse/blob/c81bec37a58757be1e2b1ac6f20a62b3f14a31f1/programs/obfuscator/Obfuscator.cpp#L55-L95) if necessary - * Queries to run that lead to unexpected result - validations: - required: true - - type: textarea - attributes: - label: Error message and/or stacktrace - description: If applicable, add screenshots to help explain your problem. - validations: - required: false - - type: textarea - attributes: - label: Additional context - description: Add any other context about the problem here. - validations: - required: false diff --git a/.github/ISSUE_TEMPLATE/50_altinity-stable-question.md b/.github/ISSUE_TEMPLATE/50_altinity-stable-question.md new file mode 100644 index 000000000000..027970e25a02 --- /dev/null +++ b/.github/ISSUE_TEMPLATE/50_altinity-stable-question.md @@ -0,0 +1,16 @@ +--- +name: Altinity Stable Question +about: Ask a question about an Altinity Stable Build +title: '' +labels: question, stable +assignees: '' + +--- + +Make sure to check the [Altinity documentation](https://docs.altinity.com/) and the [Altinity Knowledge Base](https://kb.altinity.com/) first. + +If your question is concise and probably has a short answer, asking it in the [the Altinity Slack channel](https://altinity.com/slack) is probably the fastest way to find the answer. + +For more complicated questions, consider [asking them on StackOverflow with the tag "clickhouse"](https://stackoverflow.com/questions/tagged/clickhouse). + +If you'd rather file a GitHub issue, remove all this text and ask your question here. diff --git a/.github/ISSUE_TEMPLATE/50_build-issue.yaml b/.github/ISSUE_TEMPLATE/50_build-issue.yaml deleted file mode 100644 index 0549944c0bb2..000000000000 --- a/.github/ISSUE_TEMPLATE/50_build-issue.yaml +++ /dev/null @@ -1,50 +0,0 @@ -name: Build issue -description: Report failed ClickHouse build from master -labels: ["build"] -body: - - type: markdown - attributes: - value: | - > Make sure that `git diff` result is empty and you've just pulled fresh master. Try cleaning up cmake cache. Just in case, official build instructions are published here: https://clickhouse.com/docs/en/development/build/ - - type: textarea - attributes: - label: Company or project name - description: Put your company name or project description here. - validations: - required: false - - type: textarea - attributes: - label: Describe the problem - description: A clear and concise description of what doesn't work as it is supposed to. - validations: - required: true - - type: textarea - attributes: - label: Operating system - description: OS kind or distribution, specific version/release, non-standard kernel if any. If you are trying to build inside virtual machine, please mention it too. - validations: - required: false - - type: textarea - attributes: - label: CMake version - description: The output of `cmake --version`. - validations: - required: false - - type: textarea - attributes: - label: Ninja version - description: The output of `ninja --version`. - validations: - required: false - - type: textarea - attributes: - label: Compiler name and version - description: We recommend to use clang. The version can be obtained via `clang --version`. 
- validations: - required: false - - type: textarea - attributes: - label: Full cmake and/or ninja output with the error - description: Please include everything (use https://pastila.nl/ for large output)! - validations: - required: true diff --git a/.github/ISSUE_TEMPLATE/60_documentation-issue.yaml b/.github/ISSUE_TEMPLATE/60_documentation-issue.yaml deleted file mode 100644 index bba6df87a783..000000000000 --- a/.github/ISSUE_TEMPLATE/60_documentation-issue.yaml +++ /dev/null @@ -1,26 +0,0 @@ -name: Documentation issue -description: Report something incorrect or missing in documentation -labels: ["comp-documentation"] -body: - - type: markdown - attributes: - value: | - > Make sure that `git diff` result is empty and you've just pulled fresh master. Try cleaning up cmake cache. Just in case, official build instructions are published here: https://clickhouse.com/docs/en/development/build/ - - type: textarea - attributes: - label: Company or project name - description: Put your company name or project description here. - validations: - required: false - - type: textarea - attributes: - label: Describe the issue - description: A clear and concise description of what's wrong in documentation. - validations: - required: true - - type: textarea - attributes: - label: Additional context - description: Add any other context about the problem here. - validations: - required: false diff --git a/.github/ISSUE_TEMPLATE/70_performance-issue.yaml b/.github/ISSUE_TEMPLATE/70_performance-issue.yaml deleted file mode 100644 index 1df99dc76fda..000000000000 --- a/.github/ISSUE_TEMPLATE/70_performance-issue.yaml +++ /dev/null @@ -1,48 +0,0 @@ -name: Performance issue -description: Report something working slower than expected -labels: ["performance"] -body: - - type: markdown - attributes: - value: | - > (you don't have to strictly follow this form) - - type: textarea - attributes: - label: Company or project name - description: Put your company name or project description here. - validations: - required: false - - type: textarea - attributes: - label: Describe the situation - description: What exactly works slower than expected? - validations: - required: true - - type: textarea - attributes: - label: Which ClickHouse versions are affected? - validations: - required: true - - type: textarea - attributes: - label: How to reproduce - description: | - * Which interface to use, if matters - * Non-default settings, if any - * `CREATE TABLE` statements for all tables involved - * Sample data for all these tables, use [clickhouse-obfuscator](https://github.com/ClickHouse/ClickHouse/blob/c81bec37a58757be1e2b1ac6f20a62b3f14a31f1/programs/obfuscator/Obfuscator.cpp#L55-L95) if necessary - * Queries to run that lead to unexpected result - validations: - required: true - - type: textarea - attributes: - label: Expected performance - description: What are your performance expectation, why do you think they are realistic? Has it been working faster in older ClickHouse releases? Is it working faster in some specific other system? - validations: - required: false - - type: textarea - attributes: - label: Additional context - description: Add any other context about the problem here. 
- validations: - required: false diff --git a/.github/ISSUE_TEMPLATE/80_backward-compatibility.yaml b/.github/ISSUE_TEMPLATE/80_backward-compatibility.yaml deleted file mode 100644 index 72f56d781979..000000000000 --- a/.github/ISSUE_TEMPLATE/80_backward-compatibility.yaml +++ /dev/null @@ -1,48 +0,0 @@ -name: Backward compatibility issue -description: Report the case when the behaviour of a new version can break existing use cases -labels: ["backward compatibility"] -body: - - type: markdown - attributes: - value: | - > (you don't have to strictly follow this form) - - type: textarea - attributes: - label: Company or project name - description: Put your company name or project description here. - validations: - required: false - - type: textarea - attributes: - label: Describe the unexpected behaviour - description: A clear and concise description of what works not as it is supposed to. - validations: - required: true - - type: textarea - attributes: - label: Which ClickHouse versions are affected? - validations: - required: true - - type: textarea - attributes: - label: How to reproduce - description: | - * Which interface to use, if matters - * Non-default settings, if any - * `CREATE TABLE` statements for all tables involved - * Sample data for all these tables, use [clickhouse-obfuscator](https://github.com/ClickHouse/ClickHouse/blob/c81bec37a58757be1e2b1ac6f20a62b3f14a31f1/programs/obfuscator/Obfuscator.cpp#L55-L95) if necessary - * Queries to run that lead to unexpected result - validations: - required: true - - type: textarea - attributes: - label: Error message and/or stacktrace - description: If applicable, add screenshots to help explain your problem. - validations: - required: false - - type: textarea - attributes: - label: Additional context - description: Add any other context about the problem here. - validations: - required: false diff --git a/.github/ISSUE_TEMPLATE/85_bug-report.yaml b/.github/ISSUE_TEMPLATE/85_bug-report.yaml deleted file mode 100644 index 349bf82a3a4e..000000000000 --- a/.github/ISSUE_TEMPLATE/85_bug-report.yaml +++ /dev/null @@ -1,76 +0,0 @@ -name: Bug report -description: Wrong behavior (visible to users) in the official ClickHouse release. -labels: ["potential bug"] -body: - - type: markdown - attributes: - value: | - > Please make sure that the version you're using is still supported (you can find the list [here](https://github.com/ClickHouse/ClickHouse/blob/master/SECURITY.md#scope-and-supported-versions)). - > You have to provide the following information whenever possible. - - type: textarea - attributes: - label: Company or project name - description: Put your company name or project description here. - validations: - required: false - - type: textarea - attributes: - label: Describe what's wrong - description: | - * A clear and concise description of what works not as it is supposed to. - * A link to reproducer in [https://fiddle.clickhouse.com/](https://fiddle.clickhouse.com/). - validations: - required: true - - type: dropdown - attributes: - label: Does it reproduce on the most recent release? 
- description: | - [The list of releases](https://github.com/ClickHouse/ClickHouse/blob/master/utils/list-versions/version_date.tsv) - options: - - 'Yes' - - 'No' - validations: - required: true - - type: markdown - attributes: - value: | - ----- - > Change "enabled" to true in "send_crash_reports" section in `config.xml`: - ```xml - - - - true - - ``` - ----- - - type: textarea - attributes: - label: How to reproduce - description: | - * Which ClickHouse server version to use - * Which interface to use, if matters - * Non-default settings, if any - * `CREATE TABLE` statements for all tables involved - * Sample data for all these tables, use [clickhouse-obfuscator](https://github.com/ClickHouse/ClickHouse/blob/c81bec37a58757be1e2b1ac6f20a62b3f14a31f1/programs/obfuscator/Obfuscator.cpp#L55-L95) if necessary - * Queries to run that lead to unexpected result - validations: - required: true - - type: textarea - attributes: - label: Expected behavior - description: A clear and concise description of what you expected to happen. - validations: - required: false - - type: textarea - attributes: - label: Error message and/or stacktrace - description: If applicable, add screenshots to help explain your problem. - validations: - required: false - - type: textarea - attributes: - label: Additional context - description: Add any other context about the problem here. - validations: - required: false diff --git a/.github/ISSUE_TEMPLATE/90_fuzzing-report.yaml b/.github/ISSUE_TEMPLATE/90_fuzzing-report.yaml deleted file mode 100644 index 84dc8a372e5a..000000000000 --- a/.github/ISSUE_TEMPLATE/90_fuzzing-report.yaml +++ /dev/null @@ -1,26 +0,0 @@ -name: Assertion found via fuzzing -description: Potential issue has been found via Fuzzer or Stress tests -labels: ["fuzz"] -body: - - type: markdown - attributes: - value: | - > (you don't have to strictly follow this form) - - type: textarea - attributes: - label: Describe the bug - description: A link to the report. - validations: - required: true - - type: textarea - attributes: - label: How to reproduce - description: Try to reproduce the report and copy the tables and queries involved. - validations: - required: false - - type: textarea - attributes: - label: Error message and/or stacktrace - description: You can find additional information in server logs. - validations: - required: false diff --git a/.github/ISSUE_TEMPLATE/95_sanitizer-report.yaml b/.github/ISSUE_TEMPLATE/95_sanitizer-report.yaml deleted file mode 100644 index 7bb47e2b824b..000000000000 --- a/.github/ISSUE_TEMPLATE/95_sanitizer-report.yaml +++ /dev/null @@ -1,26 +0,0 @@ -name: Sanitizer alert -description: Potential issue has been found by special code instrumentation -labels: ["testing"] -body: - - type: markdown - attributes: - value: | - > (you don't have to strictly follow this form) - - type: textarea - attributes: - label: Describe the bug - description: A link to the report. - validations: - required: true - - type: textarea - attributes: - label: How to reproduce - description: Try to reproduce the report and copy the tables and queries involved. - validations: - required: false - - type: textarea - attributes: - label: Error message and/or stacktrace - description: You can find additional information in server logs. 
- validations: - required: false diff --git a/.github/ISSUE_TEMPLATE/96_installation-issues.yaml b/.github/ISSUE_TEMPLATE/96_installation-issues.yaml deleted file mode 100644 index f71f6079453e..000000000000 --- a/.github/ISSUE_TEMPLATE/96_installation-issues.yaml +++ /dev/null @@ -1,46 +0,0 @@ -name: Installation issue -description: Issue with ClickHouse installation from https://clickhouse.com/docs/en/install/ -labels: ["comp-install"] -body: - - type: markdown - attributes: - value: | - > **I have tried the following solutions**: https://clickhouse.com/docs/en/faq/troubleshooting/#troubleshooting-installation-errors - - type: textarea - attributes: - label: Company or project name - description: Put your company name or project description here. - validations: - required: false - - type: textarea - attributes: - label: Installation type - description: Packages, docker, single binary, curl? - validations: - required: true - - type: textarea - attributes: - label: Source of the ClickHouse - description: A link to the source. Or the command you've tried. - validations: - required: true - - type: textarea - attributes: - label: Describe the problem. - description: What went wrong and what is the expected result? - validations: - required: true - - type: textarea - attributes: - label: Error message and/or stacktrace - description: You can find additional information in server logs. - validations: - required: false - - type: textarea - attributes: - label: How to reproduce - description: | - * For Linux-based operating systems: provide a script for clear docker container from the official image - * For anything else: steps to reproduce on as much as possible clear system - validations: - required: false diff --git a/.github/PULL_REQUEST_TEMPLATE.md b/.github/PULL_REQUEST_TEMPLATE.md index 7172463a5781..03b95568dea6 100644 --- a/.github/PULL_REQUEST_TEMPLATE.md +++ b/.github/PULL_REQUEST_TEMPLATE.md @@ -22,20 +22,19 @@ tests/ci/cancel_and_rerun_workflow_lambda/app.py ... ### Documentation entry for user-facing changes +... 
-- [ ] Documentation is written (mandatory for new features) - - +#### Exclude tests: +- [ ] Fast test +- [ ] Integration Tests +- [ ] Stateless tests +- [ ] Stateful tests +- [ ] Performance tests +- [ ] All with ASAN +- [ ] All with TSAN +- [ ] All with MSAN +- [ ] All with UBSAN +- [ ] All with Coverage +- [ ] All with Aarch64 +- [ ] All Regression +- [ ] Disable CI Cache diff --git a/.github/actionlint.yml b/.github/actionlint.yml index cf5f575e3c74..904a548dadd5 100644 --- a/.github/actionlint.yml +++ b/.github/actionlint.yml @@ -1,9 +1,9 @@ self-hosted-runner: labels: - - builder - - func-tester - - func-tester-aarch64 + - altinity-builder + - altinity-func-tester + - altinity-func-tester-aarch64 - fuzzer-unit-tester - - style-checker - - style-checker-aarch64 + - altinity-style-checker + - altinity-style-checker-aarch64 - release-maker diff --git a/.github/actions/create_workflow_report/action.yml b/.github/actions/create_workflow_report/action.yml new file mode 100644 index 000000000000..ff69d336ea19 --- /dev/null +++ b/.github/actions/create_workflow_report/action.yml @@ -0,0 +1,52 @@ +name: Create and Upload Combined Report +description: Create and upload a combined CI report +inputs: + workflow_config: + description: "Workflow config" + required: true + final: + description: "Control whether the report is final or a preview" + required: false + default: "false" +runs: + using: "composite" + steps: + - name: Create workflow config + shell: bash + run: | + mkdir -p ./ci/tmp + cat > ./ci/tmp/workflow_config.json << 'EOF' + ${{ inputs.workflow_config }} + EOF + + - name: Create and upload workflow report + env: + PR_NUMBER: ${{ github.event.pull_request.number || 0 }} + ACTIONS_RUN_URL: ${{ github.event.repository.html_url }}/actions/runs/${{ github.run_id }} + COMMIT_SHA: ${{ steps.set_version.outputs.commit_sha || github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }} + FINAL: ${{ inputs.final }} + shell: bash + run: | + pip install clickhouse-driver==0.2.8 numpy==1.26.4 pandas==2.0.3 jinja2==3.1.5 + + CMD="python3 .github/actions/create_workflow_report/create_workflow_report.py" + ARGS="--actions-run-url $ACTIONS_RUN_URL --known-fails tests/broken_tests.json --cves --pr-number $PR_NUMBER" + + set +e -x + if [[ "$FINAL" == "false" ]]; then + REPORT_LINK=$($CMD $ARGS --mark-preview) + else + REPORT_LINK=$($CMD $ARGS) + fi + + echo $REPORT_LINK + + if [[ "$FINAL" == "true" ]]; then + IS_VALID_URL=$(echo $REPORT_LINK | grep -E '^https?://') + if [[ -n $IS_VALID_URL ]]; then + echo "Workflow Run Report: [View Report]($REPORT_LINK)" >> $GITHUB_STEP_SUMMARY + else + echo "Error: $REPORT_LINK" >> $GITHUB_STEP_SUMMARY + exit 1 + fi + fi diff --git a/.github/actions/create_workflow_report/ci_run_report.html.jinja b/.github/actions/create_workflow_report/ci_run_report.html.jinja new file mode 100644 index 000000000000..ac9a7a70bb01 --- /dev/null +++ b/.github/actions/create_workflow_report/ci_run_report.html.jinja @@ -0,0 +1,272 @@ + + + + + + + + {%- if is_preview %} + + {%- endif %} + + {{ title }} + + + + +

+  <h1>{{ title }}</h1>
+
+  <table>
+    <tr><td>Pull Request</td><td>{{ pr_info_html }}</td></tr>
+    <tr><td>Workflow Run</td><td>{{ workflow_id }}</td></tr>
+    <tr><td>Commit</td><td>{{ commit_sha }}</td></tr>
+    <tr><td>Build Report</td><td>{% for job_name, link in build_report_links.items() %}<a href="{{ link }}">[{{ job_name }}]</a> {% endfor %}</td></tr>
+    <tr><td>Date</td><td>{{ date }}</td></tr>
+  </table>
+
+  {% if is_preview %}
+  <p>This is a preview. The workflow is not yet finished.</p>
+  {% endif %}
+
+  <h2>Table of Contents</h2>
+  <ul>
+    <li><a href="#new-fails-in-pr">New Fails in PR</a></li>
+    <li><a href="#ci-jobs-status">CI Jobs Status</a></li>
+    <li><a href="#checks-errors">Checks Errors</a></li>
+    <li><a href="#checks-new-fails">Checks New Fails</a></li>
+    <li><a href="#regression-new-fails">Regression New Fails</a></li>
+    <li><a href="#docker-images-cves">Docker Images CVEs</a></li>
+    <li><a href="#checks-known-fails">Checks Known Fails</a></li>
+  </ul>
+
+  {%- if pr_number != 0 -%}
+  <h2 id="new-fails-in-pr">New Fails in PR</h2>
+  <p>Compared with base sha {{ base_sha }}</p>
+  {{ new_fails_html }}
+  {%- endif %}
+
+  <h2 id="ci-jobs-status">CI Jobs Status</h2>
+  {{ ci_jobs_status_html }}
+
+  <h2 id="checks-errors">Checks Errors</h2>
+  {{ checks_errors_html }}
+
+  <h2 id="checks-new-fails">Checks New Fails</h2>
+  {{ checks_fails_html }}
+
+  <h2 id="regression-new-fails">Regression New Fails</h2>
+  {{ regression_fails_html }}
+
+  <h2 id="docker-images-cves">Docker Images CVEs</h2>
+  {{ docker_images_cves_html }}
+
+  <h2 id="checks-known-fails">Checks Known Fails</h2>
+  <p>
+    Fail reason conventions:<br>
+    KNOWN - Accepted fail and fix is not planned<br>
+    INVESTIGATE - We don't know why it fails<br>
+    NEEDSFIX - Investigation done and a fix is needed to make it pass
+  </p>
+ {{ checks_known_fails_html }} + + + + \ No newline at end of file diff --git a/.github/actions/create_workflow_report/create_workflow_report.py b/.github/actions/create_workflow_report/create_workflow_report.py new file mode 100755 index 000000000000..56f7fd829525 --- /dev/null +++ b/.github/actions/create_workflow_report/create_workflow_report.py @@ -0,0 +1,864 @@ +#!/usr/bin/env python3 +import argparse +import os +from pathlib import Path +from itertools import combinations +import json +from datetime import datetime +from functools import lru_cache +from glob import glob +import urllib.parse + +import pandas as pd +from jinja2 import Environment, FileSystemLoader +import requests +from clickhouse_driver import Client +import boto3 +from botocore.exceptions import NoCredentialsError + +DATABASE_HOST_VAR = "CHECKS_DATABASE_HOST" +DATABASE_USER_VAR = "CLICKHOUSE_TEST_STAT_LOGIN" +DATABASE_PASSWORD_VAR = "CLICKHOUSE_TEST_STAT_PASSWORD" +S3_BUCKET = "altinity-build-artifacts" +GITHUB_REPO = "Altinity/ClickHouse" +GITHUB_TOKEN = os.getenv("GITHUB_TOKEN") or os.getenv("GH_TOKEN") + +def get_commit_statuses(sha: str) -> pd.DataFrame: + """ + Fetch commit statuses for a given SHA and return as a pandas DataFrame. + Handles pagination to get all statuses. + + Args: + sha (str): Commit SHA to fetch statuses for. + + Returns: + pd.DataFrame: DataFrame containing all statuses. + """ + headers = { + "Authorization": f"token {GITHUB_TOKEN}", + "Accept": "application/vnd.github.v3+json", + } + + url = f"https://api.github.com/repos/{GITHUB_REPO}/commits/{sha}/statuses" + + all_data = [] + + while url: + response = requests.get(url, headers=headers) + + if response.status_code != 200: + raise Exception( + f"Failed to fetch statuses: {response.status_code} {response.text}" + ) + + data = response.json() + all_data.extend(data) + + # Check for pagination links in the response headers + if "Link" in response.headers: + links = response.headers["Link"].split(",") + next_url = None + + for link in links: + parts = link.strip().split(";") + if len(parts) == 2 and 'rel="next"' in parts[1]: + next_url = parts[0].strip("<>") + break + + url = next_url + else: + url = None + + # Parse relevant fields + parsed = [ + { + "job_name": item["context"], + "job_status": item["state"], + "message": item["description"], + "results_link": item["target_url"], + } + for item in all_data + ] + + # Create DataFrame + df = pd.DataFrame(parsed) + + # Drop duplicates keeping the first occurrence (newest status for each context) + # GitHub returns statuses in reverse chronological order + df = df.drop_duplicates(subset=["job_name"], keep="first") + + # Sort by status and job name + return df.sort_values( + by=["job_status", "job_name"], ascending=[True, True] + ).reset_index(drop=True) + + +def get_pr_info_from_number(pr_number: str) -> dict: + """ + Fetch pull request information for a given PR number. + + Args: + pr_number (str): Pull request number to fetch information for. + + Returns: + dict: Dictionary containing PR information. 
+ """ + headers = { + "Authorization": f"token {GITHUB_TOKEN}", + "Accept": "application/vnd.github.v3+json", + } + + url = f"https://api.github.com/repos/{GITHUB_REPO}/pulls/{pr_number}" + response = requests.get(url, headers=headers) + + if response.status_code != 200: + raise Exception( + f"Failed to fetch pull request info: {response.status_code} {response.text}" + ) + + return response.json() + + +@lru_cache +def get_run_details(run_url: str) -> dict: + """ + Fetch run details for a given run URL. + """ + run_id = run_url.split("/")[-1] + + headers = { + "Authorization": f"token {GITHUB_TOKEN}", + "Accept": "application/vnd.github.v3+json", + } + + url = f"https://api.github.com/repos/{GITHUB_REPO}/actions/runs/{run_id}" + response = requests.get(url, headers=headers) + + if response.status_code != 200: + raise Exception( + f"Failed to fetch run details: {response.status_code} {response.text}" + ) + + return response.json() + + +def get_checks_fails(client: Client, commit_sha: str, branch_name: str): + """ + Get tests that did not succeed for the given commit and branch. + Exclude checks that have status 'error' as they are counted in get_checks_errors. + """ + query = f"""SELECT job_status, job_name, status as test_status, test_name, results_link + FROM ( + SELECT + argMax(check_status, check_start_time) as job_status, + check_name as job_name, + argMax(test_status, check_start_time) as status, + test_name, + report_url as results_link, + task_url + FROM `gh-data`.checks + WHERE commit_sha='{commit_sha}' AND head_ref='{branch_name}' + GROUP BY check_name, test_name, report_url, task_url + ) + WHERE test_status IN ('FAIL', 'ERROR') + AND job_status!='error' + ORDER BY job_name, test_name + """ + return client.query_dataframe(query) + + +def get_checks_known_fails( + client: Client, commit_sha: str, branch_name: str, known_fails: dict +): + """ + Get tests that are known to fail for the given commit and branch. + """ + if len(known_fails) == 0: + return pd.DataFrame() + + query = f"""SELECT job_status, job_name, status as test_status, test_name, results_link + FROM ( + SELECT + argMax(check_status, check_start_time) as job_status, + check_name as job_name, + argMax(test_status, check_start_time) as status, + test_name, + report_url as results_link, + task_url + FROM `gh-data`.checks + WHERE commit_sha='{commit_sha}' AND head_ref='{branch_name}' + GROUP BY check_name, test_name, report_url, task_url + ) + WHERE test_status='BROKEN' + AND test_name IN ({','.join(f"'{test}'" for test in known_fails.keys())}) + ORDER BY job_name, test_name + """ + + df = client.query_dataframe(query) + + df.insert( + len(df.columns) - 1, + "reason", + df["test_name"] + .astype(str) + .apply( + lambda test_name: known_fails[test_name].get("reason", "No reason given") + ), + ) + + return df + + +def get_checks_errors(client: Client, commit_sha: str, branch_name: str): + """ + Get checks that have status 'error' for the given commit and branch. 
+ """ + query = f"""SELECT job_status, job_name, status as test_status, test_name, results_link + FROM ( + SELECT + argMax(check_status, check_start_time) as job_status, + check_name as job_name, + argMax(test_status, check_start_time) as status, + test_name, + report_url as results_link, + task_url + FROM `gh-data`.checks + WHERE commit_sha='{commit_sha}' AND head_ref='{branch_name}' + GROUP BY check_name, test_name, report_url, task_url + ) + WHERE job_status=='error' + ORDER BY job_name, test_name + """ + return client.query_dataframe(query) + + +def drop_prefix_rows(df, column_to_clean): + """ + Drop rows from the dataframe if: + - the row matches another row completely except for the specified column + - the specified column of that row is a prefix of the same column in another row + """ + to_drop = set() + reference_columns = [col for col in df.columns if col != column_to_clean] + for (i, row_1), (j, row_2) in combinations(df.iterrows(), 2): + if all(row_1[col] == row_2[col] for col in reference_columns): + if row_2[column_to_clean].startswith(row_1[column_to_clean]): + to_drop.add(i) + elif row_1[column_to_clean].startswith(row_2[column_to_clean]): + to_drop.add(j) + return df.drop(to_drop) + + +def get_regression_fails(client: Client, job_url: str): + """ + Get regression tests that did not succeed for the given job URL. + """ + # If you rename the alias for report_url, also update the formatters in format_results_as_html_table + # Nested SELECT handles test reruns + query = f"""SELECT arch, job_name, status, test_name, results_link + FROM ( + SELECT + architecture as arch, + test_name, + argMax(result, start_time) AS status, + job_name, + report_url as results_link, + job_url + FROM `gh-data`.clickhouse_regression_results + GROUP BY architecture, test_name, job_url, job_name, report_url + ORDER BY length(test_name) DESC + ) + WHERE job_url LIKE '{job_url}%' + AND status IN ('Fail', 'Error') + """ + df = client.query_dataframe(query) + df = drop_prefix_rows(df, "test_name") + df["job_name"] = df["job_name"].str.title() + return df + + +def get_new_fails_this_pr( + client: Client, + pr_info: dict, + checks_fails: pd.DataFrame, + regression_fails: pd.DataFrame, +): + """ + Get tests that failed in the PR but passed in the base branch. + Compares both checks and regression test results. 
+ """ + base_sha = pr_info.get("base", {}).get("sha") + if not base_sha: + raise Exception("No base SHA found for PR") + + # Modify tables to have the same columns + if len(checks_fails) > 0: + checks_fails = checks_fails.copy().drop(columns=["job_status"]) + if len(regression_fails) > 0: + regression_fails = regression_fails.copy() + regression_fails["job_name"] = regression_fails.apply( + lambda row: f"{row['arch']} {row['job_name']}".strip(), axis=1 + ) + regression_fails["test_status"] = regression_fails["status"] + + # Combine both types of fails and select only desired columns + desired_columns = ["job_name", "test_name", "test_status", "results_link"] + all_pr_fails = pd.concat([checks_fails, regression_fails], ignore_index=True)[ + desired_columns + ] + if len(all_pr_fails) == 0: + return pd.DataFrame() + + # Get all checks from the base branch that didn't fail + base_checks_query = f"""SELECT job_name, status as test_status, test_name, results_link + FROM ( + SELECT + check_name as job_name, + argMax(test_status, check_start_time) as status, + test_name, + report_url as results_link, + task_url + FROM `gh-data`.checks + WHERE commit_sha='{base_sha}' + GROUP BY check_name, test_name, report_url, task_url + ) + WHERE test_status NOT IN ('FAIL', 'ERROR') + ORDER BY job_name, test_name + """ + base_checks = client.query_dataframe(base_checks_query) + + # Get regression results from base branch that didn't fail + base_regression_query = f"""SELECT arch, job_name, status, test_name, results_link + FROM ( + SELECT + architecture as arch, + test_name, + argMax(result, start_time) AS status, + job_url, + job_name, + report_url as results_link + FROM `gh-data`.clickhouse_regression_results + WHERE results_link LIKE'%/{base_sha}/%' + GROUP BY architecture, test_name, job_url, job_name, report_url + ORDER BY length(test_name) DESC + ) + WHERE status NOT IN ('Fail', 'Error') + """ + base_regression = client.query_dataframe(base_regression_query) + if len(base_regression) > 0: + base_regression["job_name"] = base_regression.apply( + lambda row: f"{row['arch']} {row['job_name']}".strip(), axis=1 + ) + base_regression["test_status"] = base_regression["status"] + base_regression = base_regression.drop(columns=["arch", "status"]) + + # Combine base results + base_results = pd.concat([base_checks, base_regression], ignore_index=True) + + # Find tests that failed in PR but passed in base + pr_failed_tests = set(zip(all_pr_fails["job_name"], all_pr_fails["test_name"])) + base_passed_tests = set(zip(base_results["job_name"], base_results["test_name"])) + + new_fails = pr_failed_tests.intersection(base_passed_tests) + + # Filter PR results to only include new fails + mask = all_pr_fails.apply( + lambda row: (row["job_name"], row["test_name"]) in new_fails, axis=1 + ) + new_fails_df = all_pr_fails[mask] + + return new_fails_df + + +@lru_cache +def get_workflow_config() -> dict: + workflow_config_files = glob("./ci/tmp/workflow_config*.json") + if len(workflow_config_files) == 0: + raise Exception("No workflow config file found") + if len(workflow_config_files) > 1: + raise Exception("Multiple workflow config files found") + with open(workflow_config_files[0], "r") as f: + return json.load(f) + + +def get_cached_job(job_name: str) -> dict: + workflow_config = get_workflow_config() + return workflow_config["cache_jobs"].get(job_name, {}) + + +def get_cves(pr_number, commit_sha, branch): + """ + Fetch Grype results from S3. + + If no results are available for download, returns ... (Ellipsis). 
+ """ + s3_client = boto3.client("s3", endpoint_url=os.getenv("S3_URL")) + prefixes_to_check = set() + + def format_prefix(pr_number, commit_sha, branch): + if pr_number == 0: + return f"REFs/{branch}/{commit_sha}/grype/" + else: + return f"PRs/{pr_number}/{commit_sha}/grype/" + + cached_server_job = get_cached_job("Docker server image") + if cached_server_job: + prefixes_to_check.add( + format_prefix( + cached_server_job["pr_number"], + cached_server_job["sha"], + cached_server_job["branch"], + ) + ) + cached_keeper_job = get_cached_job("Docker keeper image") + if cached_keeper_job: + prefixes_to_check.add( + format_prefix( + cached_keeper_job["pr_number"], + cached_keeper_job["sha"], + cached_keeper_job["branch"], + ) + ) + + if not prefixes_to_check: + prefixes_to_check = {format_prefix(pr_number, commit_sha, branch)} + + grype_result_dirs = [] + for s3_prefix in prefixes_to_check: + try: + response = s3_client.list_objects_v2( + Bucket=S3_BUCKET, Prefix=s3_prefix, Delimiter="/" + ) + grype_result_dirs.extend( + content["Prefix"] for content in response.get("CommonPrefixes", []) + ) + except Exception as e: + print(f"Error listing S3 objects at {s3_prefix}: {e}") + continue + + if len(grype_result_dirs) == 0: + # We were asked to check the CVE data, but none was found, + # maybe this is a preview report and grype results are not available yet + return ... + + results = [] + for path in grype_result_dirs: + file_key = f"{path}result.json" + try: + file_response = s3_client.get_object(Bucket=S3_BUCKET, Key=file_key) + content = file_response["Body"].read().decode("utf-8") + results.append(json.loads(content)) + except Exception as e: + print(f"Error getting S3 object at {file_key}: {e}") + continue + + rows = [] + for scan_result in results: + for match in scan_result["matches"]: + rows.append( + { + "docker_image": scan_result["source"]["target"]["userInput"], + "severity": match["vulnerability"]["severity"], + "identifier": match["vulnerability"]["id"], + "namespace": match["vulnerability"]["namespace"], + } + ) + + if len(rows) == 0: + return pd.DataFrame() + + df = pd.DataFrame(rows).drop_duplicates() + df = df.sort_values( + by="severity", + key=lambda col: col.str.lower().map( + {"critical": 1, "high": 2, "medium": 3, "low": 4, "negligible": 5} + ), + ) + return df + + +def url_to_html_link(url: str) -> str: + if not url: + return "" + text = url.split("/")[-1].split("?")[0] + if not text: + text = "results" + return f'{text}' + + +def format_test_name_for_linewrap(text: str) -> str: + """Tweak the test name to improve line wrapping.""" + return f'{text}' + + +def format_test_status(text: str) -> str: + """Format the test status for better readability.""" + if text.lower().startswith("fail"): + color = "red" + elif text.lower() == "skipped": + color = "grey" + elif text.lower() in ("success", "ok", "passed", "pass"): + color = "green" + else: + color = "orange" + + return f'{text}' + + +def format_results_as_html_table(results) -> str: + if len(results) == 0: + return "

Nothing to report

" + results.columns = [col.replace("_", " ").title() for col in results.columns] + html = results.to_html( + index=False, + formatters={ + "Results Link": url_to_html_link, + "Test Name": format_test_name_for_linewrap, + "Test Status": format_test_status, + "Job Status": format_test_status, + "Status": format_test_status, + "Message": lambda m: m.replace("\n", " "), + "Identifier": lambda i: url_to_html_link( + "https://nvd.nist.gov/vuln/detail/" + i + ), + }, + escape=False, + border=0, + classes=["test-results-table"], + ) + return html + + +def backfill_skipped_statuses( + job_statuses: pd.DataFrame, pr_number: int, branch: str, commit_sha: str +): + """ + Fill in the job statuses for skipped jobs. + """ + + if pr_number == 0: + ref_param = f"REF={branch}" + workflow_name = "MasterCI" + else: + ref_param = f"PR={pr_number}" + workflow_name = "PR" + + status_file = f"result_{workflow_name.lower()}.json" + s3_path = f"https://{S3_BUCKET}.s3.amazonaws.com/{ref_param.replace('=', 's/')}/{commit_sha}/{status_file}" + response = requests.get(s3_path) + + if response.status_code != 200: + return job_statuses + + status_data = response.json() + skipped_jobs = [] + for job in status_data["results"]: + if job["status"] == "skipped" and len(job["links"]) > 0: + skipped_jobs.append( + { + "job_name": job["name"], + "job_status": job["status"], + "message": job["info"], + "results_link": job["links"][0], + } + ) + + return pd.concat([job_statuses, pd.DataFrame(skipped_jobs)], ignore_index=True) + + +def get_build_report_links( + job_statuses: pd.DataFrame, pr_number: int, branch: str, commit_sha: str +): + """ + Get the build report links for the given PR number, branch, and commit SHA. + + First checks if a build job submitted a success or skipped status. + If not available, it guesses the links. + """ + build_job_names = [ + "Build (amd_release)", + "Build (arm_release)", + "Docker server image", + "Docker keeper image", + ] + build_report_links = {} + + for job in job_statuses.itertuples(): + if ( + job.job_name in build_job_names + and job.job_status + in ( + "success", + "skipped", + ) + and job.results_link + ): + build_report_links[job.job_name] = job.results_link + + if 0 < len(build_report_links) < len(build_job_names): + # Only have some of the build jobs, guess the rest. + # (It was straightforward to force the build jobs to always appear in the cache, + # however doing the same for the docker image jobs is difficult.) 
+ ref_job, ref_link = list(build_report_links.items())[0] + link_template = ref_link.replace( + urllib.parse.quote(ref_job, safe=""), "{job_name}" + ) + for job in build_job_names: + if job not in build_report_links: + build_report_links[job] = link_template.format(job_name=job) + + if len(build_report_links) > 0: + return build_report_links + + # No cache or build result was found, guess the links + if pr_number == 0: + ref_param = f"REF={branch}" + workflow_name = "MasterCI" + else: + ref_param = f"PR={pr_number}" + workflow_name = "PR" + + build_report_link_base = f"https://{S3_BUCKET}.s3.amazonaws.com/json.html?{ref_param}&sha={commit_sha}&name_0={urllib.parse.quote(workflow_name, safe='')}" + build_report_links = { + job_name: f"{build_report_link_base}&name_1={urllib.parse.quote(job_name, safe='')}" + for job_name in build_job_names + } + return build_report_links + + +def parse_args() -> argparse.Namespace: + parser = argparse.ArgumentParser(description="Create a combined CI report.") + parser.add_argument( # Need the full URL rather than just the ID to query the databases + "--actions-run-url", required=True, help="URL of the actions run" + ) + parser.add_argument( + "--pr-number", help="Pull request number for the S3 path", type=int + ) + parser.add_argument("--commit-sha", help="Commit SHA for the S3 path") + parser.add_argument( + "--no-upload", action="store_true", help="Do not upload the report" + ) + parser.add_argument( + "--known-fails", type=str, help="Path to the file with known fails" + ) + parser.add_argument( + "--cves", action="store_true", help="Get CVEs from Grype results" + ) + parser.add_argument( + "--mark-preview", action="store_true", help="Mark the report as a preview" + ) + return parser.parse_args() + + +def create_workflow_report( + actions_run_url: str, + pr_number: int = None, + commit_sha: str = None, + no_upload: bool = False, + known_fails: str = None, + check_cves: bool = False, + mark_preview: bool = False, +) -> str: + if pr_number is None or commit_sha is None: + run_details = get_run_details(actions_run_url) + if pr_number is None: + if len(run_details["pull_requests"]) > 0: + pr_number = run_details["pull_requests"][0]["number"] + else: + pr_number = 0 + if commit_sha is None: + commit_sha = run_details["head_commit"]["id"] + + host = os.getenv(DATABASE_HOST_VAR) + if not host: + print(f"{DATABASE_HOST_VAR} is not set") + user = os.getenv(DATABASE_USER_VAR) + if not user: + print(f"{DATABASE_USER_VAR} is not set") + password = os.getenv(DATABASE_PASSWORD_VAR) + if not password: + print(f"{DATABASE_PASSWORD_VAR} is not set") + if not GITHUB_TOKEN: + print("GITHUB_TOKEN is not set") + if not all([host, user, password, GITHUB_TOKEN]): + raise Exception("Required environment variables are not set") + + db_client = Client( + host=host, + user=user, + password=password, + port=9440, + secure="y", + verify=False, + settings={"use_numpy": True}, + ) + + run_details = get_run_details(actions_run_url) + branch_name = run_details.get("head_branch", "unknown branch") + + fail_results = { + "job_statuses": get_commit_statuses(commit_sha), + "checks_fails": get_checks_fails(db_client, commit_sha, branch_name), + "checks_known_fails": [], + "pr_new_fails": [], + "checks_errors": get_checks_errors(db_client, commit_sha, branch_name), + "regression_fails": get_regression_fails(db_client, actions_run_url), + "docker_images_cves": ( + [] if not check_cves else get_cves(pr_number, commit_sha, branch_name) + ), + } + + # get_cves returns ... 
in the case where no Grype result files were found. + # This might occur when run in preview mode. + cves_not_checked = not check_cves or fail_results["docker_images_cves"] is ... + + if known_fails: + if not os.path.exists(known_fails): + print(f"Known fails file {known_fails} not found.") + exit(1) + + with open(known_fails) as f: + known_fails = json.load(f) + + if known_fails: + fail_results["checks_known_fails"] = get_checks_known_fails( + db_client, commit_sha, branch_name, known_fails + ) + + if pr_number == 0: + pr_info_html = f"Release ({branch_name})" + else: + try: + pr_info = get_pr_info_from_number(pr_number) + pr_info_html = f""" + #{pr_info.get("number")} ({pr_info.get("base", {}).get('ref')} <- {pr_info.get("head", {}).get('ref')}) {pr_info.get("title")} + """ + fail_results["pr_new_fails"] = get_new_fails_this_pr( + db_client, + pr_info, + fail_results["checks_fails"], + fail_results["regression_fails"], + ) + except Exception as e: + pr_info_html = e + + fail_results["job_statuses"] = backfill_skipped_statuses( + fail_results["job_statuses"], pr_number, branch_name, commit_sha + ) + + high_cve_count = 0 + if not cves_not_checked and len(fail_results["docker_images_cves"]) > 0: + high_cve_count = ( + fail_results["docker_images_cves"]["severity"] + .str.lower() + .isin(("high", "critical")) + .sum() + ) + + # Set up the Jinja2 environment + template_dir = os.path.dirname(__file__) + + # Load the template + template = Environment(loader=FileSystemLoader(template_dir)).get_template( + "ci_run_report.html.jinja" + ) + + # Define the context for rendering + context = { + "title": "ClickHouse® CI Workflow Run Report", + "github_repo": GITHUB_REPO, + "s3_bucket": S3_BUCKET, + "pr_info_html": pr_info_html, + "pr_number": pr_number, + "workflow_id": actions_run_url.split("/")[-1], + "commit_sha": commit_sha, + "base_sha": "" if pr_number == 0 else pr_info.get("base", {}).get("sha"), + "date": f"{datetime.utcnow().strftime('%Y-%m-%d %H:%M:%S')} UTC", + "is_preview": mark_preview, + "counts": { + "jobs_status": f"{sum(fail_results['job_statuses']['job_status'].value_counts().get(x, 0) for x in ('failure', 'error'))} fail/error", + "checks_errors": len(fail_results["checks_errors"]), + "checks_new_fails": len(fail_results["checks_fails"]), + "regression_new_fails": len(fail_results["regression_fails"]), + "cves": "N/A" if cves_not_checked else f"{high_cve_count} high/critical", + "checks_known_fails": ( + "N/A" if not known_fails else len(fail_results["checks_known_fails"]) + ), + "pr_new_fails": len(fail_results["pr_new_fails"]), + }, + "build_report_links": get_build_report_links( + fail_results["job_statuses"], pr_number, branch_name, commit_sha + ), + "ci_jobs_status_html": format_results_as_html_table( + fail_results["job_statuses"] + ), + "checks_errors_html": format_results_as_html_table( + fail_results["checks_errors"] + ), + "checks_fails_html": format_results_as_html_table(fail_results["checks_fails"]), + "regression_fails_html": format_results_as_html_table( + fail_results["regression_fails"] + ), + "docker_images_cves_html": ( + "

<p>Not Checked</p>

" + if cves_not_checked + else format_results_as_html_table(fail_results["docker_images_cves"]) + ), + "checks_known_fails_html": ( + "

<p>Not Checked</p>

" + if not known_fails + else format_results_as_html_table(fail_results["checks_known_fails"]) + ), + "new_fails_html": format_results_as_html_table(fail_results["pr_new_fails"]), + } + + # Render the template with the context + rendered_html = template.render(context) + + report_name = "ci_run_report.html" + report_path = Path(report_name) + report_path.write_text(rendered_html, encoding="utf-8") + + if no_upload: + print(f"Report saved to {report_path}") + exit(0) + + if pr_number == 0: + report_destination_key = f"REFs/{branch_name}/{commit_sha}/{report_name}" + else: + report_destination_key = f"PRs/{pr_number}/{commit_sha}/{report_name}" + + # Upload the report to S3 + s3_client = boto3.client("s3", endpoint_url=os.getenv("S3_URL")) + + try: + s3_client.put_object( + Bucket=S3_BUCKET, + Key=report_destination_key, + Body=rendered_html, + ContentType="text/html; charset=utf-8", + ) + except NoCredentialsError: + print("Credentials not available for S3 upload.") + + return f"https://s3.amazonaws.com/{S3_BUCKET}/" + report_destination_key + + +def main(): + args = parse_args() + + report_url = create_workflow_report( + args.actions_run_url, + args.pr_number, + args.commit_sha, + args.no_upload, + args.known_fails, + args.cves, + args.mark_preview, + ) + + print(report_url) + + +if __name__ == "__main__": + main() diff --git a/.github/actions/create_workflow_report/workflow_report_hook.sh b/.github/actions/create_workflow_report/workflow_report_hook.sh new file mode 100755 index 000000000000..55f5a1ede5c3 --- /dev/null +++ b/.github/actions/create_workflow_report/workflow_report_hook.sh @@ -0,0 +1,7 @@ +#!/bin/bash +# This script is for generating preview reports when invoked as a post-hook from a praktika job +pip install clickhouse-driver==0.2.8 numpy==1.26.4 pandas==2.0.3 jinja2==3.1.5 +ARGS="--mark-preview --known-fails tests/broken_tests.json --cves --actions-run-url $GITHUB_SERVER_URL/$GITHUB_REPOSITORY/actions/runs/$GITHUB_RUN_ID --pr-number $PR_NUMBER" +CMD="python3 .github/actions/create_workflow_report/create_workflow_report.py" +$CMD $ARGS + diff --git a/.github/actions/docker_setup/action.yml b/.github/actions/docker_setup/action.yml new file mode 100644 index 000000000000..56f713fa59d1 --- /dev/null +++ b/.github/actions/docker_setup/action.yml @@ -0,0 +1,32 @@ +name: Docker setup +description: Setup docker +inputs: + test_name: + description: name of the test, used in determining ipv6 configs. 
+    default: None
+    type: string
+runs:
+  using: "composite"
+  steps:
+    - name: Docker IPv6 configuration
+      shell: bash
+      if: ${{ contains(inputs.test_name, 'Stateless') }}
+      env:
+        ipv6_subnet: 2001:3984:3989::/64
+      run: |
+        # make sure docker uses proper IPv6 config
+        sudo touch /etc/docker/daemon.json
+        sudo chown ubuntu:ubuntu /etc/docker/daemon.json
+        sudo cat <<EOT > /etc/docker/daemon.json
+        {
+          "ipv6": true,
+          "fixed-cidr-v6": "${{ env.ipv6_subnet }}"
+        }
+        EOT
+        sudo chown root:root /etc/docker/daemon.json
+        sudo systemctl restart docker
+        sudo systemctl status docker
+    - name: Docker info
+      shell: bash
+      run: |
+        docker info
diff --git a/.github/actions/runner_setup/action.yml b/.github/actions/runner_setup/action.yml
new file mode 100644
index 000000000000..5a229fdd47e7
--- /dev/null
+++ b/.github/actions/runner_setup/action.yml
@@ -0,0 +1,19 @@
+name: Setup
+description: Setup environment
+runs:
+  using: "composite"
+  steps:
+    - name: Setup zram
+      shell: bash
+      run: |
+        sudo modprobe zram
+        MemTotal=$(grep -Po "(?<=MemTotal:)\s+\d+" /proc/meminfo) # KiB
+        Percent=200
+        ZRAM_SIZE=$(($MemTotal / 1024 / 1024 * $Percent / 100)) # Convert to GiB
+        .github/retry.sh 30 2 sudo zramctl --size ${ZRAM_SIZE}GiB --algorithm zstd /dev/zram0
+        sudo mkswap /dev/zram0 && sudo swapon -p 100 /dev/zram0
+        sudo sysctl vm.swappiness=200
+    - name: Install awscli
+      shell: bash
+      run: |
+        .github/retry.sh 10 30 sudo apt-get install -y awscli
diff --git a/.github/grype/parse_vulnerabilities_grype.py b/.github/grype/parse_vulnerabilities_grype.py
new file mode 100644
index 000000000000..fec2ef3bfac7
--- /dev/null
+++ b/.github/grype/parse_vulnerabilities_grype.py
@@ -0,0 +1,32 @@
+#!/usr/bin/env python3
+import json
+
+from testflows.core import *
+
+xfails = {}
+
+
+@Name("docker vulnerabilities")
+@XFails(xfails)
+@TestModule
+def docker_vulnerabilities(self):
+    with Given("I gather grype scan results"):
+        with open("./result.json", "r") as f:
+            results = json.load(f)
+
+    for vulnerability in results["matches"]:
+        with Test(
+            f"{vulnerability['vulnerability']['id']}@{vulnerability['vulnerability']['namespace']},{vulnerability['vulnerability']['severity']}",
+            flags=TE,
+        ):
+            note(vulnerability)
+            critical_levels = set(["HIGH", "CRITICAL"])
+            if vulnerability['vulnerability']["severity"].upper() in critical_levels:
+                with Then(
+                    f"Found vulnerability of {vulnerability['vulnerability']['severity']} severity"
+                ):
+                    result(Fail)
+
+
+if main():
+    docker_vulnerabilities()
diff --git a/.github/grype/run_grype_scan.sh b/.github/grype/run_grype_scan.sh
new file mode 100755
index 000000000000..af428e37d669
--- /dev/null
+++ b/.github/grype/run_grype_scan.sh
@@ -0,0 +1,18 @@
+set -x
+set -e
+
+IMAGE=$1
+
+GRYPE_VERSION=${GRYPE_VERSION:-"v0.92.2"}
+
+docker pull $IMAGE
+docker pull anchore/grype:${GRYPE_VERSION}
+
+docker run \
+    --rm --volume /var/run/docker.sock:/var/run/docker.sock \
+    --name Grype anchore/grype:${GRYPE_VERSION} \
+    --scope all-layers \
+    -o json \
+    $IMAGE > result.json
+
+ls -sh
diff --git a/.github/grype/transform_and_upload_results_s3.sh b/.github/grype/transform_and_upload_results_s3.sh
new file mode 100755
index 000000000000..38674d7a2a26
--- /dev/null
+++ b/.github/grype/transform_and_upload_results_s3.sh
@@ -0,0 +1,20 @@
+DOCKER_IMAGE=$(echo "$DOCKER_IMAGE" | sed 's/[\/:]/_/g')
+
+if [ "$PR_NUMBER" -eq 0 ]; then
+    PREFIX="REFs/$GITHUB_REF_NAME/$COMMIT_SHA/grype/$DOCKER_IMAGE"
+else
+    PREFIX="PRs/$PR_NUMBER/$COMMIT_SHA/grype/$DOCKER_IMAGE"
+fi
+
+S3_PATH="s3://$S3_BUCKET/$PREFIX"
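+# Hypothetical example: PR_NUMBER=123, COMMIT_SHA=abcdef0, DOCKER_IMAGE=altinityinfra/clickhouse-server:head
+# gives S3_PATH="s3://$S3_BUCKET/PRs/123/abcdef0/grype/altinityinfra_clickhouse-server_head"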
+HTTPS_RESULTS_PATH="https://$S3_BUCKET.s3.amazonaws.com/index.html#$PREFIX/" +HTTPS_REPORT_PATH="https://s3.amazonaws.com/$S3_BUCKET/$PREFIX/results.html" +echo "https_report_path=$HTTPS_REPORT_PATH" >> $GITHUB_OUTPUT + +tfs --no-colors transform nice raw.log nice.log.txt +tfs --no-colors report results -a $HTTPS_RESULTS_PATH raw.log - --copyright "Altinity LTD" | tfs --no-colors document convert > results.html + +aws s3 cp --no-progress nice.log.txt $S3_PATH/nice.log.txt --content-type "text/plain; charset=utf-8" || echo "nice log file not found". +aws s3 cp --no-progress results.html $S3_PATH/results.html || echo "results file not found". +aws s3 cp --no-progress raw.log $S3_PATH/raw.log || echo "raw.log file not found". +aws s3 cp --no-progress result.json $S3_PATH/result.json --content-type "text/plain; charset=utf-8" || echo "result.json not found". \ No newline at end of file diff --git a/.github/retry.sh b/.github/retry.sh new file mode 100755 index 000000000000..566c2cf11315 --- /dev/null +++ b/.github/retry.sh @@ -0,0 +1,22 @@ +#!/bin/bash +# Execute command until exitcode is 0 or +# maximum number of retries is reached +# Example: +# ./retry +retries=$1 +delay=$2 +command="${@:3}" +exitcode=0 +try=0 +until [ "$try" -ge $retries ] +do + echo "$command" + eval "$command" + exitcode=$? + if [ $exitcode -eq 0 ]; then + break + fi + try=$((try+1)) + sleep $2 +done +exit $exitcode diff --git a/.github/workflows/README.md b/.github/workflows/README.md new file mode 100644 index 000000000000..56415c2a7478 --- /dev/null +++ b/.github/workflows/README.md @@ -0,0 +1,13 @@ +## Scheduled Build Run Results + +Results for **the latest** release_workflow scheduled runs. + +| Branch | Status | +| ------------ | - | +| **`antalya`** | [![antalya](https://github.com/Altinity/ClickHouse/actions/workflows/release_branches.yml/badge.svg?branch=antalya&event=workflow_dispatch)](https://github.com/Altinity/ClickHouse/actions/workflows/release_branches.yml?query=branch%3Aantalya) | +| **`project-antalya-24.12.2`** | [![project-antalya-24.12.2](https://github.com/Altinity/ClickHouse/actions/workflows/release_branches.yml/badge.svg?branch=project-antalya-24.12.2&event=workflow_dispatch)](https://github.com/Altinity/ClickHouse/actions/workflows/release_branches.yml?query=branch%3Aproject-antalya-24.12.2) | +| **`customizations/22.8.21`** | [![customizations/22.8.21](https://github.com/Altinity/ClickHouse/actions/workflows/release_branches.yml/badge.svg?branch=customizations/22.8.21&event=workflow_dispatch)](https://github.com/Altinity/ClickHouse/actions/workflows/release_branches.yml?query=branch%3Acustomizations/22.8.21) | +| **`customizations/23.3.19`** | [![customizations/23.3.19](https://github.com/Altinity/ClickHouse/actions/workflows/release_branches.yml/badge.svg?branch=customizations/23.3.19&event=workflow_dispatch)](https://github.com/Altinity/ClickHouse/actions/workflows/release_branches.yml?query=branch%3Acustomizations/23.3.19) | +| **`customizations/23.8.16`** | [![customizations/23.8.16](https://github.com/Altinity/ClickHouse/actions/workflows/release_branches.yml/badge.svg?branch=customizations/23.8.16&event=workflow_dispatch)](https://github.com/Altinity/ClickHouse/actions/workflows/release_branches.yml?query=branch%3Acustomizations/23.8.16) | +| **`customizations/24.3.14`** | 
[![customizations/24.3.14](https://github.com/Altinity/ClickHouse/actions/workflows/release_branches.yml/badge.svg?branch=customizations/24.3.14&event=workflow_dispatch)](https://github.com/Altinity/ClickHouse/actions/workflows/release_branches.yml?query=branch%3Acustomizations/24.3.14) | +| **`customizations/24.8.11`** | [![customizations/24.8.11](https://github.com/Altinity/ClickHouse/actions/workflows/release_branches.yml/badge.svg?branch=customizations/24.8.11&event=workflow_dispatch)](https://github.com/Altinity/ClickHouse/actions/workflows/release_branches.yml?query=branch%3Acustomizations/24.8.11) | diff --git a/.github/workflows/backport_branches.yml b/.github/workflows/backport_branches.yml index 294557a38b0f..7cdc4eee46c0 100644 --- a/.github/workflows/backport_branches.yml +++ b/.github/workflows/backport_branches.yml @@ -3,6 +3,13 @@ name: BackportPR on: + workflow_dispatch: + inputs: + no_cache: + description: Run without cache + required: false + type: boolean + default: false pull_request: branches: ['2[1-9].[1-9][0-9]', '2[1-9].[1-9]'] @@ -10,8 +17,22 @@ env: # Force the stdout and stderr streams to be unbuffered PYTHONUNBUFFERED: 1 DISABLE_CI_MERGE_COMMIT: ${{ vars.DISABLE_CI_MERGE_COMMIT || '0' }} - DISABLE_CI_CACHE: ${{ vars.DISABLE_CI_CACHE || '0' }} + DISABLE_CI_CACHE: ${{ github.event.inputs.no_cache || '0' }} CHECKOUT_REF: ${{ vars.DISABLE_CI_MERGE_COMMIT == '1' && github.event.pull_request.head.sha || '' }} + DOCKER_PASSWORD: ${{ secrets.DOCKER_PASSWORD }} + CLICKHOUSE_TEST_STAT_URL: ${{ secrets.CLICKHOUSE_TEST_STAT_URL }} + CLICKHOUSE_TEST_STAT_LOGIN: ${{ secrets.CLICKHOUSE_TEST_STAT_LOGIN }} + CLICKHOUSE_TEST_STAT_PASSWORD: ${{ secrets.CLICKHOUSE_TEST_STAT_PASSWORD }} + AWS_ACCESS_KEY_ID: ${{ secrets.AWS_ACCESS_KEY_ID }} + AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_SECRET_ACCESS_KEY }} + AWS_DEFAULT_REGION: ${{ secrets.AWS_DEFAULT_REGION }} + CHECKS_DATABASE_HOST: ${{ secrets.CHECKS_DATABASE_HOST }} + AZURE_STORAGE_KEY: ${{ secrets.AZURE_STORAGE_KEY }} + AZURE_ACCOUNT_NAME: ${{ secrets.AZURE_ACCOUNT_NAME }} + AZURE_CONTAINER_NAME: ${{ secrets.AZURE_CONTAINER_NAME }} + AZURE_STORAGE_ACCOUNT_URL: "https://${{ secrets.AZURE_ACCOUNT_NAME }}.blob.core.windows.net/" + ROBOT_TOKEN: ${{ secrets.ROBOT_TOKEN }} + GH_TOKEN: ${{ github.token }} # Allow updating GH commit statuses and PR comments to post an actual job reports link permissions: write-all @@ -19,7 +40,7 @@ permissions: write-all jobs: config_workflow: - runs-on: [self-hosted, style-checker-aarch64] + runs-on: [self-hosted, altinity-on-demand, altinity-style-checker-aarch64] needs: [] name: "Config Workflow" outputs: @@ -30,6 +51,26 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Config Workflow" + + - name: Note report location to summary + env: + PR_NUMBER: ${{ github.event.pull_request.number || 0 }} + COMMIT_SHA: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }} + run: | + if [ "$PR_NUMBER" -eq 0 ]; then + PREFIX="REFs/$GITHUB_REF_NAME/$COMMIT_SHA" + else + PREFIX="PRs/$PR_NUMBER/$COMMIT_SHA" + fi + REPORT_LINK=https://s3.amazonaws.com/altinity-build-artifacts/$PREFIX/ci_run_report.html + echo "Workflow Run Report: [View Report]($REPORT_LINK)" >> $GITHUB_STEP_SUMMARY + - name: Prepare env script run: | rm -rf ./ci/tmp ./ci/tmp ./ci/tmp @@ -56,7 +97,7 @@ jobs: fi dockers_build_amd: - runs-on: [self-hosted, style-checker] + runs-on: 
[self-hosted, altinity-on-demand, altinity-style-checker] needs: [config_workflow] if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'RG9ja2VycyBCdWlsZCAoYW1kKQ==') }} name: "Dockers Build (amd)" @@ -68,6 +109,13 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Dockers Build (amd)" + - name: Prepare env script run: | rm -rf ./ci/tmp ./ci/tmp ./ci/tmp @@ -94,7 +142,7 @@ jobs: fi dockers_build_arm: - runs-on: [self-hosted, style-checker-aarch64] + runs-on: [self-hosted, altinity-on-demand, altinity-style-checker-aarch64] needs: [config_workflow] if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'RG9ja2VycyBCdWlsZCAoYXJtKQ==') }} name: "Dockers Build (arm)" @@ -106,6 +154,13 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Dockers Build (arm)" + - name: Prepare env script run: | rm -rf ./ci/tmp ./ci/tmp ./ci/tmp @@ -132,7 +187,7 @@ jobs: fi build_amd_debug: - runs-on: [self-hosted, builder] + runs-on: [self-hosted, altinity-on-demand, altinity-builder] needs: [config_workflow, dockers_build_amd, dockers_build_arm] if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'QnVpbGQgKGFtZF9kZWJ1Zyk=') }} name: "Build (amd_debug)" @@ -144,6 +199,13 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Build (amd_debug)" + - name: Prepare env script run: | rm -rf ./ci/tmp ./ci/tmp ./ci/tmp @@ -170,7 +232,7 @@ jobs: fi build_amd_release: - runs-on: [self-hosted, builder] + runs-on: [self-hosted, altinity-on-demand, altinity-builder] needs: [config_workflow, dockers_build_amd, dockers_build_arm] if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'QnVpbGQgKGFtZF9yZWxlYXNlKQ==') }} name: "Build (amd_release)" @@ -182,6 +244,13 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Build (amd_release)" + - name: Prepare env script run: | rm -rf ./ci/tmp ./ci/tmp ./ci/tmp @@ -208,7 +277,7 @@ jobs: fi build_amd_asan: - runs-on: [self-hosted, builder] + runs-on: [self-hosted, altinity-on-demand, altinity-builder] needs: [config_workflow, dockers_build_amd, dockers_build_arm] if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'QnVpbGQgKGFtZF9hc2FuKQ==') }} name: "Build (amd_asan)" @@ -220,6 +289,13 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Build (amd_asan)" + - name: Prepare env script run: | rm -rf ./ci/tmp ./ci/tmp ./ci/tmp @@ -246,7 +322,7 @@ jobs: fi build_amd_tsan: - runs-on: [self-hosted, builder] + runs-on: [self-hosted, altinity-on-demand, altinity-builder] needs: [config_workflow, dockers_build_amd, dockers_build_arm] if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 
'QnVpbGQgKGFtZF90c2FuKQ==') }} name: "Build (amd_tsan)" @@ -258,6 +334,13 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Build (amd_tsan)" + - name: Prepare env script run: | rm -rf ./ci/tmp ./ci/tmp ./ci/tmp @@ -284,7 +367,7 @@ jobs: fi build_arm_release: - runs-on: [self-hosted, builder-aarch64] + runs-on: [self-hosted, altinity-on-demand, altinity-builder] needs: [config_workflow, dockers_build_amd, dockers_build_arm] if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'QnVpbGQgKGFybV9yZWxlYXNlKQ==') }} name: "Build (arm_release)" @@ -296,6 +379,13 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Build (arm_release)" + - name: Prepare env script run: | rm -rf ./ci/tmp ./ci/tmp ./ci/tmp @@ -322,7 +412,7 @@ jobs: fi build_amd_darwin: - runs-on: [self-hosted, builder] + runs-on: [self-hosted, altinity-on-demand, altinity-builder] needs: [config_workflow, dockers_build_amd, dockers_build_arm] if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'QnVpbGQgKGFtZF9kYXJ3aW4p') }} name: "Build (amd_darwin)" @@ -334,6 +424,13 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Build (amd_darwin)" + - name: Prepare env script run: | rm -rf ./ci/tmp ./ci/tmp ./ci/tmp @@ -360,7 +457,7 @@ jobs: fi build_arm_darwin: - runs-on: [self-hosted, builder-aarch64] + runs-on: [self-hosted, altinity-on-demand, altinity-builder] needs: [config_workflow, dockers_build_amd, dockers_build_arm] if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'QnVpbGQgKGFybV9kYXJ3aW4p') }} name: "Build (arm_darwin)" @@ -372,6 +469,13 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Build (arm_darwin)" + - name: Prepare env script run: | rm -rf ./ci/tmp ./ci/tmp ./ci/tmp @@ -398,7 +502,7 @@ jobs: fi docker_server_image: - runs-on: [self-hosted, style-checker] + runs-on: [self-hosted, altinity-on-demand, altinity-style-checker] needs: [config_workflow, dockers_build_amd, dockers_build_arm, build_amd_release, build_arm_release] if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'RG9ja2VyIHNlcnZlciBpbWFnZQ==') }} name: "Docker server image" @@ -410,6 +514,13 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Docker server image" + - name: Prepare env script run: | rm -rf ./ci/tmp ./ci/tmp ./ci/tmp @@ -436,7 +547,7 @@ jobs: fi docker_keeper_image: - runs-on: [self-hosted, style-checker] + runs-on: [self-hosted, altinity-on-demand, altinity-style-checker] needs: [config_workflow, dockers_build_amd, dockers_build_arm, build_amd_release, build_arm_release] if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'RG9ja2VyIGtlZXBlciBpbWFnZQ==') }} name: "Docker keeper image" @@ 
-448,6 +559,13 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Docker keeper image" + - name: Prepare env script run: | rm -rf ./ci/tmp ./ci/tmp ./ci/tmp @@ -474,7 +592,7 @@ jobs: fi install_packages_amd_debug: - runs-on: [self-hosted, style-checker] + runs-on: [self-hosted, altinity-on-demand, altinity-func-tester] needs: [config_workflow, dockers_build_amd, dockers_build_arm, build_amd_debug] if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'SW5zdGFsbCBwYWNrYWdlcyAoYW1kX2RlYnVnKQ==') }} name: "Install packages (amd_debug)" @@ -486,6 +604,13 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Install packages (amd_debug)" + - name: Prepare env script run: | rm -rf ./ci/tmp ./ci/tmp ./ci/tmp @@ -512,7 +637,7 @@ jobs: fi compatibility_check_release: - runs-on: [self-hosted, style-checker] + runs-on: [self-hosted, altinity-on-demand, altinity-style-checker] needs: [config_workflow, dockers_build_amd, dockers_build_arm, build_amd_release] if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'Q29tcGF0aWJpbGl0eSBjaGVjayAocmVsZWFzZSk=') }} name: "Compatibility check (release)" @@ -524,6 +649,13 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Compatibility check (release)" + - name: Prepare env script run: | rm -rf ./ci/tmp ./ci/tmp ./ci/tmp @@ -550,7 +682,7 @@ jobs: fi compatibility_check_aarch64: - runs-on: [self-hosted, style-checker-aarch64] + runs-on: [self-hosted, altinity-on-demand, altinity-style-checker-aarch64] needs: [config_workflow, dockers_build_amd, dockers_build_arm, build_arm_release] if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'Q29tcGF0aWJpbGl0eSBjaGVjayAoYWFyY2g2NCk=') }} name: "Compatibility check (aarch64)" @@ -562,6 +694,13 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Compatibility check (aarch64)" + - name: Prepare env script run: | rm -rf ./ci/tmp ./ci/tmp ./ci/tmp @@ -588,7 +727,7 @@ jobs: fi stateless_tests_amd_asan_distributed_plan_parallel_1_2: - runs-on: [self-hosted, amd-medium-cpu] + runs-on: [self-hosted, altinity-on-demand, altinity-func-tester] needs: [config_workflow, dockers_build_amd, dockers_build_arm, build_amd_asan] if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhbWRfYXNhbiwgZGlzdHJpYnV0ZWQgcGxhbiwgcGFyYWxsZWwsIDEvMik=') }} name: "Stateless tests (amd_asan, distributed plan, parallel, 1/2)" @@ -600,6 +739,13 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Stateless tests (amd_asan, distributed plan, parallel, 1/2)" + - name: Prepare env script run: | rm -rf ./ci/tmp ./ci/tmp ./ci/tmp @@ -626,7 +772,7 @@ jobs: fi stateless_tests_amd_asan_distributed_plan_parallel_2_2: - runs-on: [self-hosted, amd-medium-cpu] + 
runs-on: [self-hosted, altinity-on-demand, altinity-func-tester] needs: [config_workflow, dockers_build_amd, dockers_build_arm, build_amd_asan] if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhbWRfYXNhbiwgZGlzdHJpYnV0ZWQgcGxhbiwgcGFyYWxsZWwsIDIvMik=') }} name: "Stateless tests (amd_asan, distributed plan, parallel, 2/2)" @@ -638,6 +784,13 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Stateless tests (amd_asan, distributed plan, parallel, 2/2)" + - name: Prepare env script run: | rm -rf ./ci/tmp ./ci/tmp ./ci/tmp @@ -664,7 +817,7 @@ jobs: fi stateless_tests_amd_asan_distributed_plan_sequential: - runs-on: [self-hosted, amd-small-mem] + runs-on: [self-hosted, altinity-on-demand, altinity-func-tester] needs: [config_workflow, dockers_build_amd, dockers_build_arm, build_amd_asan] if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhbWRfYXNhbiwgZGlzdHJpYnV0ZWQgcGxhbiwgc2VxdWVudGlhbCk=') }} name: "Stateless tests (amd_asan, distributed plan, sequential)" @@ -676,6 +829,13 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Stateless tests (amd_asan, distributed plan, sequential)" + - name: Prepare env script run: | rm -rf ./ci/tmp ./ci/tmp ./ci/tmp @@ -702,7 +862,7 @@ jobs: fi stress_test_amd_tsan: - runs-on: [self-hosted, amd-medium] + runs-on: [self-hosted, altinity-on-demand, altinity-func-tester] needs: [config_workflow, dockers_build_amd, dockers_build_arm, build_amd_tsan] if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'U3RyZXNzIHRlc3QgKGFtZF90c2FuKQ==') }} name: "Stress test (amd_tsan)" @@ -714,6 +874,13 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Stress test (amd_tsan)" + - name: Prepare env script run: | rm -rf ./ci/tmp ./ci/tmp ./ci/tmp @@ -740,7 +907,7 @@ jobs: fi integration_tests_amd_asan_old_analyzer_1_6: - runs-on: [self-hosted, amd-medium] + runs-on: [self-hosted, altinity-on-demand, altinity-func-tester] needs: [config_workflow, dockers_build_amd, dockers_build_arm, build_amd_asan] if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKGFtZF9hc2FuLCBvbGQgYW5hbHl6ZXIsIDEvNik=') }} name: "Integration tests (amd_asan, old analyzer, 1/6)" @@ -752,6 +919,13 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Integration tests (amd_asan, old analyzer, 1/6)" + - name: Prepare env script run: | rm -rf ./ci/tmp ./ci/tmp ./ci/tmp @@ -778,7 +952,7 @@ jobs: fi integration_tests_amd_asan_old_analyzer_2_6: - runs-on: [self-hosted, amd-medium] + runs-on: [self-hosted, altinity-on-demand, altinity-func-tester] needs: [config_workflow, dockers_build_amd, dockers_build_arm, build_amd_asan] if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 
'SW50ZWdyYXRpb24gdGVzdHMgKGFtZF9hc2FuLCBvbGQgYW5hbHl6ZXIsIDIvNik=') }} name: "Integration tests (amd_asan, old analyzer, 2/6)" @@ -790,6 +964,13 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Integration tests (amd_asan, old analyzer, 2/6)" + - name: Prepare env script run: | rm -rf ./ci/tmp ./ci/tmp ./ci/tmp @@ -816,7 +997,7 @@ jobs: fi integration_tests_amd_asan_old_analyzer_3_6: - runs-on: [self-hosted, amd-medium] + runs-on: [self-hosted, altinity-on-demand, altinity-func-tester] needs: [config_workflow, dockers_build_amd, dockers_build_arm, build_amd_asan] if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKGFtZF9hc2FuLCBvbGQgYW5hbHl6ZXIsIDMvNik=') }} name: "Integration tests (amd_asan, old analyzer, 3/6)" @@ -828,6 +1009,13 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Integration tests (amd_asan, old analyzer, 3/6)" + - name: Prepare env script run: | rm -rf ./ci/tmp ./ci/tmp ./ci/tmp @@ -854,7 +1042,7 @@ jobs: fi integration_tests_amd_asan_old_analyzer_4_6: - runs-on: [self-hosted, amd-medium] + runs-on: [self-hosted, altinity-on-demand, altinity-func-tester] needs: [config_workflow, dockers_build_amd, dockers_build_arm, build_amd_asan] if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKGFtZF9hc2FuLCBvbGQgYW5hbHl6ZXIsIDQvNik=') }} name: "Integration tests (amd_asan, old analyzer, 4/6)" @@ -866,6 +1054,13 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Integration tests (amd_asan, old analyzer, 4/6)" + - name: Prepare env script run: | rm -rf ./ci/tmp ./ci/tmp ./ci/tmp @@ -892,7 +1087,7 @@ jobs: fi integration_tests_amd_asan_old_analyzer_5_6: - runs-on: [self-hosted, amd-medium] + runs-on: [self-hosted, altinity-on-demand, altinity-func-tester] needs: [config_workflow, dockers_build_amd, dockers_build_arm, build_amd_asan] if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKGFtZF9hc2FuLCBvbGQgYW5hbHl6ZXIsIDUvNik=') }} name: "Integration tests (amd_asan, old analyzer, 5/6)" @@ -904,6 +1099,13 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Integration tests (amd_asan, old analyzer, 5/6)" + - name: Prepare env script run: | rm -rf ./ci/tmp ./ci/tmp ./ci/tmp @@ -930,7 +1132,7 @@ jobs: fi integration_tests_amd_asan_old_analyzer_6_6: - runs-on: [self-hosted, amd-medium] + runs-on: [self-hosted, altinity-on-demand, altinity-func-tester] needs: [config_workflow, dockers_build_amd, dockers_build_arm, build_amd_asan] if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKGFtZF9hc2FuLCBvbGQgYW5hbHl6ZXIsIDYvNik=') }} name: "Integration tests (amd_asan, old analyzer, 6/6)" @@ -942,6 +1144,13 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + 
uses: ./.github/actions/docker_setup + with: + test_name: "Integration tests (amd_asan, old analyzer, 6/6)" + - name: Prepare env script run: | rm -rf ./ci/tmp ./ci/tmp ./ci/tmp @@ -968,7 +1177,7 @@ jobs: fi integration_tests_amd_tsan_1_6: - runs-on: [self-hosted, amd-medium] + runs-on: [self-hosted, altinity-on-demand, altinity-func-tester] needs: [config_workflow, dockers_build_amd, dockers_build_arm, build_amd_tsan] if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKGFtZF90c2FuLCAxLzYp') }} name: "Integration tests (amd_tsan, 1/6)" @@ -980,6 +1189,13 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Integration tests (amd_tsan, 1/6)" + - name: Prepare env script run: | rm -rf ./ci/tmp ./ci/tmp ./ci/tmp @@ -1006,7 +1222,7 @@ jobs: fi integration_tests_amd_tsan_2_6: - runs-on: [self-hosted, amd-medium] + runs-on: [self-hosted, altinity-on-demand, altinity-func-tester] needs: [config_workflow, dockers_build_amd, dockers_build_arm, build_amd_tsan] if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKGFtZF90c2FuLCAyLzYp') }} name: "Integration tests (amd_tsan, 2/6)" @@ -1018,6 +1234,13 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Integration tests (amd_tsan, 2/6)" + - name: Prepare env script run: | rm -rf ./ci/tmp ./ci/tmp ./ci/tmp @@ -1044,7 +1267,7 @@ jobs: fi integration_tests_amd_tsan_3_6: - runs-on: [self-hosted, amd-medium] + runs-on: [self-hosted, altinity-on-demand, altinity-func-tester] needs: [config_workflow, dockers_build_amd, dockers_build_arm, build_amd_tsan] if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKGFtZF90c2FuLCAzLzYp') }} name: "Integration tests (amd_tsan, 3/6)" @@ -1056,6 +1279,13 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Integration tests (amd_tsan, 3/6)" + - name: Prepare env script run: | rm -rf ./ci/tmp ./ci/tmp ./ci/tmp @@ -1082,7 +1312,7 @@ jobs: fi integration_tests_amd_tsan_4_6: - runs-on: [self-hosted, amd-medium] + runs-on: [self-hosted, altinity-on-demand, altinity-func-tester] needs: [config_workflow, dockers_build_amd, dockers_build_arm, build_amd_tsan] if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKGFtZF90c2FuLCA0LzYp') }} name: "Integration tests (amd_tsan, 4/6)" @@ -1094,6 +1324,13 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Integration tests (amd_tsan, 4/6)" + - name: Prepare env script run: | rm -rf ./ci/tmp ./ci/tmp ./ci/tmp @@ -1120,7 +1357,7 @@ jobs: fi integration_tests_amd_tsan_5_6: - runs-on: [self-hosted, amd-medium] + runs-on: [self-hosted, altinity-on-demand, altinity-func-tester] needs: [config_workflow, dockers_build_amd, dockers_build_arm, build_amd_tsan] if: ${{ !failure() && !cancelled() && 
!contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKGFtZF90c2FuLCA1LzYp') }} name: "Integration tests (amd_tsan, 5/6)" @@ -1132,6 +1369,13 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Integration tests (amd_tsan, 5/6)" + - name: Prepare env script run: | rm -rf ./ci/tmp ./ci/tmp ./ci/tmp @@ -1158,7 +1402,7 @@ jobs: fi integration_tests_amd_tsan_6_6: - runs-on: [self-hosted, amd-medium] + runs-on: [self-hosted, altinity-on-demand, altinity-func-tester] needs: [config_workflow, dockers_build_amd, dockers_build_arm, build_amd_tsan] if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKGFtZF90c2FuLCA2LzYp') }} name: "Integration tests (amd_tsan, 6/6)" @@ -1170,6 +1414,13 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Integration tests (amd_tsan, 6/6)" + - name: Prepare env script run: | rm -rf ./ci/tmp ./ci/tmp ./ci/tmp @@ -1196,7 +1447,7 @@ jobs: fi finish_workflow: - runs-on: [self-hosted, style-checker-aarch64] + runs-on: [self-hosted, altinity-on-demand, altinity-style-checker-aarch64] needs: [config_workflow, dockers_build_amd, dockers_build_arm, build_amd_debug, build_amd_release, build_amd_asan, build_amd_tsan, build_arm_release, build_amd_darwin, build_arm_darwin, docker_server_image, docker_keeper_image, install_packages_amd_debug, compatibility_check_release, compatibility_check_aarch64, stateless_tests_amd_asan_distributed_plan_parallel_1_2, stateless_tests_amd_asan_distributed_plan_parallel_2_2, stateless_tests_amd_asan_distributed_plan_sequential, stress_test_amd_tsan, integration_tests_amd_asan_old_analyzer_1_6, integration_tests_amd_asan_old_analyzer_2_6, integration_tests_amd_asan_old_analyzer_3_6, integration_tests_amd_asan_old_analyzer_4_6, integration_tests_amd_asan_old_analyzer_5_6, integration_tests_amd_asan_old_analyzer_6_6, integration_tests_amd_tsan_1_6, integration_tests_amd_tsan_2_6, integration_tests_amd_tsan_3_6, integration_tests_amd_tsan_4_6, integration_tests_amd_tsan_5_6, integration_tests_amd_tsan_6_6] if: ${{ !cancelled() }} name: "Finish Workflow" @@ -1208,6 +1459,13 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Finish Workflow" + - name: Prepare env script run: | rm -rf ./ci/tmp ./ci/tmp ./ci/tmp @@ -1232,3 +1490,126 @@ jobs: else python3 -m praktika run 'Finish Workflow' --workflow "BackportPR" --ci |& tee ./ci/tmp/job.log fi + +########################################################################################## +##################################### ALTINITY JOBS ###################################### +########################################################################################## + GrypeScanServer: + needs: [config_workflow, docker_server_image] + if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'RG9ja2VyIHNlcnZlciBpbWFnZQ==') }} + strategy: + fail-fast: false + matrix: + suffix: ['', '-alpine'] + uses: ./.github/workflows/grype_scan.yml + secrets: inherit + with: + docker_image: altinityinfra/clickhouse-server + version: ${{ 
fromJson(needs.config_workflow.outputs.data).custom_data.version.string }} + tag-suffix: ${{ matrix.suffix }} + GrypeScanKeeper: + needs: [config_workflow, docker_keeper_image] + if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'RG9ja2VyIGtlZXBlciBpbWFnZQ==') }} + uses: ./.github/workflows/grype_scan.yml + secrets: inherit + with: + docker_image: altinityinfra/clickhouse-keeper + version: ${{ fromJson(needs.config_workflow.outputs.data).custom_data.version.string }} + + RegressionTestsRelease: + needs: [config_workflow, build_amd_release] + if: ${{ !failure() && !cancelled() && !contains(github.event.pull_request.body, '[x]

-
+
diff --git a/programs/server/merges.html b/programs/server/merges.html
index 119fb058b0ba..6b1686973d25 100644
--- a/programs/server/merges.html
+++ b/programs/server/merges.html
@@ -3,7 +3,7 @@
     ClickHouse Merges Visualizer
-
+
diff --git a/.github/actions/create_workflow_report/ci_run_report.html.jinja b/.github/actions/create_workflow_report/ci_run_report.html.jinja
new file mode 100644
--- /dev/null
+++ b/.github/actions/create_workflow_report/ci_run_report.html.jinja
@@ -0,0 +1,272 @@
+<!DOCTYPE html>
+<html>
+<head>
+    <meta charset="utf-8">
+    <title>{{ title }}</title>
+</head>
+<body>
+
+<h1>{{ title }}</h1>
+
+<table>
+    <tr><td>Pull Request</td><td>{{ pr_info_html }}</td></tr>
+    <tr><td>Workflow Run</td><td><a href="https://github.com/{{ github_repo }}/actions/runs/{{ workflow_id }}">{{ workflow_id }}</a></td></tr>
+    <tr><td>Commit</td><td>{{ commit_sha }}</td></tr>
+    <tr><td>Build Report</td><td>{% for job_name, link in build_report_links.items() %}<a href="{{ link }}">[{{ job_name }}]</a> {% endfor %}</td></tr>
+    <tr><td>Date</td><td>{{ date }}</td></tr>
+</table>
+
+{% if is_preview %}
+<p>This is a preview. The workflow is not yet finished.</p>
+{% endif %}
+
+<h2>Table of Contents</h2>
+<ul>
+    {%- if pr_number != 0 -%}
+    <li><a href="#new-fails-in-pr">New Fails in PR</a>: {{ counts.pr_new_fails }}</li>
+    {%- endif %}
+    <li><a href="#ci-jobs-status">CI Jobs Status</a>: {{ counts.jobs_status }}</li>
+    <li><a href="#checks-errors">Checks Errors</a>: {{ counts.checks_errors }}</li>
+    <li><a href="#checks-new-fails">Checks New Fails</a>: {{ counts.checks_new_fails }}</li>
+    <li><a href="#regression-new-fails">Regression New Fails</a>: {{ counts.regression_new_fails }}</li>
+    <li><a href="#docker-images-cves">Docker Images CVEs</a>: {{ counts.cves }}</li>
+    <li><a href="#checks-known-fails">Checks Known Fails</a>: {{ counts.checks_known_fails }}</li>
+</ul>
+
+{%- if pr_number != 0 -%}
+<h2 id="new-fails-in-pr">New Fails in PR</h2>
+<p>Compared with base sha {{ base_sha }}</p>
+{{ new_fails_html }}
+{%- endif %}
+
+<h2 id="ci-jobs-status">CI Jobs Status</h2>
+{{ ci_jobs_status_html }}
+
+<h2 id="checks-errors">Checks Errors</h2>
+{{ checks_errors_html }}
+
+<h2 id="checks-new-fails">Checks New Fails</h2>
+{{ checks_fails_html }}
+
+<h2 id="regression-new-fails">Regression New Fails</h2>
+{{ regression_fails_html }}
+
+<h2 id="docker-images-cves">Docker Images CVEs</h2>
+{{ docker_images_cves_html }}
+
+<h2 id="checks-known-fails">Checks Known Fails</h2>
+<p>
+    Fail reason conventions:<br>
+    KNOWN - Accepted fail and fix is not planned<br>
+    INVESTIGATE - We don't know why it fails<br>
+    NEEDSFIX - Investigation done and a fix is needed to make it pass
+</p>
+ {{ checks_known_fails_html }} + + + + \ No newline at end of file diff --git a/.github/actions/create_workflow_report/create_workflow_report.py b/.github/actions/create_workflow_report/create_workflow_report.py new file mode 100755 index 000000000000..56f7fd829525 --- /dev/null +++ b/.github/actions/create_workflow_report/create_workflow_report.py @@ -0,0 +1,864 @@ +#!/usr/bin/env python3 +import argparse +import os +from pathlib import Path +from itertools import combinations +import json +from datetime import datetime +from functools import lru_cache +from glob import glob +import urllib.parse + +import pandas as pd +from jinja2 import Environment, FileSystemLoader +import requests +from clickhouse_driver import Client +import boto3 +from botocore.exceptions import NoCredentialsError + +DATABASE_HOST_VAR = "CHECKS_DATABASE_HOST" +DATABASE_USER_VAR = "CLICKHOUSE_TEST_STAT_LOGIN" +DATABASE_PASSWORD_VAR = "CLICKHOUSE_TEST_STAT_PASSWORD" +S3_BUCKET = "altinity-build-artifacts" +GITHUB_REPO = "Altinity/ClickHouse" +GITHUB_TOKEN = os.getenv("GITHUB_TOKEN") or os.getenv("GH_TOKEN") + +def get_commit_statuses(sha: str) -> pd.DataFrame: + """ + Fetch commit statuses for a given SHA and return as a pandas DataFrame. + Handles pagination to get all statuses. + + Args: + sha (str): Commit SHA to fetch statuses for. + + Returns: + pd.DataFrame: DataFrame containing all statuses. + """ + headers = { + "Authorization": f"token {GITHUB_TOKEN}", + "Accept": "application/vnd.github.v3+json", + } + + url = f"https://api.github.com/repos/{GITHUB_REPO}/commits/{sha}/statuses" + + all_data = [] + + while url: + response = requests.get(url, headers=headers) + + if response.status_code != 200: + raise Exception( + f"Failed to fetch statuses: {response.status_code} {response.text}" + ) + + data = response.json() + all_data.extend(data) + + # Check for pagination links in the response headers + if "Link" in response.headers: + links = response.headers["Link"].split(",") + next_url = None + + for link in links: + parts = link.strip().split(";") + if len(parts) == 2 and 'rel="next"' in parts[1]: + next_url = parts[0].strip("<>") + break + + url = next_url + else: + url = None + + # Parse relevant fields + parsed = [ + { + "job_name": item["context"], + "job_status": item["state"], + "message": item["description"], + "results_link": item["target_url"], + } + for item in all_data + ] + + # Create DataFrame + df = pd.DataFrame(parsed) + + # Drop duplicates keeping the first occurrence (newest status for each context) + # GitHub returns statuses in reverse chronological order + df = df.drop_duplicates(subset=["job_name"], keep="first") + + # Sort by status and job name + return df.sort_values( + by=["job_status", "job_name"], ascending=[True, True] + ).reset_index(drop=True) + + +def get_pr_info_from_number(pr_number: str) -> dict: + """ + Fetch pull request information for a given PR number. + + Args: + pr_number (str): Pull request number to fetch information for. + + Returns: + dict: Dictionary containing PR information. 
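+
+    Raises:
+        Exception: If the GitHub API request does not return HTTP 200.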
+ """ + headers = { + "Authorization": f"token {GITHUB_TOKEN}", + "Accept": "application/vnd.github.v3+json", + } + + url = f"https://api.github.com/repos/{GITHUB_REPO}/pulls/{pr_number}" + response = requests.get(url, headers=headers) + + if response.status_code != 200: + raise Exception( + f"Failed to fetch pull request info: {response.status_code} {response.text}" + ) + + return response.json() + + +@lru_cache +def get_run_details(run_url: str) -> dict: + """ + Fetch run details for a given run URL. + """ + run_id = run_url.split("/")[-1] + + headers = { + "Authorization": f"token {GITHUB_TOKEN}", + "Accept": "application/vnd.github.v3+json", + } + + url = f"https://api.github.com/repos/{GITHUB_REPO}/actions/runs/{run_id}" + response = requests.get(url, headers=headers) + + if response.status_code != 200: + raise Exception( + f"Failed to fetch run details: {response.status_code} {response.text}" + ) + + return response.json() + + +def get_checks_fails(client: Client, commit_sha: str, branch_name: str): + """ + Get tests that did not succeed for the given commit and branch. + Exclude checks that have status 'error' as they are counted in get_checks_errors. + """ + query = f"""SELECT job_status, job_name, status as test_status, test_name, results_link + FROM ( + SELECT + argMax(check_status, check_start_time) as job_status, + check_name as job_name, + argMax(test_status, check_start_time) as status, + test_name, + report_url as results_link, + task_url + FROM `gh-data`.checks + WHERE commit_sha='{commit_sha}' AND head_ref='{branch_name}' + GROUP BY check_name, test_name, report_url, task_url + ) + WHERE test_status IN ('FAIL', 'ERROR') + AND job_status!='error' + ORDER BY job_name, test_name + """ + return client.query_dataframe(query) + + +def get_checks_known_fails( + client: Client, commit_sha: str, branch_name: str, known_fails: dict +): + """ + Get tests that are known to fail for the given commit and branch. + """ + if len(known_fails) == 0: + return pd.DataFrame() + + query = f"""SELECT job_status, job_name, status as test_status, test_name, results_link + FROM ( + SELECT + argMax(check_status, check_start_time) as job_status, + check_name as job_name, + argMax(test_status, check_start_time) as status, + test_name, + report_url as results_link, + task_url + FROM `gh-data`.checks + WHERE commit_sha='{commit_sha}' AND head_ref='{branch_name}' + GROUP BY check_name, test_name, report_url, task_url + ) + WHERE test_status='BROKEN' + AND test_name IN ({','.join(f"'{test}'" for test in known_fails.keys())}) + ORDER BY job_name, test_name + """ + + df = client.query_dataframe(query) + + df.insert( + len(df.columns) - 1, + "reason", + df["test_name"] + .astype(str) + .apply( + lambda test_name: known_fails[test_name].get("reason", "No reason given") + ), + ) + + return df + + +def get_checks_errors(client: Client, commit_sha: str, branch_name: str): + """ + Get checks that have status 'error' for the given commit and branch. 
+ """ + query = f"""SELECT job_status, job_name, status as test_status, test_name, results_link + FROM ( + SELECT + argMax(check_status, check_start_time) as job_status, + check_name as job_name, + argMax(test_status, check_start_time) as status, + test_name, + report_url as results_link, + task_url + FROM `gh-data`.checks + WHERE commit_sha='{commit_sha}' AND head_ref='{branch_name}' + GROUP BY check_name, test_name, report_url, task_url + ) + WHERE job_status=='error' + ORDER BY job_name, test_name + """ + return client.query_dataframe(query) + + +def drop_prefix_rows(df, column_to_clean): + """ + Drop rows from the dataframe if: + - the row matches another row completely except for the specified column + - the specified column of that row is a prefix of the same column in another row + """ + to_drop = set() + reference_columns = [col for col in df.columns if col != column_to_clean] + for (i, row_1), (j, row_2) in combinations(df.iterrows(), 2): + if all(row_1[col] == row_2[col] for col in reference_columns): + if row_2[column_to_clean].startswith(row_1[column_to_clean]): + to_drop.add(i) + elif row_1[column_to_clean].startswith(row_2[column_to_clean]): + to_drop.add(j) + return df.drop(to_drop) + + +def get_regression_fails(client: Client, job_url: str): + """ + Get regression tests that did not succeed for the given job URL. + """ + # If you rename the alias for report_url, also update the formatters in format_results_as_html_table + # Nested SELECT handles test reruns + query = f"""SELECT arch, job_name, status, test_name, results_link + FROM ( + SELECT + architecture as arch, + test_name, + argMax(result, start_time) AS status, + job_name, + report_url as results_link, + job_url + FROM `gh-data`.clickhouse_regression_results + GROUP BY architecture, test_name, job_url, job_name, report_url + ORDER BY length(test_name) DESC + ) + WHERE job_url LIKE '{job_url}%' + AND status IN ('Fail', 'Error') + """ + df = client.query_dataframe(query) + df = drop_prefix_rows(df, "test_name") + df["job_name"] = df["job_name"].str.title() + return df + + +def get_new_fails_this_pr( + client: Client, + pr_info: dict, + checks_fails: pd.DataFrame, + regression_fails: pd.DataFrame, +): + """ + Get tests that failed in the PR but passed in the base branch. + Compares both checks and regression test results. 
+ """ + base_sha = pr_info.get("base", {}).get("sha") + if not base_sha: + raise Exception("No base SHA found for PR") + + # Modify tables to have the same columns + if len(checks_fails) > 0: + checks_fails = checks_fails.copy().drop(columns=["job_status"]) + if len(regression_fails) > 0: + regression_fails = regression_fails.copy() + regression_fails["job_name"] = regression_fails.apply( + lambda row: f"{row['arch']} {row['job_name']}".strip(), axis=1 + ) + regression_fails["test_status"] = regression_fails["status"] + + # Combine both types of fails and select only desired columns + desired_columns = ["job_name", "test_name", "test_status", "results_link"] + all_pr_fails = pd.concat([checks_fails, regression_fails], ignore_index=True)[ + desired_columns + ] + if len(all_pr_fails) == 0: + return pd.DataFrame() + + # Get all checks from the base branch that didn't fail + base_checks_query = f"""SELECT job_name, status as test_status, test_name, results_link + FROM ( + SELECT + check_name as job_name, + argMax(test_status, check_start_time) as status, + test_name, + report_url as results_link, + task_url + FROM `gh-data`.checks + WHERE commit_sha='{base_sha}' + GROUP BY check_name, test_name, report_url, task_url + ) + WHERE test_status NOT IN ('FAIL', 'ERROR') + ORDER BY job_name, test_name + """ + base_checks = client.query_dataframe(base_checks_query) + + # Get regression results from base branch that didn't fail + base_regression_query = f"""SELECT arch, job_name, status, test_name, results_link + FROM ( + SELECT + architecture as arch, + test_name, + argMax(result, start_time) AS status, + job_url, + job_name, + report_url as results_link + FROM `gh-data`.clickhouse_regression_results + WHERE results_link LIKE'%/{base_sha}/%' + GROUP BY architecture, test_name, job_url, job_name, report_url + ORDER BY length(test_name) DESC + ) + WHERE status NOT IN ('Fail', 'Error') + """ + base_regression = client.query_dataframe(base_regression_query) + if len(base_regression) > 0: + base_regression["job_name"] = base_regression.apply( + lambda row: f"{row['arch']} {row['job_name']}".strip(), axis=1 + ) + base_regression["test_status"] = base_regression["status"] + base_regression = base_regression.drop(columns=["arch", "status"]) + + # Combine base results + base_results = pd.concat([base_checks, base_regression], ignore_index=True) + + # Find tests that failed in PR but passed in base + pr_failed_tests = set(zip(all_pr_fails["job_name"], all_pr_fails["test_name"])) + base_passed_tests = set(zip(base_results["job_name"], base_results["test_name"])) + + new_fails = pr_failed_tests.intersection(base_passed_tests) + + # Filter PR results to only include new fails + mask = all_pr_fails.apply( + lambda row: (row["job_name"], row["test_name"]) in new_fails, axis=1 + ) + new_fails_df = all_pr_fails[mask] + + return new_fails_df + + +@lru_cache +def get_workflow_config() -> dict: + workflow_config_files = glob("./ci/tmp/workflow_config*.json") + if len(workflow_config_files) == 0: + raise Exception("No workflow config file found") + if len(workflow_config_files) > 1: + raise Exception("Multiple workflow config files found") + with open(workflow_config_files[0], "r") as f: + return json.load(f) + + +def get_cached_job(job_name: str) -> dict: + workflow_config = get_workflow_config() + return workflow_config["cache_jobs"].get(job_name, {}) + + +def get_cves(pr_number, commit_sha, branch): + """ + Fetch Grype results from S3. + + If no results are available for download, returns ... (Ellipsis). 
+ """ + s3_client = boto3.client("s3", endpoint_url=os.getenv("S3_URL")) + prefixes_to_check = set() + + def format_prefix(pr_number, commit_sha, branch): + if pr_number == 0: + return f"REFs/{branch}/{commit_sha}/grype/" + else: + return f"PRs/{pr_number}/{commit_sha}/grype/" + + cached_server_job = get_cached_job("Docker server image") + if cached_server_job: + prefixes_to_check.add( + format_prefix( + cached_server_job["pr_number"], + cached_server_job["sha"], + cached_server_job["branch"], + ) + ) + cached_keeper_job = get_cached_job("Docker keeper image") + if cached_keeper_job: + prefixes_to_check.add( + format_prefix( + cached_keeper_job["pr_number"], + cached_keeper_job["sha"], + cached_keeper_job["branch"], + ) + ) + + if not prefixes_to_check: + prefixes_to_check = {format_prefix(pr_number, commit_sha, branch)} + + grype_result_dirs = [] + for s3_prefix in prefixes_to_check: + try: + response = s3_client.list_objects_v2( + Bucket=S3_BUCKET, Prefix=s3_prefix, Delimiter="/" + ) + grype_result_dirs.extend( + content["Prefix"] for content in response.get("CommonPrefixes", []) + ) + except Exception as e: + print(f"Error listing S3 objects at {s3_prefix}: {e}") + continue + + if len(grype_result_dirs) == 0: + # We were asked to check the CVE data, but none was found, + # maybe this is a preview report and grype results are not available yet + return ... + + results = [] + for path in grype_result_dirs: + file_key = f"{path}result.json" + try: + file_response = s3_client.get_object(Bucket=S3_BUCKET, Key=file_key) + content = file_response["Body"].read().decode("utf-8") + results.append(json.loads(content)) + except Exception as e: + print(f"Error getting S3 object at {file_key}: {e}") + continue + + rows = [] + for scan_result in results: + for match in scan_result["matches"]: + rows.append( + { + "docker_image": scan_result["source"]["target"]["userInput"], + "severity": match["vulnerability"]["severity"], + "identifier": match["vulnerability"]["id"], + "namespace": match["vulnerability"]["namespace"], + } + ) + + if len(rows) == 0: + return pd.DataFrame() + + df = pd.DataFrame(rows).drop_duplicates() + df = df.sort_values( + by="severity", + key=lambda col: col.str.lower().map( + {"critical": 1, "high": 2, "medium": 3, "low": 4, "negligible": 5} + ), + ) + return df + + +def url_to_html_link(url: str) -> str: + if not url: + return "" + text = url.split("/")[-1].split("?")[0] + if not text: + text = "results" + return f'{text}' + + +def format_test_name_for_linewrap(text: str) -> str: + """Tweak the test name to improve line wrapping.""" + return f'{text}' + + +def format_test_status(text: str) -> str: + """Format the test status for better readability.""" + if text.lower().startswith("fail"): + color = "red" + elif text.lower() == "skipped": + color = "grey" + elif text.lower() in ("success", "ok", "passed", "pass"): + color = "green" + else: + color = "orange" + + return f'{text}' + + +def format_results_as_html_table(results) -> str: + if len(results) == 0: + return "

Nothing to report

" + results.columns = [col.replace("_", " ").title() for col in results.columns] + html = results.to_html( + index=False, + formatters={ + "Results Link": url_to_html_link, + "Test Name": format_test_name_for_linewrap, + "Test Status": format_test_status, + "Job Status": format_test_status, + "Status": format_test_status, + "Message": lambda m: m.replace("\n", " "), + "Identifier": lambda i: url_to_html_link( + "https://nvd.nist.gov/vuln/detail/" + i + ), + }, + escape=False, + border=0, + classes=["test-results-table"], + ) + return html + + +def backfill_skipped_statuses( + job_statuses: pd.DataFrame, pr_number: int, branch: str, commit_sha: str +): + """ + Fill in the job statuses for skipped jobs. + """ + + if pr_number == 0: + ref_param = f"REF={branch}" + workflow_name = "MasterCI" + else: + ref_param = f"PR={pr_number}" + workflow_name = "PR" + + status_file = f"result_{workflow_name.lower()}.json" + s3_path = f"https://{S3_BUCKET}.s3.amazonaws.com/{ref_param.replace('=', 's/')}/{commit_sha}/{status_file}" + response = requests.get(s3_path) + + if response.status_code != 200: + return job_statuses + + status_data = response.json() + skipped_jobs = [] + for job in status_data["results"]: + if job["status"] == "skipped" and len(job["links"]) > 0: + skipped_jobs.append( + { + "job_name": job["name"], + "job_status": job["status"], + "message": job["info"], + "results_link": job["links"][0], + } + ) + + return pd.concat([job_statuses, pd.DataFrame(skipped_jobs)], ignore_index=True) + + +def get_build_report_links( + job_statuses: pd.DataFrame, pr_number: int, branch: str, commit_sha: str +): + """ + Get the build report links for the given PR number, branch, and commit SHA. + + First checks if a build job submitted a success or skipped status. + If not available, it guesses the links. + """ + build_job_names = [ + "Build (amd_release)", + "Build (arm_release)", + "Docker server image", + "Docker keeper image", + ] + build_report_links = {} + + for job in job_statuses.itertuples(): + if ( + job.job_name in build_job_names + and job.job_status + in ( + "success", + "skipped", + ) + and job.results_link + ): + build_report_links[job.job_name] = job.results_link + + if 0 < len(build_report_links) < len(build_job_names): + # Only have some of the build jobs, guess the rest. + # (It was straightforward to force the build jobs to always appear in the cache, + # however doing the same for the docker image jobs is difficult.) 
+        ref_job, ref_link = list(build_report_links.items())[0]
+        link_template = ref_link.replace(
+            urllib.parse.quote(ref_job, safe=""), "{job_name}"
+        )
+        for job in build_job_names:
+            if job not in build_report_links:
+                # URL-encode the job name so the guessed link matches the reference link's format
+                build_report_links[job] = link_template.format(
+                    job_name=urllib.parse.quote(job, safe="")
+                )
+
+    if len(build_report_links) > 0:
+        return build_report_links
+
+    # No cache or build result was found, guess the links
+    if pr_number == 0:
+        ref_param = f"REF={branch}"
+        workflow_name = "MasterCI"
+    else:
+        ref_param = f"PR={pr_number}"
+        workflow_name = "PR"
+
+    build_report_link_base = f"https://{S3_BUCKET}.s3.amazonaws.com/json.html?{ref_param}&sha={commit_sha}&name_0={urllib.parse.quote(workflow_name, safe='')}"
+    build_report_links = {
+        job_name: f"{build_report_link_base}&name_1={urllib.parse.quote(job_name, safe='')}"
+        for job_name in build_job_names
+    }
+    return build_report_links
+
+
+def parse_args() -> argparse.Namespace:
+    parser = argparse.ArgumentParser(description="Create a combined CI report.")
+    parser.add_argument(  # Need the full URL rather than just the ID to query the databases
+        "--actions-run-url", required=True, help="URL of the actions run"
+    )
+    parser.add_argument(
+        "--pr-number", help="Pull request number for the S3 path", type=int
+    )
+    parser.add_argument("--commit-sha", help="Commit SHA for the S3 path")
+    parser.add_argument(
+        "--no-upload", action="store_true", help="Do not upload the report"
+    )
+    parser.add_argument(
+        "--known-fails", type=str, help="Path to the file with known fails"
+    )
+    parser.add_argument(
+        "--cves", action="store_true", help="Get CVEs from Grype results"
+    )
+    parser.add_argument(
+        "--mark-preview", action="store_true", help="Mark the report as a preview"
+    )
+    return parser.parse_args()
+
+
+def create_workflow_report(
+    actions_run_url: str,
+    pr_number: int = None,
+    commit_sha: str = None,
+    no_upload: bool = False,
+    known_fails: str = None,
+    check_cves: bool = False,
+    mark_preview: bool = False,
+) -> str:
+    if pr_number is None or commit_sha is None:
+        run_details = get_run_details(actions_run_url)
+        if pr_number is None:
+            if len(run_details["pull_requests"]) > 0:
+                pr_number = run_details["pull_requests"][0]["number"]
+            else:
+                pr_number = 0
+        if commit_sha is None:
+            commit_sha = run_details["head_commit"]["id"]
+
+    host = os.getenv(DATABASE_HOST_VAR)
+    if not host:
+        print(f"{DATABASE_HOST_VAR} is not set")
+    user = os.getenv(DATABASE_USER_VAR)
+    if not user:
+        print(f"{DATABASE_USER_VAR} is not set")
+    password = os.getenv(DATABASE_PASSWORD_VAR)
+    if not password:
+        print(f"{DATABASE_PASSWORD_VAR} is not set")
+    if not GITHUB_TOKEN:
+        print("GITHUB_TOKEN is not set")
+    if not all([host, user, password, GITHUB_TOKEN]):
+        raise Exception("Required environment variables are not set")
+
+    db_client = Client(
+        host=host,
+        user=user,
+        password=password,
+        port=9440,
+        secure="y",
+        verify=False,
+        settings={"use_numpy": True},
+    )
+
+    run_details = get_run_details(actions_run_url)
+    branch_name = run_details.get("head_branch", "unknown branch")
+
+    fail_results = {
+        "job_statuses": get_commit_statuses(commit_sha),
+        "checks_fails": get_checks_fails(db_client, commit_sha, branch_name),
+        "checks_known_fails": [],
+        "pr_new_fails": [],
+        "checks_errors": get_checks_errors(db_client, commit_sha, branch_name),
+        "regression_fails": get_regression_fails(db_client, actions_run_url),
+        "docker_images_cves": (
+            [] if not check_cves else get_cves(pr_number, commit_sha, branch_name)
+        ),
+    }
+
+    # get_cves returns ... in the case where no Grype result files were found.
+    # This might occur when run in preview mode.
+    cves_not_checked = not check_cves or fail_results["docker_images_cves"] is ...
+
+    if known_fails:
+        if not os.path.exists(known_fails):
+            print(f"Known fails file {known_fails} not found.")
+            exit(1)
+
+        with open(known_fails) as f:
+            known_fails = json.load(f)
+
+        if known_fails:
+            fail_results["checks_known_fails"] = get_checks_known_fails(
+                db_client, commit_sha, branch_name, known_fails
+            )
+
+    if pr_number == 0:
+        pr_info_html = f"Release ({branch_name})"
+    else:
+        try:
+            pr_info = get_pr_info_from_number(pr_number)
+            pr_info_html = f"""<a href="{pr_info.get("html_url")}">
+            #{pr_info.get("number")} ({pr_info.get("base", {}).get('ref')} <- {pr_info.get("head", {}).get('ref')}) {pr_info.get("title")}
+            </a>"""
+            fail_results["pr_new_fails"] = get_new_fails_this_pr(
+                db_client,
+                pr_info,
+                fail_results["checks_fails"],
+                fail_results["regression_fails"],
+            )
+        except Exception as e:
+            pr_info = {}
+            pr_info_html = f"Failed to fetch PR info: {e}"
+
+    fail_results["job_statuses"] = backfill_skipped_statuses(
+        fail_results["job_statuses"], pr_number, branch_name, commit_sha
+    )
+
+    high_cve_count = 0
+    if not cves_not_checked and len(fail_results["docker_images_cves"]) > 0:
+        high_cve_count = (
+            fail_results["docker_images_cves"]["severity"]
+            .str.lower()
+            .isin(("high", "critical"))
+            .sum()
+        )
+
+    # Set up the Jinja2 environment
+    template_dir = os.path.dirname(__file__)
+
+    # Load the template
+    template = Environment(loader=FileSystemLoader(template_dir)).get_template(
+        "ci_run_report.html.jinja"
+    )
+
+    # Define the context for rendering
+    context = {
+        "title": "ClickHouse® CI Workflow Run Report",
+        "github_repo": GITHUB_REPO,
+        "s3_bucket": S3_BUCKET,
+        "pr_info_html": pr_info_html,
+        "pr_number": pr_number,
+        "workflow_id": actions_run_url.split("/")[-1],
+        "commit_sha": commit_sha,
+        "base_sha": "" if pr_number == 0 else pr_info.get("base", {}).get("sha"),
+        "date": f"{datetime.utcnow().strftime('%Y-%m-%d %H:%M:%S')} UTC",
+        "is_preview": mark_preview,
+        "counts": {
+            "jobs_status": f"{sum(fail_results['job_statuses']['job_status'].value_counts().get(x, 0) for x in ('failure', 'error'))} fail/error",
+            "checks_errors": len(fail_results["checks_errors"]),
+            "checks_new_fails": len(fail_results["checks_fails"]),
+            "regression_new_fails": len(fail_results["regression_fails"]),
+            "cves": "N/A" if cves_not_checked else f"{high_cve_count} high/critical",
+            "checks_known_fails": (
+                "N/A" if not known_fails else len(fail_results["checks_known_fails"])
+            ),
+            "pr_new_fails": len(fail_results["pr_new_fails"]),
+        },
+        "build_report_links": get_build_report_links(
+            fail_results["job_statuses"], pr_number, branch_name, commit_sha
+        ),
+        "ci_jobs_status_html": format_results_as_html_table(
+            fail_results["job_statuses"]
+        ),
+        "checks_errors_html": format_results_as_html_table(
+            fail_results["checks_errors"]
+        ),
+        "checks_fails_html": format_results_as_html_table(fail_results["checks_fails"]),
+        "regression_fails_html": format_results_as_html_table(
+            fail_results["regression_fails"]
+        ),
+        "docker_images_cves_html": (
+            "<p>Not Checked</p>"
+            if cves_not_checked
+            else format_results_as_html_table(fail_results["docker_images_cves"])
+        ),
+        "checks_known_fails_html": (
+            "<p>Not Checked</p>"
+            if not known_fails
+            else format_results_as_html_table(fail_results["checks_known_fails"])
+        ),
+        "new_fails_html": format_results_as_html_table(fail_results["pr_new_fails"]),
+    }
+
+    # Render the template with the context
+    rendered_html = template.render(context)
+
+    report_name = "ci_run_report.html"
+    report_path = Path(report_name)
+    report_path.write_text(rendered_html, encoding="utf-8")
+
+    if no_upload:
+        print(f"Report saved to {report_path}")
+        exit(0)
+
+    if pr_number == 0:
+        report_destination_key = f"REFs/{branch_name}/{commit_sha}/{report_name}"
+    else:
+        report_destination_key = f"PRs/{pr_number}/{commit_sha}/{report_name}"
+
+    # Upload the report to S3
+    s3_client = boto3.client("s3", endpoint_url=os.getenv("S3_URL"))
+
+    try:
+        s3_client.put_object(
+            Bucket=S3_BUCKET,
+            Key=report_destination_key,
+            Body=rendered_html,
+            ContentType="text/html; charset=utf-8",
+        )
+    except NoCredentialsError:
+        print("Credentials not available for S3 upload.")
+
+    return f"https://s3.amazonaws.com/{S3_BUCKET}/" + report_destination_key
+
+
+def main():
+    args = parse_args()
+
+    report_url = create_workflow_report(
+        args.actions_run_url,
+        args.pr_number,
+        args.commit_sha,
+        args.no_upload,
+        args.known_fails,
+        args.cves,
+        args.mark_preview,
+    )
+
+    print(report_url)
+
+
+if __name__ == "__main__":
+    main()
diff --git a/.github/actions/create_workflow_report/workflow_report_hook.sh b/.github/actions/create_workflow_report/workflow_report_hook.sh
new file mode 100755
index 000000000000..55f5a1ede5c3
--- /dev/null
+++ b/.github/actions/create_workflow_report/workflow_report_hook.sh
@@ -0,0 +1,7 @@
+#!/bin/bash
+# This script is for generating preview reports when invoked as a post-hook from a praktika job
+pip install clickhouse-driver==0.2.8 numpy==1.26.4 pandas==2.0.3 jinja2==3.1.5
+ARGS="--mark-preview --known-fails tests/broken_tests.json --cves --actions-run-url $GITHUB_SERVER_URL/$GITHUB_REPOSITORY/actions/runs/$GITHUB_RUN_ID --pr-number $PR_NUMBER"
+CMD="python3 .github/actions/create_workflow_report/create_workflow_report.py"
+$CMD $ARGS
+
diff --git a/.github/actions/docker_setup/action.yml b/.github/actions/docker_setup/action.yml
new file mode 100644
index 000000000000..56f713fa59d1
--- /dev/null
+++ b/.github/actions/docker_setup/action.yml
@@ -0,0 +1,32 @@
+name: Docker setup
+description: Setup docker
+inputs:
+  test_name:
+    description: name of the test, used in determining ipv6 configs.
+    default: None
+    type: string
+runs:
+  using: "composite"
+  steps:
+    - name: Docker IPv6 configuration
+      shell: bash
+      if: ${{ contains(inputs.test_name, 'Stateless') }}
+      env:
+        ipv6_subnet: 2001:3984:3989::/64
+      run: |
+        # make sure docker uses proper IPv6 config
+        sudo touch /etc/docker/daemon.json
+        sudo chown ubuntu:ubuntu /etc/docker/daemon.json
+        sudo cat <<EOT > /etc/docker/daemon.json
+        {
+          "ipv6": true,
+          "fixed-cidr-v6": "${{ env.ipv6_subnet }}"
+        }
+        EOT
+        sudo chown root:root /etc/docker/daemon.json
+        sudo systemctl restart docker
+        sudo systemctl status docker
+    - name: Docker info
+      shell: bash
+      run: |
+        docker info
diff --git a/.github/actions/runner_setup/action.yml b/.github/actions/runner_setup/action.yml
new file mode 100644
index 000000000000..5a229fdd47e7
--- /dev/null
+++ b/.github/actions/runner_setup/action.yml
@@ -0,0 +1,19 @@
+name: Setup
+description: Setup environment
+runs:
+  using: "composite"
+  steps:
+    - name: Setup zram
+      shell: bash
+      run: |
+        sudo modprobe zram
+        MemTotal=$(grep -Po "(?<=MemTotal:)\s+\d+" /proc/meminfo) # KiB
+        Percent=200
+        ZRAM_SIZE=$(($MemTotal / 1024 / 1024 * $Percent / 100)) # Convert to GiB
+        .github/retry.sh 30 2 sudo zramctl --size ${ZRAM_SIZE}GiB --algorithm zstd /dev/zram0
+        sudo mkswap /dev/zram0 && sudo swapon -p 100 /dev/zram0
+        sudo sysctl vm.swappiness=200
+    - name: Install awscli
+      shell: bash
+      run: |
+        .github/retry.sh 10 30 sudo apt-get install -y awscli
diff --git a/.github/grype/parse_vulnerabilities_grype.py b/.github/grype/parse_vulnerabilities_grype.py
new file mode 100644
index 000000000000..fec2ef3bfac7
--- /dev/null
+++ b/.github/grype/parse_vulnerabilities_grype.py
@@ -0,0 +1,32 @@
+#!/usr/bin/env python3
+import json
+
+from testflows.core import *
+
+xfails = {}
+
+
+@Name("docker vulnerabilities")
+@XFails(xfails)
+@TestModule
+def docker_vulnerabilities(self):
+    with Given("I gather grype scan results"):
+        with open("./result.json", "r") as f:
+            results = json.load(f)
+
+    for vulnerability in results["matches"]:
+        with Test(
+            f"{vulnerability['vulnerability']['id']}@{vulnerability['vulnerability']['namespace']},{vulnerability['vulnerability']['severity']}",
+            flags=TE,
+        ):
+            note(vulnerability)
+            critical_levels = set(["HIGH", "CRITICAL"])
+            if vulnerability['vulnerability']["severity"].upper() in critical_levels:
+                with Then(
+                    f"Found vulnerability of {vulnerability['vulnerability']['severity']} severity"
+                ):
+                    result(Fail)
+
+
+if main():
+    docker_vulnerabilities()
diff --git a/.github/grype/run_grype_scan.sh b/.github/grype/run_grype_scan.sh
new file mode 100755
index 000000000000..af428e37d669
--- /dev/null
+++ b/.github/grype/run_grype_scan.sh
@@ -0,0 +1,18 @@
+set -x
+set -e
+
+IMAGE=$1
+
+GRYPE_VERSION=${GRYPE_VERSION:-"v0.92.2"}
+
+docker pull $IMAGE
+docker pull anchore/grype:${GRYPE_VERSION}
+
+docker run \
+    --rm --volume /var/run/docker.sock:/var/run/docker.sock \
+    --name Grype anchore/grype:${GRYPE_VERSION} \
+    --scope all-layers \
+    -o json \
+    $IMAGE > result.json
+
+ls -sh
diff --git a/.github/grype/transform_and_upload_results_s3.sh b/.github/grype/transform_and_upload_results_s3.sh
new file mode 100755
index 000000000000..38674d7a2a26
--- /dev/null
+++ b/.github/grype/transform_and_upload_results_s3.sh
@@ -0,0 +1,20 @@
+DOCKER_IMAGE=$(echo "$DOCKER_IMAGE" | sed 's/[\/:]/_/g')
+
+if [ "$PR_NUMBER" -eq 0 ]; then
+    PREFIX="REFs/$GITHUB_REF_NAME/$COMMIT_SHA/grype/$DOCKER_IMAGE"
+else
+    PREFIX="PRs/$PR_NUMBER/$COMMIT_SHA/grype/$DOCKER_IMAGE"
+fi
+
+S3_PATH="s3://$S3_BUCKET/$PREFIX"
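+# Example layout (hypothetical values: PR 123, image altinity/clickhouse-server:head):
+#   PREFIX  = PRs/123/<sha>/grype/altinity_clickhouse-server_head
+#   S3_PATH = s3://$S3_BUCKET/PRs/123/<sha>/grype/altinity_clickhouse-server_head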
+HTTPS_RESULTS_PATH="https://$S3_BUCKET.s3.amazonaws.com/index.html#$PREFIX/"
+HTTPS_REPORT_PATH="https://s3.amazonaws.com/$S3_BUCKET/$PREFIX/results.html"
+echo "https_report_path=$HTTPS_REPORT_PATH" >> $GITHUB_OUTPUT
+
+tfs --no-colors transform nice raw.log nice.log.txt
+tfs --no-colors report results -a $HTTPS_RESULTS_PATH raw.log - --copyright "Altinity LTD" | tfs --no-colors document convert > results.html
+
+aws s3 cp --no-progress nice.log.txt $S3_PATH/nice.log.txt --content-type "text/plain; charset=utf-8" || echo "nice log file not found"
+aws s3 cp --no-progress results.html $S3_PATH/results.html || echo "results file not found"
+aws s3 cp --no-progress raw.log $S3_PATH/raw.log || echo "raw.log file not found"
+aws s3 cp --no-progress result.json $S3_PATH/result.json --content-type "text/plain; charset=utf-8" || echo "result.json not found"
\ No newline at end of file
diff --git a/.github/retry.sh b/.github/retry.sh
new file mode 100755
index 000000000000..566c2cf11315
--- /dev/null
+++ b/.github/retry.sh
@@ -0,0 +1,22 @@
+#!/bin/bash
+# Execute command until exitcode is 0 or
+# maximum number of retries is reached
+# Example:
+# ./retry <retries> <delay_seconds> <command>
+retries=$1
+delay=$2
+command="${@:3}"
+exitcode=0
+try=0
+until [ "$try" -ge $retries ]
+do
+    echo "$command"
+    eval "$command"
+    exitcode=$?
+    if [ $exitcode -eq 0 ]; then
+        break
+    fi
+    try=$((try+1))
+    sleep $delay
+done
+exit $exitcode
diff --git a/.github/workflows/README.md b/.github/workflows/README.md
new file mode 100644
index 000000000000..56415c2a7478
--- /dev/null
+++ b/.github/workflows/README.md
@@ -0,0 +1,13 @@
+## Scheduled Build Run Results
+
+Results for **the latest** release_workflow scheduled runs.
+
+| Branch | Status |
+| ------------ | - |
+| **`antalya`** | [![antalya](https://github.com/Altinity/ClickHouse/actions/workflows/release_branches.yml/badge.svg?branch=antalya&event=workflow_dispatch)](https://github.com/Altinity/ClickHouse/actions/workflows/release_branches.yml?query=branch%3Aantalya) |
+| **`project-antalya-24.12.2`** | [![project-antalya-24.12.2](https://github.com/Altinity/ClickHouse/actions/workflows/release_branches.yml/badge.svg?branch=project-antalya-24.12.2&event=workflow_dispatch)](https://github.com/Altinity/ClickHouse/actions/workflows/release_branches.yml?query=branch%3Aproject-antalya-24.12.2) |
+| **`customizations/22.8.21`** | [![customizations/22.8.21](https://github.com/Altinity/ClickHouse/actions/workflows/release_branches.yml/badge.svg?branch=customizations/22.8.21&event=workflow_dispatch)](https://github.com/Altinity/ClickHouse/actions/workflows/release_branches.yml?query=branch%3Acustomizations/22.8.21) |
+| **`customizations/23.3.19`** | [![customizations/23.3.19](https://github.com/Altinity/ClickHouse/actions/workflows/release_branches.yml/badge.svg?branch=customizations/23.3.19&event=workflow_dispatch)](https://github.com/Altinity/ClickHouse/actions/workflows/release_branches.yml?query=branch%3Acustomizations/23.3.19) |
+| **`customizations/23.8.16`** | [![customizations/23.8.16](https://github.com/Altinity/ClickHouse/actions/workflows/release_branches.yml/badge.svg?branch=customizations/23.8.16&event=workflow_dispatch)](https://github.com/Altinity/ClickHouse/actions/workflows/release_branches.yml?query=branch%3Acustomizations/23.8.16) |
+| **`customizations/24.3.14`** |
[![customizations/24.3.14](https://github.com/Altinity/ClickHouse/actions/workflows/release_branches.yml/badge.svg?branch=customizations/24.3.14&event=workflow_dispatch)](https://github.com/Altinity/ClickHouse/actions/workflows/release_branches.yml?query=branch%3Acustomizations/24.3.14) | +| **`customizations/24.8.11`** | [![customizations/24.8.11](https://github.com/Altinity/ClickHouse/actions/workflows/release_branches.yml/badge.svg?branch=customizations/24.8.11&event=workflow_dispatch)](https://github.com/Altinity/ClickHouse/actions/workflows/release_branches.yml?query=branch%3Acustomizations/24.8.11) | diff --git a/.github/workflows/backport_branches.yml b/.github/workflows/backport_branches.yml index 294557a38b0f..2b6c2a3e59ad 100644 --- a/.github/workflows/backport_branches.yml +++ b/.github/workflows/backport_branches.yml @@ -3,6 +3,13 @@ name: BackportPR on: + workflow_dispatch: + inputs: + no_cache: + description: Run without cache + required: false + type: boolean + default: false pull_request: branches: ['2[1-9].[1-9][0-9]', '2[1-9].[1-9]'] @@ -10,8 +17,22 @@ env: # Force the stdout and stderr streams to be unbuffered PYTHONUNBUFFERED: 1 DISABLE_CI_MERGE_COMMIT: ${{ vars.DISABLE_CI_MERGE_COMMIT || '0' }} - DISABLE_CI_CACHE: ${{ vars.DISABLE_CI_CACHE || '0' }} + DISABLE_CI_CACHE: ${{ github.event.inputs.no_cache || '0' }} CHECKOUT_REF: ${{ vars.DISABLE_CI_MERGE_COMMIT == '1' && github.event.pull_request.head.sha || '' }} + DOCKER_PASSWORD: ${{ secrets.DOCKER_PASSWORD }} + CLICKHOUSE_TEST_STAT_URL: ${{ secrets.CLICKHOUSE_TEST_STAT_URL }} + CLICKHOUSE_TEST_STAT_LOGIN: ${{ secrets.CLICKHOUSE_TEST_STAT_LOGIN }} + CLICKHOUSE_TEST_STAT_PASSWORD: ${{ secrets.CLICKHOUSE_TEST_STAT_PASSWORD }} + AWS_ACCESS_KEY_ID: ${{ secrets.AWS_ACCESS_KEY_ID }} + AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_SECRET_ACCESS_KEY }} + AWS_DEFAULT_REGION: ${{ secrets.AWS_DEFAULT_REGION }} + CHECKS_DATABASE_HOST: ${{ secrets.CHECKS_DATABASE_HOST }} + AZURE_STORAGE_KEY: ${{ secrets.AZURE_STORAGE_KEY }} + AZURE_ACCOUNT_NAME: ${{ secrets.AZURE_ACCOUNT_NAME }} + AZURE_CONTAINER_NAME: ${{ secrets.AZURE_CONTAINER_NAME }} + AZURE_STORAGE_ACCOUNT_URL: "https://${{ secrets.AZURE_ACCOUNT_NAME }}.blob.core.windows.net/" + ROBOT_TOKEN: ${{ secrets.ROBOT_TOKEN }} + GH_TOKEN: ${{ github.token }} # Allow updating GH commit statuses and PR comments to post an actual job reports link permissions: write-all @@ -19,7 +40,7 @@ permissions: write-all jobs: config_workflow: - runs-on: [self-hosted, style-checker-aarch64] + runs-on: [self-hosted, altinity-on-demand, altinity-style-checker-aarch64] needs: [] name: "Config Workflow" outputs: @@ -30,6 +51,26 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Config Workflow" + + - name: Note report location to summary + env: + PR_NUMBER: ${{ github.event.pull_request.number || 0 }} + COMMIT_SHA: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }} + run: | + if [ "$PR_NUMBER" -eq 0 ]; then + PREFIX="REFs/$GITHUB_REF_NAME/$COMMIT_SHA" + else + PREFIX="PRs/$PR_NUMBER/$COMMIT_SHA" + fi + REPORT_LINK=https://s3.amazonaws.com/altinity-build-artifacts/$PREFIX/ci_run_report.html + echo "Workflow Run Report: [View Report]($REPORT_LINK)" >> $GITHUB_STEP_SUMMARY + - name: Prepare env script run: | rm -rf ./ci/tmp ./ci/tmp ./ci/tmp @@ -56,7 +97,7 @@ jobs: fi dockers_build_amd: - runs-on: [self-hosted, style-checker] + runs-on: 
[self-hosted, altinity-on-demand, altinity-style-checker] needs: [config_workflow] if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'RG9ja2VycyBCdWlsZCAoYW1kKQ==') }} name: "Dockers Build (amd)" @@ -68,6 +109,13 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Dockers Build (amd)" + - name: Prepare env script run: | rm -rf ./ci/tmp ./ci/tmp ./ci/tmp @@ -94,7 +142,7 @@ jobs: fi dockers_build_arm: - runs-on: [self-hosted, style-checker-aarch64] + runs-on: [self-hosted, altinity-on-demand, altinity-style-checker-aarch64] needs: [config_workflow] if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'RG9ja2VycyBCdWlsZCAoYXJtKQ==') }} name: "Dockers Build (arm)" @@ -106,6 +154,13 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Dockers Build (arm)" + - name: Prepare env script run: | rm -rf ./ci/tmp ./ci/tmp ./ci/tmp @@ -132,7 +187,7 @@ jobs: fi build_amd_debug: - runs-on: [self-hosted, builder] + runs-on: [self-hosted, altinity-on-demand, altinity-builder] needs: [config_workflow, dockers_build_amd, dockers_build_arm] if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'QnVpbGQgKGFtZF9kZWJ1Zyk=') }} name: "Build (amd_debug)" @@ -144,6 +199,13 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Build (amd_debug)" + - name: Prepare env script run: | rm -rf ./ci/tmp ./ci/tmp ./ci/tmp @@ -170,7 +232,7 @@ jobs: fi build_amd_release: - runs-on: [self-hosted, builder] + runs-on: [self-hosted, altinity-on-demand, altinity-builder] needs: [config_workflow, dockers_build_amd, dockers_build_arm] if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'QnVpbGQgKGFtZF9yZWxlYXNlKQ==') }} name: "Build (amd_release)" @@ -182,6 +244,13 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Build (amd_release)" + - name: Prepare env script run: | rm -rf ./ci/tmp ./ci/tmp ./ci/tmp @@ -208,7 +277,7 @@ jobs: fi build_amd_asan: - runs-on: [self-hosted, builder] + runs-on: [self-hosted, altinity-on-demand, altinity-builder] needs: [config_workflow, dockers_build_amd, dockers_build_arm] if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'QnVpbGQgKGFtZF9hc2FuKQ==') }} name: "Build (amd_asan)" @@ -220,6 +289,13 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Build (amd_asan)" + - name: Prepare env script run: | rm -rf ./ci/tmp ./ci/tmp ./ci/tmp @@ -246,7 +322,7 @@ jobs: fi build_amd_tsan: - runs-on: [self-hosted, builder] + runs-on: [self-hosted, altinity-on-demand, altinity-builder] needs: [config_workflow, dockers_build_amd, dockers_build_arm] if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 
'QnVpbGQgKGFtZF90c2FuKQ==') }} name: "Build (amd_tsan)" @@ -258,6 +334,13 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Build (amd_tsan)" + - name: Prepare env script run: | rm -rf ./ci/tmp ./ci/tmp ./ci/tmp @@ -284,7 +367,7 @@ jobs: fi build_arm_release: - runs-on: [self-hosted, builder-aarch64] + runs-on: [self-hosted, altinity-on-demand, altinity-builder] needs: [config_workflow, dockers_build_amd, dockers_build_arm] if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'QnVpbGQgKGFybV9yZWxlYXNlKQ==') }} name: "Build (arm_release)" @@ -296,6 +379,13 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Build (arm_release)" + - name: Prepare env script run: | rm -rf ./ci/tmp ./ci/tmp ./ci/tmp @@ -322,7 +412,7 @@ jobs: fi build_amd_darwin: - runs-on: [self-hosted, builder] + runs-on: [self-hosted, altinity-on-demand, altinity-builder] needs: [config_workflow, dockers_build_amd, dockers_build_arm] if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'QnVpbGQgKGFtZF9kYXJ3aW4p') }} name: "Build (amd_darwin)" @@ -334,6 +424,13 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Build (amd_darwin)" + - name: Prepare env script run: | rm -rf ./ci/tmp ./ci/tmp ./ci/tmp @@ -360,7 +457,7 @@ jobs: fi build_arm_darwin: - runs-on: [self-hosted, builder-aarch64] + runs-on: [self-hosted, altinity-on-demand, altinity-builder] needs: [config_workflow, dockers_build_amd, dockers_build_arm] if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'QnVpbGQgKGFybV9kYXJ3aW4p') }} name: "Build (arm_darwin)" @@ -372,6 +469,13 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Build (arm_darwin)" + - name: Prepare env script run: | rm -rf ./ci/tmp ./ci/tmp ./ci/tmp @@ -398,7 +502,7 @@ jobs: fi docker_server_image: - runs-on: [self-hosted, style-checker] + runs-on: [self-hosted, altinity-on-demand, altinity-style-checker] needs: [config_workflow, dockers_build_amd, dockers_build_arm, build_amd_release, build_arm_release] if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'RG9ja2VyIHNlcnZlciBpbWFnZQ==') }} name: "Docker server image" @@ -410,6 +514,13 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Docker server image" + - name: Prepare env script run: | rm -rf ./ci/tmp ./ci/tmp ./ci/tmp @@ -436,7 +547,7 @@ jobs: fi docker_keeper_image: - runs-on: [self-hosted, style-checker] + runs-on: [self-hosted, altinity-on-demand, altinity-style-checker] needs: [config_workflow, dockers_build_amd, dockers_build_arm, build_amd_release, build_arm_release] if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'RG9ja2VyIGtlZXBlciBpbWFnZQ==') }} name: "Docker keeper image" @@ 
-448,6 +559,13 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Docker keeper image" + - name: Prepare env script run: | rm -rf ./ci/tmp ./ci/tmp ./ci/tmp @@ -474,7 +592,7 @@ jobs: fi install_packages_amd_debug: - runs-on: [self-hosted, style-checker] + runs-on: [self-hosted, altinity-on-demand, altinity-func-tester] needs: [config_workflow, dockers_build_amd, dockers_build_arm, build_amd_debug] if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'SW5zdGFsbCBwYWNrYWdlcyAoYW1kX2RlYnVnKQ==') }} name: "Install packages (amd_debug)" @@ -486,6 +604,13 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Install packages (amd_debug)" + - name: Prepare env script run: | rm -rf ./ci/tmp ./ci/tmp ./ci/tmp @@ -512,7 +637,7 @@ jobs: fi compatibility_check_release: - runs-on: [self-hosted, style-checker] + runs-on: [self-hosted, altinity-on-demand, altinity-style-checker] needs: [config_workflow, dockers_build_amd, dockers_build_arm, build_amd_release] if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'Q29tcGF0aWJpbGl0eSBjaGVjayAocmVsZWFzZSk=') }} name: "Compatibility check (release)" @@ -524,6 +649,13 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Compatibility check (release)" + - name: Prepare env script run: | rm -rf ./ci/tmp ./ci/tmp ./ci/tmp @@ -550,7 +682,7 @@ jobs: fi compatibility_check_aarch64: - runs-on: [self-hosted, style-checker-aarch64] + runs-on: [self-hosted, altinity-on-demand, altinity-style-checker-aarch64] needs: [config_workflow, dockers_build_amd, dockers_build_arm, build_arm_release] if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'Q29tcGF0aWJpbGl0eSBjaGVjayAoYWFyY2g2NCk=') }} name: "Compatibility check (aarch64)" @@ -562,6 +694,13 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Compatibility check (aarch64)" + - name: Prepare env script run: | rm -rf ./ci/tmp ./ci/tmp ./ci/tmp @@ -588,7 +727,7 @@ jobs: fi stateless_tests_amd_asan_distributed_plan_parallel_1_2: - runs-on: [self-hosted, amd-medium-cpu] + runs-on: [self-hosted, altinity-on-demand, altinity-func-tester] needs: [config_workflow, dockers_build_amd, dockers_build_arm, build_amd_asan] if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhbWRfYXNhbiwgZGlzdHJpYnV0ZWQgcGxhbiwgcGFyYWxsZWwsIDEvMik=') }} name: "Stateless tests (amd_asan, distributed plan, parallel, 1/2)" @@ -600,6 +739,13 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Stateless tests (amd_asan, distributed plan, parallel, 1/2)" + - name: Prepare env script run: | rm -rf ./ci/tmp ./ci/tmp ./ci/tmp @@ -626,7 +772,7 @@ jobs: fi stateless_tests_amd_asan_distributed_plan_parallel_2_2: - runs-on: [self-hosted, amd-medium-cpu] + 
runs-on: [self-hosted, altinity-on-demand, altinity-func-tester] needs: [config_workflow, dockers_build_amd, dockers_build_arm, build_amd_asan] if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhbWRfYXNhbiwgZGlzdHJpYnV0ZWQgcGxhbiwgcGFyYWxsZWwsIDIvMik=') }} name: "Stateless tests (amd_asan, distributed plan, parallel, 2/2)" @@ -638,6 +784,13 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Stateless tests (amd_asan, distributed plan, parallel, 2/2)" + - name: Prepare env script run: | rm -rf ./ci/tmp ./ci/tmp ./ci/tmp @@ -664,7 +817,7 @@ jobs: fi stateless_tests_amd_asan_distributed_plan_sequential: - runs-on: [self-hosted, amd-small-mem] + runs-on: [self-hosted, altinity-on-demand, altinity-func-tester] needs: [config_workflow, dockers_build_amd, dockers_build_arm, build_amd_asan] if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhbWRfYXNhbiwgZGlzdHJpYnV0ZWQgcGxhbiwgc2VxdWVudGlhbCk=') }} name: "Stateless tests (amd_asan, distributed plan, sequential)" @@ -676,6 +829,13 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Stateless tests (amd_asan, distributed plan, sequential)" + - name: Prepare env script run: | rm -rf ./ci/tmp ./ci/tmp ./ci/tmp @@ -702,7 +862,7 @@ jobs: fi stress_test_amd_tsan: - runs-on: [self-hosted, amd-medium] + runs-on: [self-hosted, altinity-on-demand, altinity-func-tester] needs: [config_workflow, dockers_build_amd, dockers_build_arm, build_amd_tsan] if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'U3RyZXNzIHRlc3QgKGFtZF90c2FuKQ==') }} name: "Stress test (amd_tsan)" @@ -714,6 +874,13 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Stress test (amd_tsan)" + - name: Prepare env script run: | rm -rf ./ci/tmp ./ci/tmp ./ci/tmp @@ -740,7 +907,7 @@ jobs: fi integration_tests_amd_asan_old_analyzer_1_6: - runs-on: [self-hosted, amd-medium] + runs-on: [self-hosted, altinity-on-demand, altinity-func-tester] needs: [config_workflow, dockers_build_amd, dockers_build_arm, build_amd_asan] if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKGFtZF9hc2FuLCBvbGQgYW5hbHl6ZXIsIDEvNik=') }} name: "Integration tests (amd_asan, old analyzer, 1/6)" @@ -752,6 +919,13 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Integration tests (amd_asan, old analyzer, 1/6)" + - name: Prepare env script run: | rm -rf ./ci/tmp ./ci/tmp ./ci/tmp @@ -778,7 +952,7 @@ jobs: fi integration_tests_amd_asan_old_analyzer_2_6: - runs-on: [self-hosted, amd-medium] + runs-on: [self-hosted, altinity-on-demand, altinity-func-tester] needs: [config_workflow, dockers_build_amd, dockers_build_arm, build_amd_asan] if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 
'SW50ZWdyYXRpb24gdGVzdHMgKGFtZF9hc2FuLCBvbGQgYW5hbHl6ZXIsIDIvNik=') }} name: "Integration tests (amd_asan, old analyzer, 2/6)" @@ -790,6 +964,13 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Integration tests (amd_asan, old analyzer, 2/6)" + - name: Prepare env script run: | rm -rf ./ci/tmp ./ci/tmp ./ci/tmp @@ -816,7 +997,7 @@ jobs: fi integration_tests_amd_asan_old_analyzer_3_6: - runs-on: [self-hosted, amd-medium] + runs-on: [self-hosted, altinity-on-demand, altinity-func-tester] needs: [config_workflow, dockers_build_amd, dockers_build_arm, build_amd_asan] if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKGFtZF9hc2FuLCBvbGQgYW5hbHl6ZXIsIDMvNik=') }} name: "Integration tests (amd_asan, old analyzer, 3/6)" @@ -828,6 +1009,13 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Integration tests (amd_asan, old analyzer, 3/6)" + - name: Prepare env script run: | rm -rf ./ci/tmp ./ci/tmp ./ci/tmp @@ -854,7 +1042,7 @@ jobs: fi integration_tests_amd_asan_old_analyzer_4_6: - runs-on: [self-hosted, amd-medium] + runs-on: [self-hosted, altinity-on-demand, altinity-func-tester] needs: [config_workflow, dockers_build_amd, dockers_build_arm, build_amd_asan] if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKGFtZF9hc2FuLCBvbGQgYW5hbHl6ZXIsIDQvNik=') }} name: "Integration tests (amd_asan, old analyzer, 4/6)" @@ -866,6 +1054,13 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Integration tests (amd_asan, old analyzer, 4/6)" + - name: Prepare env script run: | rm -rf ./ci/tmp ./ci/tmp ./ci/tmp @@ -892,7 +1087,7 @@ jobs: fi integration_tests_amd_asan_old_analyzer_5_6: - runs-on: [self-hosted, amd-medium] + runs-on: [self-hosted, altinity-on-demand, altinity-func-tester] needs: [config_workflow, dockers_build_amd, dockers_build_arm, build_amd_asan] if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKGFtZF9hc2FuLCBvbGQgYW5hbHl6ZXIsIDUvNik=') }} name: "Integration tests (amd_asan, old analyzer, 5/6)" @@ -904,6 +1099,13 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Integration tests (amd_asan, old analyzer, 5/6)" + - name: Prepare env script run: | rm -rf ./ci/tmp ./ci/tmp ./ci/tmp @@ -930,7 +1132,7 @@ jobs: fi integration_tests_amd_asan_old_analyzer_6_6: - runs-on: [self-hosted, amd-medium] + runs-on: [self-hosted, altinity-on-demand, altinity-func-tester] needs: [config_workflow, dockers_build_amd, dockers_build_arm, build_amd_asan] if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKGFtZF9hc2FuLCBvbGQgYW5hbHl6ZXIsIDYvNik=') }} name: "Integration tests (amd_asan, old analyzer, 6/6)" @@ -942,6 +1144,13 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + 
uses: ./.github/actions/docker_setup + with: + test_name: "Integration tests (amd_asan, old analyzer, 6/6)" + - name: Prepare env script run: | rm -rf ./ci/tmp ./ci/tmp ./ci/tmp @@ -968,7 +1177,7 @@ jobs: fi integration_tests_amd_tsan_1_6: - runs-on: [self-hosted, amd-medium] + runs-on: [self-hosted, altinity-on-demand, altinity-func-tester] needs: [config_workflow, dockers_build_amd, dockers_build_arm, build_amd_tsan] if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKGFtZF90c2FuLCAxLzYp') }} name: "Integration tests (amd_tsan, 1/6)" @@ -980,6 +1189,13 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Integration tests (amd_tsan, 1/6)" + - name: Prepare env script run: | rm -rf ./ci/tmp ./ci/tmp ./ci/tmp @@ -1006,7 +1222,7 @@ jobs: fi integration_tests_amd_tsan_2_6: - runs-on: [self-hosted, amd-medium] + runs-on: [self-hosted, altinity-on-demand, altinity-func-tester] needs: [config_workflow, dockers_build_amd, dockers_build_arm, build_amd_tsan] if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKGFtZF90c2FuLCAyLzYp') }} name: "Integration tests (amd_tsan, 2/6)" @@ -1018,6 +1234,13 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Integration tests (amd_tsan, 2/6)" + - name: Prepare env script run: | rm -rf ./ci/tmp ./ci/tmp ./ci/tmp @@ -1044,7 +1267,7 @@ jobs: fi integration_tests_amd_tsan_3_6: - runs-on: [self-hosted, amd-medium] + runs-on: [self-hosted, altinity-on-demand, altinity-func-tester] needs: [config_workflow, dockers_build_amd, dockers_build_arm, build_amd_tsan] if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKGFtZF90c2FuLCAzLzYp') }} name: "Integration tests (amd_tsan, 3/6)" @@ -1056,6 +1279,13 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Integration tests (amd_tsan, 3/6)" + - name: Prepare env script run: | rm -rf ./ci/tmp ./ci/tmp ./ci/tmp @@ -1082,7 +1312,7 @@ jobs: fi integration_tests_amd_tsan_4_6: - runs-on: [self-hosted, amd-medium] + runs-on: [self-hosted, altinity-on-demand, altinity-func-tester] needs: [config_workflow, dockers_build_amd, dockers_build_arm, build_amd_tsan] if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKGFtZF90c2FuLCA0LzYp') }} name: "Integration tests (amd_tsan, 4/6)" @@ -1094,6 +1324,13 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Integration tests (amd_tsan, 4/6)" + - name: Prepare env script run: | rm -rf ./ci/tmp ./ci/tmp ./ci/tmp @@ -1120,7 +1357,7 @@ jobs: fi integration_tests_amd_tsan_5_6: - runs-on: [self-hosted, amd-medium] + runs-on: [self-hosted, altinity-on-demand, altinity-func-tester] needs: [config_workflow, dockers_build_amd, dockers_build_arm, build_amd_tsan] if: ${{ !failure() && !cancelled() && 
!contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKGFtZF90c2FuLCA1LzYp') }} name: "Integration tests (amd_tsan, 5/6)" @@ -1132,6 +1369,13 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Integration tests (amd_tsan, 5/6)" + - name: Prepare env script run: | rm -rf ./ci/tmp ./ci/tmp ./ci/tmp @@ -1158,7 +1402,7 @@ jobs: fi integration_tests_amd_tsan_6_6: - runs-on: [self-hosted, amd-medium] + runs-on: [self-hosted, altinity-on-demand, altinity-func-tester] needs: [config_workflow, dockers_build_amd, dockers_build_arm, build_amd_tsan] if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKGFtZF90c2FuLCA2LzYp') }} name: "Integration tests (amd_tsan, 6/6)" @@ -1170,6 +1414,13 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Integration tests (amd_tsan, 6/6)" + - name: Prepare env script run: | rm -rf ./ci/tmp ./ci/tmp ./ci/tmp @@ -1196,7 +1447,7 @@ jobs: fi finish_workflow: - runs-on: [self-hosted, style-checker-aarch64] + runs-on: [self-hosted, altinity-on-demand, altinity-style-checker-aarch64] needs: [config_workflow, dockers_build_amd, dockers_build_arm, build_amd_debug, build_amd_release, build_amd_asan, build_amd_tsan, build_arm_release, build_amd_darwin, build_arm_darwin, docker_server_image, docker_keeper_image, install_packages_amd_debug, compatibility_check_release, compatibility_check_aarch64, stateless_tests_amd_asan_distributed_plan_parallel_1_2, stateless_tests_amd_asan_distributed_plan_parallel_2_2, stateless_tests_amd_asan_distributed_plan_sequential, stress_test_amd_tsan, integration_tests_amd_asan_old_analyzer_1_6, integration_tests_amd_asan_old_analyzer_2_6, integration_tests_amd_asan_old_analyzer_3_6, integration_tests_amd_asan_old_analyzer_4_6, integration_tests_amd_asan_old_analyzer_5_6, integration_tests_amd_asan_old_analyzer_6_6, integration_tests_amd_tsan_1_6, integration_tests_amd_tsan_2_6, integration_tests_amd_tsan_3_6, integration_tests_amd_tsan_4_6, integration_tests_amd_tsan_5_6, integration_tests_amd_tsan_6_6] if: ${{ !cancelled() }} name: "Finish Workflow" @@ -1208,6 +1459,13 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Finish Workflow" + - name: Prepare env script run: | rm -rf ./ci/tmp ./ci/tmp ./ci/tmp diff --git a/.github/workflows/cancel.yml b/.github/workflows/cancel.yml new file mode 100644 index 000000000000..c1e11ef212cd --- /dev/null +++ b/.github/workflows/cancel.yml @@ -0,0 +1,19 @@ +name: Cancel + +env: + # Force the stdout and stderr streams to be unbuffered + PYTHONUNBUFFERED: 1 + +on: # yamllint disable-line rule:truthy + workflow_run: + workflows: ["PR","PullRequestCI", "ReleaseBranchCI", "DocsCheck", "BackportPR"] + types: + - requested +jobs: + cancel: + runs-on: ubuntu-latest + steps: + - uses: styfle/cancel-workflow-action@0.9.1 + with: + all_but_latest: true + workflow_id: ${{ github.event.workflow.id }} diff --git a/.github/workflows/cherry_pick.yml b/.github/workflows/cherry_pick.yml index 315673d4abcc..8e5191eb33cc 100644 --- a/.github/workflows/cherry_pick.yml +++ b/.github/workflows/cherry_pick.yml @@ 
-28,7 +28,7 @@ jobs: REPO_TEAM=core EOF - name: Check out repository code - uses: ClickHouse/checkout@v1 + uses: Altinity/checkout@19599efdf36c4f3f30eb55d5bb388896faea69f with: clear-repository: true token: ${{secrets.ROBOT_CLICKHOUSE_COMMIT_TOKEN}} diff --git a/.github/workflows/compare_fails.yml b/.github/workflows/compare_fails.yml new file mode 100644 index 000000000000..1f734845ac1a --- /dev/null +++ b/.github/workflows/compare_fails.yml @@ -0,0 +1,110 @@ +name: Compare CI Failures + +on: + workflow_dispatch: + inputs: + current_ref: + description: 'Current reference (commit hash or git tag) (default: current commit on selected branch)' + required: false + type: string + previous_ref: + description: 'Previous reference to compare with (commit hash, git tag or workflow url) (default: previous stable tag for current reference)' + required: false + type: string + upstream_ref: + description: 'Upstream reference to compare with (commit hash, git tag or MAJOR.MINOR version) (default: previous lts tag for current reference)' + required: false + type: string + include_broken: + description: 'Include BROKEN tests in comparison' + required: false + type: boolean + default: false + push: + tags: + - 'v*.altinity*' + +env: + CHECKS_DATABASE_HOST: ${{ secrets.CHECKS_DATABASE_HOST }} + CHECKS_DATABASE_USER: ${{ secrets.CLICKHOUSE_TEST_STAT_LOGIN }} + CHECKS_DATABASE_PASSWORD: ${{ secrets.CLICKHOUSE_TEST_STAT_PASSWORD }} + +jobs: + Compare: + runs-on: [self-hosted, altinity-on-demand, altinity-style-checker-aarch64] + steps: + - name: Check commit status + run: | + if [[ "${{ github.event_name }}" == "workflow_dispatch" && -n "${{ inputs.current_ref }}" ]]; then + # For workflow_dispatch with custom ref, skip the check + exit 0 + fi + + # Query GitHub API for commit status + STATUSES=$(curl -s -H "Authorization: token ${{ secrets.GITHUB_TOKEN }}" \ + "https://api.github.com/repos/${{ github.repository }}/commits/${{ github.sha }}/status") + + # Check if there are any statuses + if [ "$(echo $STATUSES | jq '.total_count')" -eq 0 ]; then + echo "No commit statuses found for ${{ github.sha }}. Assuming tests have not run yet. Aborting workflow." + exit 1 + fi + + echo "Found commit statuses, proceeding with comparison." 
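+
+      # The steps below check out the requested ref, then resolve default values for
+      # the previous Altinity stable tag and the matching upstream -lts tag before
+      # running the comparison script against the checks database.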
+ + - name: Check out repository code + uses: actions/checkout@v3 + with: + fetch-depth: 0 + ref: ${{ inputs.current_ref || github.ref }} + + - name: Set up Python + uses: actions/setup-python@v4 + with: + python-version: '3.x' + cache: 'pip' + + - name: Install dependencies + run: | + python -m pip install --upgrade pip + pip install clickhouse-driver requests pandas tabulate + + - name: Set default refs + id: default_refs + run: | + VERSION=$(git describe --tags --abbrev=0 | sed 's/v\([0-9]\+\.[0-9]\+\).*/\1/') + echo "Detected version: $VERSION" + + CURRENT_TAG=$(git tag --contains ${{ inputs.current_ref || github.sha }} | sort -r | grep -m 1 'altinity' || echo '') + echo "CURRENT_TAG: '$CURRENT_TAG' ${{ inputs.current_ref || github.sha }}" + PREVIOUS_TAG_COMMIT=$(git log -1 --until=yesterday --tags=v${VERSION}*.altinity* | grep -Po "(?<=commit ).*") + PREVIOUS_TAG=$(git tag --contains $PREVIOUS_TAG_COMMIT | sort -r | grep -m 1 'altinity') + echo "PREVIOUS_TAG: '$PREVIOUS_TAG' $PREVIOUS_TAG_COMMIT" + UPSTREAM_TAG_COMMIT=$(git log -1 --tags=v${VERSION}*-lts | grep -Po "(?<=commit ).*") + UPSTREAM_TAG=$(git tag --contains $UPSTREAM_TAG_COMMIT | sort -r | grep -m 1 'lts') + echo "UPSTREAM_TAG: '$UPSTREAM_TAG' $UPSTREAM_TAG_COMMIT" + + echo "PREVIOUS_TAG=$PREVIOUS_TAG" >> $GITHUB_OUTPUT + echo "PREVIOUS_TAG_COMMIT=$PREVIOUS_TAG_COMMIT" >> $GITHUB_OUTPUT + echo "UPSTREAM_TAG=$UPSTREAM_TAG" >> $GITHUB_OUTPUT + echo "UPSTREAM_TAG_COMMIT=$UPSTREAM_TAG_COMMIT" >> $GITHUB_OUTPUT + echo "CURRENT_TAG=$CURRENT_TAG" >> $GITHUB_OUTPUT + - name: Comparison report + if: ${{ !cancelled() }} + run: | + git clone https://github.com/Altinity/actions.git + cd actions + git checkout 4623f919ee2738bea69aad405879562476736932 + python3 scripts/compare_ci_fails.py \ + --current-ref ${{ steps.default_refs.outputs.CURRENT_TAG || inputs.current_ref || github.sha }} \ + --previous-ref ${{ steps.default_refs.outputs.PREVIOUS_TAG || inputs.previous_ref || steps.default_refs.outputs.PREVIOUS_TAG_COMMIT }} \ + --upstream-ref ${{ steps.default_refs.outputs.UPSTREAM_TAG || inputs.upstream_ref || steps.default_refs.outputs.UPSTREAM_TAG_COMMIT }} \ + ${{ inputs.include_broken && '--broken' || '' }} + cat comparison_results.md >> $GITHUB_STEP_SUMMARY + + - name: Upload comparison results + uses: actions/upload-artifact@v4 + with: + name: comparison-results + path: | + actions/comparison_results.md diff --git a/.github/workflows/create_release.yml b/.github/workflows/create_release.yml index dc708514dfd5..421261fb436f 100644 --- a/.github/workflows/create_release.yml +++ b/.github/workflows/create_release.yml @@ -70,7 +70,7 @@ jobs: runs-on: [self-hosted, release-maker] steps: - name: Check out repository code - uses: ClickHouse/checkout@v1 + uses: Altinity/checkout@19599efdf36c4f3f30eb55d5bb388896faea69f with: token: ${{secrets.ROBOT_CLICKHOUSE_COMMIT_TOKEN}} fetch-depth: 0 diff --git a/.github/workflows/docker_publish.yml b/.github/workflows/docker_publish.yml new file mode 100644 index 000000000000..1e59aa8b5b8d --- /dev/null +++ b/.github/workflows/docker_publish.yml @@ -0,0 +1,150 @@ +name: Republish Multiarch Docker Image + +on: + workflow_dispatch: + inputs: + docker_image: + description: 'Multiarch Docker image with tag' + required: true + release_environment: + description: 'Select release type: "staging" or "production"' + type: choice + default: 'staging' + options: + - staging + - production + upload_artifacts: + description: 'Upload artifacts directly in this workflow' + type: boolean + default: true + 
s3_upload_path: + description: 'Upload artifacts to s3 path' + type: string + required: false + workflow_call: + inputs: + docker_image: + type: string + required: true + release_environment: + type: string + required: false + default: 'staging' + upload_artifacts: + type: boolean + required: false + default: false + s3_upload_path: + type: string + required: false + outputs: + image_archives_path: + description: 'Path to the image archives directory' + value: ${{ jobs.republish.outputs.image_archives_path }} + +env: + IMAGE: ${{ github.event.inputs.docker_image || inputs.docker_image }} + +jobs: + republish: + runs-on: [self-hosted, altinity-on-demand, altinity-style-checker-aarch64] + outputs: + image_archives_path: ${{ steps.set_path.outputs.image_archives_path }} + steps: + - name: Docker Hub Login + uses: docker/login-action@v2 + with: + username: ${{ secrets.DOCKER_USERNAME }} + password: ${{ secrets.DOCKER_TOKEN }} + + - name: Set clickhouse-server version as new tag + run: | + # Determine "clickhouse-server" or "clickhouse-keeper" + echo "Input IMAGE: $IMAGE" + COMPONENT=$(echo "$IMAGE" | sed -E 's|.*/(clickhouse-[^:]+):.*|\1|') + echo "Component determined: $COMPONENT" + echo "COMPONENT=$COMPONENT" >> $GITHUB_ENV + + # Pull the image + echo "Pulling the image" + docker pull $IMAGE + + # Get version and clean it up + echo "Getting version from image..." + VERSION_OUTPUT=$(docker run --rm $IMAGE $COMPONENT --version) + echo "Raw version output: $VERSION_OUTPUT" + + # Extract just the version number + NEW_TAG=$(echo "$VERSION_OUTPUT" | sed -E 's/.*version ([0-9.]+[^ ]*).*/\1/') + echo "Cleaned version: $NEW_TAG" + + # Append "-prerelease" if necessary + if [ "${{ github.event.inputs.release_environment || inputs.release_environment }}" = "staging" ]; then + NEW_TAG="${NEW_TAG}-prerelease" + fi + + if [[ "$IMAGE" == *-alpine* ]]; then + NEW_TAG="${NEW_TAG}-alpine" + fi + echo "New tag: $NEW_TAG" + + # Export the new tag + echo "NEW_TAG=$NEW_TAG" >> $GITHUB_ENV + + - name: Process multiarch manifest + run: | + echo "Re-tag multiarch image $IMAGE to altinity/$COMPONENT:$NEW_TAG" + docker buildx imagetools create --tag "altinity/$COMPONENT:$NEW_TAG" "$IMAGE" + + # Create directory for image archives + mkdir -p image_archives + + # Pull and save platform-specific images + for PLATFORM in "linux/amd64" "linux/arm64"; do + echo "Pulling and saving image for $PLATFORM..." + # Pull the specific platform image + docker pull --platform $PLATFORM "altinity/$COMPONENT:$NEW_TAG" + + # Save the image to a tar file + ARCH=$(echo $PLATFORM | cut -d'/' -f2) + docker save "altinity/$COMPONENT:$NEW_TAG" -o "image_archives/${COMPONENT}-${NEW_TAG}-${ARCH}.tar" + done + + # Save manifest inspection + docker buildx imagetools inspect "altinity/$COMPONENT:$NEW_TAG" > image_archives/manifest.txt + + # Compress the archives + cd image_archives + for file in *.tar; do + gzip "$file" + done + cd .. 
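+
+          # Illustrative layout after this step (the tag is hypothetical):
+          #   image_archives/clickhouse-server-25.3.1-prerelease-amd64.tar.gz
+          #   image_archives/clickhouse-server-25.3.1-prerelease-arm64.tar.gz
+          #   image_archives/manifest.txt  (buildx imagetools inspect output)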
+
+      - name: Set image archives path
+        id: set_path
+        run: |
+          echo "image_archives_path=${{ github.workspace }}/image_archives" >> $GITHUB_OUTPUT
+
+      - name: Upload image archives
+        if: ${{ github.event.inputs.upload_artifacts || inputs.upload_artifacts }}
+        uses: actions/upload-artifact@v4
+        with:
+          name: docker-images-backup
+          path: image_archives/
+          retention-days: 90
+
+      - name: Install aws cli
+        if: ${{ inputs.s3_upload_path != '' }}
+        uses: unfor19/install-aws-cli-action@v1
+        with:
+          version: 2
+          arch: arm64
+
+      - name: Upload to S3
+        if: ${{ inputs.s3_upload_path != '' }}
+        env:
+          AWS_ACCESS_KEY_ID: ${{ secrets.AWS_ACCESS_KEY_ID }}
+          AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
+        run: |
+          aws s3 sync image_archives/ "${{ inputs.s3_upload_path }}"
+
diff --git a/.github/workflows/grype_scan.yml b/.github/workflows/grype_scan.yml
new file mode 100644
index 000000000000..a92fec5f9879
--- /dev/null
+++ b/.github/workflows/grype_scan.yml
@@ -0,0 +1,154 @@
+name: Grype Scan
+run-name: Grype Scan ${{ inputs.docker_image }}
+
+on:
+  workflow_dispatch:
+    # Inputs for manual run
+    inputs:
+      docker_image:
+        description: 'Docker image. If no tag is given, it will be determined by version_helper.py'
+        required: true
+  workflow_call:
+    # Inputs for workflow call
+    inputs:
+      docker_image:
+        description: 'Docker image. If no tag is given, it will be determined by version_helper.py'
+        required: true
+        type: string
+      version:
+        description: 'Version tag. If no version is given, it will be determined by version_helper.py'
+        required: false
+        type: string
+        default: ""
+      tag-suffix:
+        description: 'Tag suffix. To be appended to the version from version_helper.py'
+        required: false
+        type: string
+        default: ""
+env:
+  PYTHONUNBUFFERED: 1
+  AWS_ACCESS_KEY_ID: ${{ secrets.AWS_ACCESS_KEY_ID }}
+  AWS_DEFAULT_REGION: ${{ secrets.AWS_DEFAULT_REGION }}
+  AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
+  GRYPE_VERSION: "v0.92.2-arm64v8"
+
+jobs:
+  grype_scan:
+    name: Grype Scan
+    runs-on: [self-hosted, altinity-on-demand, altinity-style-checker-aarch64]
+    steps:
+      - name: Checkout repository
+        uses: actions/checkout@v4
+
+      - name: Set up Docker
+        uses: docker/setup-buildx-action@v3
+
+      - name: Set up Python
+        run: |
+          export TESTFLOWS_VERSION="2.4.19"
+          sudo apt-get update
+          sudo apt-get install -y python3-pip python3-venv
+          python3 -m venv venv
+          source venv/bin/activate
+          pip install --upgrade requests chardet urllib3 unidiff boto3 PyGithub
+          pip install testflows==$TESTFLOWS_VERSION awscli==1.33.28
+          echo PATH=$PATH >>$GITHUB_ENV
+
+      - name: Set image tag if not given
+        if: ${{ !contains(inputs.docker_image, ':') }}
+        id: set_version
+        env:
+          TAG_SUFFIX: ${{ inputs.tag-suffix }}
+          SPECIFIED_VERSION: ${{ inputs.version }}
+        run: |
+          python3 ./tests/ci/version_helper.py | grep = | tee /tmp/version_info
+          source /tmp/version_info
+          if [ -z "$SPECIFIED_VERSION" ]; then
+            VERSION=$CLICKHOUSE_VERSION_STRING
+          else
+            VERSION=$SPECIFIED_VERSION
+          fi
+          echo "docker_image=${{ inputs.docker_image }}:$PR_NUMBER-$VERSION$TAG_SUFFIX" >> $GITHUB_OUTPUT
+
+      - name: Run Grype Scan
+        run: |
+          DOCKER_IMAGE=${{ steps.set_version.outputs.docker_image || inputs.docker_image }}
+          ./.github/grype/run_grype_scan.sh $DOCKER_IMAGE
+
+      - name: Parse grype results
+        run: |
+          python3 -u ./.github/grype/parse_vulnerabilities_grype.py -o nice --no-colors --log raw.log --test-to-end
+
+      - name: Transform and Upload Grype Results
+        if: always()
+        id: upload_results
+        env:
+          S3_BUCKET: "altinity-build-artifacts"
+          COMMIT_SHA: ${{ 
github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
+          PR_NUMBER: ${{ env.PR_NUMBER || github.event.pull_request.number || 0 }}
+          DOCKER_IMAGE: ${{ steps.set_version.outputs.docker_image || inputs.docker_image }}
+        run: |
+          echo "PR_NUMBER=$PR_NUMBER"
+          ./.github/grype/transform_and_upload_results_s3.sh
+
+      - name: Create step summary
+        if: always()
+        id: create_summary
+        run: |
+          jq -r '"**Image**: \(.source.target.userInput)"' result.json >> $GITHUB_STEP_SUMMARY
+          jq -r '.distro | "**Distro**: \(.name):\(.version)"' result.json >> $GITHUB_STEP_SUMMARY
+          if jq -e '.matches | length == 0' result.json > /dev/null; then
+            echo "No CVEs" >> $GITHUB_STEP_SUMMARY
+          else
+            echo "| Severity | Count |" >> $GITHUB_STEP_SUMMARY
+            echo "|------------|-------|" >> $GITHUB_STEP_SUMMARY
+            jq -r '
+              .matches |
+              map(.vulnerability.severity) |
+              group_by(.) |
+              map({severity: .[0], count: length}) |
+              sort_by(.severity) |
+              map("| \(.severity) | \(.count) |") |
+              .[]
+            ' result.json >> $GITHUB_STEP_SUMMARY
+          fi
+
+          HIGH_COUNT=$(jq -r '.matches | map(.vulnerability) | unique_by(.id) | map(.severity) | map(select(. == "High")) | length' result.json)
+          CRITICAL_COUNT=$(jq -r '.matches | map(.vulnerability) | unique_by(.id) | map(.severity) | map(select(. == "Critical")) | length' result.json)
+          TOTAL_HIGH_CRITICAL=$((HIGH_COUNT + CRITICAL_COUNT))
+          echo "total_high_critical=$TOTAL_HIGH_CRITICAL" >> $GITHUB_OUTPUT
+
+          if [ $TOTAL_HIGH_CRITICAL -gt 0 ]; then
+            echo '## High and Critical vulnerabilities found' >> $GITHUB_STEP_SUMMARY
+            echo '```' >> $GITHUB_STEP_SUMMARY
+            cat raw.log | tfs --no-colors show tests | grep -Pi 'High|Critical' >> $GITHUB_STEP_SUMMARY
+            echo '```' >> $GITHUB_STEP_SUMMARY
+          fi
+
+      - name: Set commit status
+        if: always()
+        uses: actions/github-script@v7
+        with:
+          github-token: ${{ secrets.GITHUB_TOKEN }}
+          script: |
+            const totalHighCritical = '${{ steps.create_summary.outputs.total_high_critical }}';
+            const hasError = totalHighCritical === '';
+            const hasVulnerabilities = parseInt(totalHighCritical) > 0;
+            github.rest.repos.createCommitStatus({
+              owner: context.repo.owner,
+              repo: context.repo.repo,
+              sha: '${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}',
+              state: hasError ? 'error' : hasVulnerabilities ? 'failure' : 'success',
+              target_url: '${{ steps.upload_results.outputs.https_report_path }}',
+              description: hasError ? 'An error occurred' : `Grype Scan Completed with ${totalHighCritical} high/critical vulnerabilities`,
+              context: 'Grype Scan ${{ steps.set_version.outputs.docker_image || inputs.docker_image }}'
+            });
+
+      - name: Upload artifacts
+        if: always()
+        uses: actions/upload-artifact@v4
+        with:
+          name: grype-results-${{ hashFiles('raw.log') }}
+          path: |
+            result.json
+            nice.log.txt
diff --git a/.github/workflows/init_praktika.yml b/.github/workflows/init_praktika.yml
new file mode 100644
index 000000000000..e9f56e0d2396
--- /dev/null
+++ b/.github/workflows/init_praktika.yml
@@ -0,0 +1,27 @@
+name: InitPraktikaReport
+
+# This workflow is used to initialize/update the praktika report in S3.
+# It does not need to run often; running it when a new release is created should be plenty.
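+#
+# Hedged usage sketch (assumes the GitHub CLI is available; the branch name is
+# illustrative):
+#   gh workflow run init_praktika.yml --ref antalya
+# The job itself only installs htmlmin and runs `python3 -m praktika html`.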
+ +on: + workflow_dispatch: + +env: + # Force the stdout and stderr streams to be unbuffered + PYTHONUNBUFFERED: 1 + AWS_ACCESS_KEY_ID: ${{ secrets.AWS_ACCESS_KEY_ID }} + AWS_DEFAULT_REGION: ${{ secrets.AWS_DEFAULT_REGION }} + AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_SECRET_ACCESS_KEY }} + +jobs: + + init_praktika: + runs-on: [self-hosted, altinity-on-demand, altinity-style-checker-aarch64] + name: "Init praktika" + steps: + - name: Init praktika report + run: | + export PYTHONPATH=./ci:.: + pip install htmlmin + python3 -m praktika html + \ No newline at end of file diff --git a/.github/workflows/master.yml b/.github/workflows/master.yml index 00dbccb41836..93f21dbeaa7c 100644 --- a/.github/workflows/master.yml +++ b/.github/workflows/master.yml @@ -3,13 +3,35 @@ name: MasterCI on: + workflow_dispatch: + inputs: + no_cache: + description: Run without cache + required: false + type: boolean + default: false push: - branches: ['master'] + branches: ['antalya', 'releases/*', 'antalya-*'] env: # Force the stdout and stderr streams to be unbuffered PYTHONUNBUFFERED: 1 + DISABLE_CI_CACHE: ${{ github.event.inputs.no_cache || '0' }} CHECKOUT_REF: "" + DOCKER_PASSWORD: ${{ secrets.DOCKER_PASSWORD }} + CLICKHOUSE_TEST_STAT_URL: ${{ secrets.CLICKHOUSE_TEST_STAT_URL }} + CLICKHOUSE_TEST_STAT_LOGIN: ${{ secrets.CLICKHOUSE_TEST_STAT_LOGIN }} + CLICKHOUSE_TEST_STAT_PASSWORD: ${{ secrets.CLICKHOUSE_TEST_STAT_PASSWORD }} + AWS_ACCESS_KEY_ID: ${{ secrets.AWS_ACCESS_KEY_ID }} + AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_SECRET_ACCESS_KEY }} + AWS_DEFAULT_REGION: ${{ secrets.AWS_DEFAULT_REGION }} + CHECKS_DATABASE_HOST: ${{ secrets.CHECKS_DATABASE_HOST }} + AZURE_STORAGE_KEY: ${{ secrets.AZURE_STORAGE_KEY }} + AZURE_ACCOUNT_NAME: ${{ secrets.AZURE_ACCOUNT_NAME }} + AZURE_CONTAINER_NAME: ${{ secrets.AZURE_CONTAINER_NAME }} + AZURE_STORAGE_ACCOUNT_URL: "https://${{ secrets.AZURE_ACCOUNT_NAME }}.blob.core.windows.net/" + ROBOT_TOKEN: ${{ secrets.ROBOT_TOKEN }} + GH_TOKEN: ${{ github.token }} # Allow updating GH commit statuses and PR comments to post an actual job reports link permissions: write-all @@ -17,7 +39,7 @@ permissions: write-all jobs: config_workflow: - runs-on: [self-hosted, style-checker-aarch64] + runs-on: [self-hosted, altinity-on-demand, altinity-style-checker-aarch64] needs: [] name: "Config Workflow" outputs: @@ -28,6 +50,26 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Config Workflow" + + - name: Note report location to summary + env: + PR_NUMBER: ${{ github.event.pull_request.number || 0 }} + COMMIT_SHA: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }} + run: | + if [ "$PR_NUMBER" -eq 0 ]; then + PREFIX="REFs/$GITHUB_REF_NAME/$COMMIT_SHA" + else + PREFIX="PRs/$PR_NUMBER/$COMMIT_SHA" + fi + REPORT_LINK=https://s3.amazonaws.com/altinity-build-artifacts/$PREFIX/ci_run_report.html + echo "Workflow Run Report: [View Report]($REPORT_LINK)" >> $GITHUB_STEP_SUMMARY + - name: Prepare env script run: | rm -rf ./ci/tmp ./ci/tmp ./ci/tmp @@ -54,7 +96,7 @@ jobs: fi dockers_build_amd: - runs-on: [self-hosted, style-checker] + runs-on: [self-hosted, altinity-on-demand, altinity-style-checker] needs: [config_workflow] if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'RG9ja2VycyBCdWlsZCAoYW1kKQ==') }} name: "Dockers Build (amd)" @@ -66,6 +108,13 @@ jobs: with: ref: ${{ 
env.CHECKOUT_REF }} + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Dockers Build (amd)" + - name: Prepare env script run: | rm -rf ./ci/tmp ./ci/tmp ./ci/tmp @@ -92,7 +141,7 @@ jobs: fi dockers_build_arm: - runs-on: [self-hosted, style-checker-aarch64] + runs-on: [self-hosted, altinity-on-demand, altinity-style-checker-aarch64] needs: [config_workflow] if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'RG9ja2VycyBCdWlsZCAoYXJtKQ==') }} name: "Dockers Build (arm)" @@ -104,6 +153,13 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Dockers Build (arm)" + - name: Prepare env script run: | rm -rf ./ci/tmp ./ci/tmp ./ci/tmp @@ -130,7 +186,7 @@ jobs: fi dockers_build_multiplatform_manifest: - runs-on: [self-hosted, style-checker] + runs-on: [self-hosted, altinity-on-demand, altinity-style-checker] needs: [config_workflow, dockers_build_amd, dockers_build_arm] if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'RG9ja2VycyBCdWlsZCAobXVsdGlwbGF0Zm9ybSBtYW5pZmVzdCk=') }} name: "Dockers Build (multiplatform manifest)" @@ -142,43 +198,12 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} - - name: Prepare env script - run: | - rm -rf ./ci/tmp ./ci/tmp ./ci/tmp - mkdir -p ./ci/tmp ./ci/tmp ./ci/tmp - cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF' - export PYTHONPATH=./ci:.: - cat > ./ci/tmp/workflow_config_masterci.json << 'EOF' - ${{ needs.config_workflow.outputs.data }} - EOF - cat > ./ci/tmp/workflow_status.json << 'EOF' - ${{ toJson(needs) }} - EOF - ENV_SETUP_SCRIPT_EOF - - - name: Run - id: run - run: | - . ./ci/tmp/praktika_setup_env.sh - set -o pipefail - if command -v ts &> /dev/null; then - python3 -m praktika run 'Dockers Build (multiplatform manifest)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log - else - python3 -m praktika run 'Dockers Build (multiplatform manifest)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log - fi - - build_arm_tidy: - runs-on: [self-hosted, builder-aarch64] - needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest] - if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'QnVpbGQgKGFybV90aWR5KQ==') }} - name: "Build (arm_tidy)" - outputs: - data: ${{ steps.run.outputs.DATA }} - steps: - - name: Checkout code - uses: actions/checkout@v4 + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup with: - ref: ${{ env.CHECKOUT_REF }} + test_name: "Dockers Build (multiplatform manifest)" - name: Prepare env script run: | @@ -200,13 +225,13 @@ jobs: . 
./ci/tmp/praktika_setup_env.sh set -o pipefail if command -v ts &> /dev/null; then - python3 -m praktika run 'Build (arm_tidy)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log + python3 -m praktika run 'Dockers Build (multiplatform manifest)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log else - python3 -m praktika run 'Build (arm_tidy)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log + python3 -m praktika run 'Dockers Build (multiplatform manifest)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log fi build_amd_debug: - runs-on: [self-hosted, builder] + runs-on: [self-hosted, altinity-on-demand, altinity-builder] needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest] if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'QnVpbGQgKGFtZF9kZWJ1Zyk=') }} name: "Build (amd_debug)" @@ -218,6 +243,13 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Build (amd_debug)" + - name: Prepare env script run: | rm -rf ./ci/tmp ./ci/tmp ./ci/tmp @@ -244,7 +276,7 @@ jobs: fi build_amd_release: - runs-on: [self-hosted, builder] + runs-on: [self-hosted, altinity-on-demand, altinity-builder] needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest] if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'QnVpbGQgKGFtZF9yZWxlYXNlKQ==') }} name: "Build (amd_release)" @@ -256,6 +288,13 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Build (amd_release)" + - name: Prepare env script run: | rm -rf ./ci/tmp ./ci/tmp ./ci/tmp @@ -282,7 +321,7 @@ jobs: fi build_amd_asan: - runs-on: [self-hosted, builder] + runs-on: [self-hosted, altinity-on-demand, altinity-builder] needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest] if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'QnVpbGQgKGFtZF9hc2FuKQ==') }} name: "Build (amd_asan)" @@ -294,6 +333,13 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Build (amd_asan)" + - name: Prepare env script run: | rm -rf ./ci/tmp ./ci/tmp ./ci/tmp @@ -320,7 +366,7 @@ jobs: fi build_amd_tsan: - runs-on: [self-hosted, builder] + runs-on: [self-hosted, altinity-on-demand, altinity-builder] needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest] if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'QnVpbGQgKGFtZF90c2FuKQ==') }} name: "Build (amd_tsan)" @@ -332,6 +378,13 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Build (amd_tsan)" + - name: Prepare env script run: | rm -rf ./ci/tmp ./ci/tmp ./ci/tmp @@ -358,7 +411,7 @@ jobs: fi build_amd_msan: - runs-on: [self-hosted, builder] + runs-on: [self-hosted, altinity-on-demand, altinity-builder] needs: [config_workflow, 
dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest] if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'QnVpbGQgKGFtZF9tc2FuKQ==') }} name: "Build (amd_msan)" @@ -370,6 +423,13 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Build (amd_msan)" + - name: Prepare env script run: | rm -rf ./ci/tmp ./ci/tmp ./ci/tmp @@ -396,7 +456,7 @@ jobs: fi build_amd_ubsan: - runs-on: [self-hosted, builder] + runs-on: [self-hosted, altinity-on-demand, altinity-builder] needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest] if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'QnVpbGQgKGFtZF91YnNhbik=') }} name: "Build (amd_ubsan)" @@ -408,6 +468,13 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Build (amd_ubsan)" + - name: Prepare env script run: | rm -rf ./ci/tmp ./ci/tmp ./ci/tmp @@ -434,7 +501,7 @@ jobs: fi build_amd_binary: - runs-on: [self-hosted, builder] + runs-on: [self-hosted, altinity-on-demand, altinity-builder] needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest] if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'QnVpbGQgKGFtZF9iaW5hcnkp') }} name: "Build (amd_binary)" @@ -446,6 +513,13 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Build (amd_binary)" + - name: Prepare env script run: | rm -rf ./ci/tmp ./ci/tmp ./ci/tmp @@ -472,7 +546,7 @@ jobs: fi build_arm_release: - runs-on: [self-hosted, builder-aarch64] + runs-on: [self-hosted, altinity-on-demand, altinity-builder] needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest] if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'QnVpbGQgKGFybV9yZWxlYXNlKQ==') }} name: "Build (arm_release)" @@ -484,6 +558,13 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Build (arm_release)" + - name: Prepare env script run: | rm -rf ./ci/tmp ./ci/tmp ./ci/tmp @@ -509,11 +590,11 @@ jobs: python3 -m praktika run 'Build (arm_release)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log fi - build_arm_asan: - runs-on: [self-hosted, builder-aarch64] + build_arm_coverage: + runs-on: [self-hosted, altinity-on-demand, altinity-builder] needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest] - if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'QnVpbGQgKGFybV9hc2FuKQ==') }} - name: "Build (arm_asan)" + if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'QnVpbGQgKGFybV9jb3ZlcmFnZSk=') }} + name: "Build (arm_coverage)" outputs: data: ${{ steps.run.outputs.DATA }} steps: @@ -522,6 +603,13 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} + - name: Setup + uses: 
./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Build (arm_coverage)" + - name: Prepare env script run: | rm -rf ./ci/tmp ./ci/tmp ./ci/tmp @@ -542,16 +630,16 @@ jobs: . ./ci/tmp/praktika_setup_env.sh set -o pipefail if command -v ts &> /dev/null; then - python3 -m praktika run 'Build (arm_asan)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log + python3 -m praktika run 'Build (arm_coverage)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log else - python3 -m praktika run 'Build (arm_asan)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log + python3 -m praktika run 'Build (arm_coverage)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log fi - build_arm_coverage: - runs-on: [self-hosted, builder-aarch64] + build_arm_binary: + runs-on: [self-hosted, altinity-on-demand, altinity-builder] needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest] - if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'QnVpbGQgKGFybV9jb3ZlcmFnZSk=') }} - name: "Build (arm_coverage)" + if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'QnVpbGQgKGFybV9iaW5hcnkp') }} + name: "Build (arm_binary)" outputs: data: ${{ steps.run.outputs.DATA }} steps: @@ -560,6 +648,13 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Build (arm_binary)" + - name: Prepare env script run: | rm -rf ./ci/tmp ./ci/tmp ./ci/tmp @@ -580,16 +675,16 @@ jobs: . ./ci/tmp/praktika_setup_env.sh set -o pipefail if command -v ts &> /dev/null; then - python3 -m praktika run 'Build (arm_coverage)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log + python3 -m praktika run 'Build (arm_binary)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log else - python3 -m praktika run 'Build (arm_coverage)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log + python3 -m praktika run 'Build (arm_binary)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log fi - build_arm_binary: - runs-on: [self-hosted, builder-aarch64] - needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest] - if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'QnVpbGQgKGFybV9iaW5hcnkp') }} - name: "Build (arm_binary)" + unit_tests_asan: + runs-on: [self-hosted, altinity-on-demand, altinity-builder] + needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_amd_asan] + if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'VW5pdCB0ZXN0cyAoYXNhbik=') }} + name: "Unit tests (asan)" outputs: data: ${{ steps.run.outputs.DATA }} steps: @@ -598,6 +693,13 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Unit tests (asan)" + - name: Prepare env script run: | rm -rf ./ci/tmp ./ci/tmp ./ci/tmp @@ -618,16 +720,16 @@ jobs: . 
./ci/tmp/praktika_setup_env.sh set -o pipefail if command -v ts &> /dev/null; then - python3 -m praktika run 'Build (arm_binary)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log + python3 -m praktika run 'Unit tests (asan)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log else - python3 -m praktika run 'Build (arm_binary)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log + python3 -m praktika run 'Unit tests (asan)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log fi - build_amd_darwin: - runs-on: [self-hosted, builder] - needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_arm_tidy, build_amd_debug, build_amd_release, build_amd_asan, build_amd_tsan, build_amd_msan, build_amd_ubsan, build_amd_binary, build_arm_release, build_arm_asan, build_arm_coverage, build_arm_binary] - if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'QnVpbGQgKGFtZF9kYXJ3aW4p') }} - name: "Build (amd_darwin)" + unit_tests_tsan: + runs-on: [self-hosted, altinity-on-demand, altinity-builder] + needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_amd_tsan] + if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'VW5pdCB0ZXN0cyAodHNhbik=') }} + name: "Unit tests (tsan)" outputs: data: ${{ steps.run.outputs.DATA }} steps: @@ -636,6 +738,13 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Unit tests (tsan)" + - name: Prepare env script run: | rm -rf ./ci/tmp ./ci/tmp ./ci/tmp @@ -656,16 +765,16 @@ jobs: . 
./ci/tmp/praktika_setup_env.sh set -o pipefail if command -v ts &> /dev/null; then - python3 -m praktika run 'Build (amd_darwin)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log + python3 -m praktika run 'Unit tests (tsan)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log else - python3 -m praktika run 'Build (amd_darwin)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log + python3 -m praktika run 'Unit tests (tsan)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log fi - build_arm_darwin: - runs-on: [self-hosted, builder-aarch64] - needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_arm_tidy, build_amd_debug, build_amd_release, build_amd_asan, build_amd_tsan, build_amd_msan, build_amd_ubsan, build_amd_binary, build_arm_release, build_arm_asan, build_arm_coverage, build_arm_binary] - if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'QnVpbGQgKGFybV9kYXJ3aW4p') }} - name: "Build (arm_darwin)" + unit_tests_msan: + runs-on: [self-hosted, altinity-on-demand, altinity-builder] + needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_amd_msan] + if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'VW5pdCB0ZXN0cyAobXNhbik=') }} + name: "Unit tests (msan)" outputs: data: ${{ steps.run.outputs.DATA }} steps: @@ -674,6 +783,13 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Unit tests (msan)" + - name: Prepare env script run: | rm -rf ./ci/tmp ./ci/tmp ./ci/tmp @@ -694,16 +810,16 @@ jobs: . 
./ci/tmp/praktika_setup_env.sh set -o pipefail if command -v ts &> /dev/null; then - python3 -m praktika run 'Build (arm_darwin)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log + python3 -m praktika run 'Unit tests (msan)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log else - python3 -m praktika run 'Build (arm_darwin)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log + python3 -m praktika run 'Unit tests (msan)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log fi - build_arm_v80compat: - runs-on: [self-hosted, builder-aarch64] - needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_arm_tidy, build_amd_debug, build_amd_release, build_amd_asan, build_amd_tsan, build_amd_msan, build_amd_ubsan, build_amd_binary, build_arm_release, build_arm_asan, build_arm_coverage, build_arm_binary] - if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'QnVpbGQgKGFybV92ODBjb21wYXQp') }} - name: "Build (arm_v80compat)" + unit_tests_ubsan: + runs-on: [self-hosted, altinity-on-demand, altinity-builder] + needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_amd_ubsan] + if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'VW5pdCB0ZXN0cyAodWJzYW4p') }} + name: "Unit tests (ubsan)" outputs: data: ${{ steps.run.outputs.DATA }} steps: @@ -712,6 +828,13 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Unit tests (ubsan)" + - name: Prepare env script run: | rm -rf ./ci/tmp ./ci/tmp ./ci/tmp @@ -732,16 +855,16 @@ jobs: . 
./ci/tmp/praktika_setup_env.sh set -o pipefail if command -v ts &> /dev/null; then - python3 -m praktika run 'Build (arm_v80compat)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log + python3 -m praktika run 'Unit tests (ubsan)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log else - python3 -m praktika run 'Build (arm_v80compat)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log + python3 -m praktika run 'Unit tests (ubsan)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log fi - build_amd_freebsd: - runs-on: [self-hosted, builder] - needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_arm_tidy, build_amd_debug, build_amd_release, build_amd_asan, build_amd_tsan, build_amd_msan, build_amd_ubsan, build_amd_binary, build_arm_release, build_arm_asan, build_arm_coverage, build_arm_binary] - if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'QnVpbGQgKGFtZF9mcmVlYnNkKQ==') }} - name: "Build (amd_freebsd)" + docker_server_image: + runs-on: [self-hosted, altinity-on-demand, altinity-style-checker] + needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_amd_release, build_arm_release] + if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'RG9ja2VyIHNlcnZlciBpbWFnZQ==') }} + name: "Docker server image" outputs: data: ${{ steps.run.outputs.DATA }} steps: @@ -750,6 +873,13 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Docker server image" + - name: Prepare env script run: | rm -rf ./ci/tmp ./ci/tmp ./ci/tmp @@ -770,16 +900,16 @@ jobs: . 
./ci/tmp/praktika_setup_env.sh set -o pipefail if command -v ts &> /dev/null; then - python3 -m praktika run 'Build (amd_freebsd)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log + python3 -m praktika run 'Docker server image' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log else - python3 -m praktika run 'Build (amd_freebsd)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log + python3 -m praktika run 'Docker server image' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log fi - build_ppc64le: - runs-on: [self-hosted, builder-aarch64] - needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_arm_tidy, build_amd_debug, build_amd_release, build_amd_asan, build_amd_tsan, build_amd_msan, build_amd_ubsan, build_amd_binary, build_arm_release, build_arm_asan, build_arm_coverage, build_arm_binary] - if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'QnVpbGQgKHBwYzY0bGUp') }} - name: "Build (ppc64le)" + docker_keeper_image: + runs-on: [self-hosted, altinity-on-demand, altinity-style-checker] + needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_amd_release, build_arm_release] + if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'RG9ja2VyIGtlZXBlciBpbWFnZQ==') }} + name: "Docker keeper image" outputs: data: ${{ steps.run.outputs.DATA }} steps: @@ -788,6 +918,13 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Docker keeper image" + - name: Prepare env script run: | rm -rf ./ci/tmp ./ci/tmp ./ci/tmp @@ -808,16 +945,16 @@ jobs: . 
./ci/tmp/praktika_setup_env.sh set -o pipefail if command -v ts &> /dev/null; then - python3 -m praktika run 'Build (ppc64le)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log + python3 -m praktika run 'Docker keeper image' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log else - python3 -m praktika run 'Build (ppc64le)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log + python3 -m praktika run 'Docker keeper image' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log fi - build_amd_compat: - runs-on: [self-hosted, builder] - needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_arm_tidy, build_amd_debug, build_amd_release, build_amd_asan, build_amd_tsan, build_amd_msan, build_amd_ubsan, build_amd_binary, build_arm_release, build_arm_asan, build_arm_coverage, build_arm_binary] - if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'QnVpbGQgKGFtZF9jb21wYXQp') }} - name: "Build (amd_compat)" + install_packages_amd_release: + runs-on: [self-hosted, altinity-on-demand, altinity-func-tester] + needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_amd_release] + if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'SW5zdGFsbCBwYWNrYWdlcyAoYW1kX3JlbGVhc2Up') }} + name: "Install packages (amd_release)" outputs: data: ${{ steps.run.outputs.DATA }} steps: @@ -826,6 +963,13 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Install packages (amd_release)" + - name: Prepare env script run: | rm -rf ./ci/tmp ./ci/tmp ./ci/tmp @@ -846,16 +990,16 @@ jobs: . 
./ci/tmp/praktika_setup_env.sh set -o pipefail if command -v ts &> /dev/null; then - python3 -m praktika run 'Build (amd_compat)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log + python3 -m praktika run 'Install packages (amd_release)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log else - python3 -m praktika run 'Build (amd_compat)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log + python3 -m praktika run 'Install packages (amd_release)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log fi - build_amd_musl: - runs-on: [self-hosted, builder] - needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_arm_tidy, build_amd_debug, build_amd_release, build_amd_asan, build_amd_tsan, build_amd_msan, build_amd_ubsan, build_amd_binary, build_arm_release, build_arm_asan, build_arm_coverage, build_arm_binary] - if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'QnVpbGQgKGFtZF9tdXNsKQ==') }} - name: "Build (amd_musl)" + install_packages_arm_release: + runs-on: [self-hosted, altinity-on-demand, altinity-func-tester-aarch64] + needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_arm_release] + if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'SW5zdGFsbCBwYWNrYWdlcyAoYXJtX3JlbGVhc2Up') }} + name: "Install packages (arm_release)" outputs: data: ${{ steps.run.outputs.DATA }} steps: @@ -864,6 +1008,13 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Install packages (arm_release)" + - name: Prepare env script run: | rm -rf ./ci/tmp ./ci/tmp ./ci/tmp @@ -884,16 +1035,16 @@ jobs: . 
./ci/tmp/praktika_setup_env.sh set -o pipefail if command -v ts &> /dev/null; then - python3 -m praktika run 'Build (amd_musl)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log + python3 -m praktika run 'Install packages (arm_release)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log else - python3 -m praktika run 'Build (amd_musl)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log + python3 -m praktika run 'Install packages (arm_release)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log fi - build_riscv64: - runs-on: [self-hosted, builder-aarch64] - needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_arm_tidy, build_amd_debug, build_amd_release, build_amd_asan, build_amd_tsan, build_amd_msan, build_amd_ubsan, build_amd_binary, build_arm_release, build_arm_asan, build_arm_coverage, build_arm_binary] - if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'QnVpbGQgKHJpc2N2NjQp') }} - name: "Build (riscv64)" + compatibility_check_release: + runs-on: [self-hosted, altinity-on-demand, altinity-style-checker] + needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_amd_release] + if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'Q29tcGF0aWJpbGl0eSBjaGVjayAocmVsZWFzZSk=') }} + name: "Compatibility check (release)" outputs: data: ${{ steps.run.outputs.DATA }} steps: @@ -902,6 +1053,13 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Compatibility check (release)" + - name: Prepare env script run: | rm -rf ./ci/tmp ./ci/tmp ./ci/tmp @@ -922,16 +1080,16 @@ jobs: . 
./ci/tmp/praktika_setup_env.sh set -o pipefail if command -v ts &> /dev/null; then - python3 -m praktika run 'Build (riscv64)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log + python3 -m praktika run 'Compatibility check (release)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log else - python3 -m praktika run 'Build (riscv64)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log + python3 -m praktika run 'Compatibility check (release)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log fi - build_s390x: - runs-on: [self-hosted, builder] - needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_arm_tidy, build_amd_debug, build_amd_release, build_amd_asan, build_amd_tsan, build_amd_msan, build_amd_ubsan, build_amd_binary, build_arm_release, build_arm_asan, build_arm_coverage, build_arm_binary] - if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'QnVpbGQgKHMzOTB4KQ==') }} - name: "Build (s390x)" + compatibility_check_aarch64: + runs-on: [self-hosted, altinity-on-demand, altinity-style-checker-aarch64] + needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_arm_release] + if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'Q29tcGF0aWJpbGl0eSBjaGVjayAoYWFyY2g2NCk=') }} + name: "Compatibility check (aarch64)" outputs: data: ${{ steps.run.outputs.DATA }} steps: @@ -940,6 +1098,13 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Compatibility check (aarch64)" + - name: Prepare env script run: | rm -rf ./ci/tmp ./ci/tmp ./ci/tmp @@ -960,16 +1125,16 @@ jobs: . 
./ci/tmp/praktika_setup_env.sh set -o pipefail if command -v ts &> /dev/null; then - python3 -m praktika run 'Build (s390x)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log + python3 -m praktika run 'Compatibility check (aarch64)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log else - python3 -m praktika run 'Build (s390x)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log + python3 -m praktika run 'Compatibility check (aarch64)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log fi - build_loongarch64: - runs-on: [self-hosted, builder-aarch64] - needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_arm_tidy, build_amd_debug, build_amd_release, build_amd_asan, build_amd_tsan, build_amd_msan, build_amd_ubsan, build_amd_binary, build_arm_release, build_arm_asan, build_arm_coverage, build_arm_binary] - if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'QnVpbGQgKGxvb25nYXJjaDY0KQ==') }} - name: "Build (loongarch64)" + stateless_tests_amd_asan_distributed_plan_parallel_1_2: + runs-on: [self-hosted, altinity-on-demand, altinity-func-tester] + needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_amd_asan] + if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhbWRfYXNhbiwgZGlzdHJpYnV0ZWQgcGxhbiwgcGFyYWxsZWwsIDEvMik=') }} + name: "Stateless tests (amd_asan, distributed plan, parallel, 1/2)" outputs: data: ${{ steps.run.outputs.DATA }} steps: @@ -978,6 +1143,13 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Stateless tests (amd_asan, distributed plan, parallel, 1/2)" + - name: Prepare env script run: | rm -rf ./ci/tmp ./ci/tmp ./ci/tmp @@ -998,16 +1170,16 @@ jobs: . 
./ci/tmp/praktika_setup_env.sh set -o pipefail if command -v ts &> /dev/null; then - python3 -m praktika run 'Build (loongarch64)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log + python3 -m praktika run 'Stateless tests (amd_asan, distributed plan, parallel, 1/2)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log else - python3 -m praktika run 'Build (loongarch64)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log + python3 -m praktika run 'Stateless tests (amd_asan, distributed plan, parallel, 1/2)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log fi - build_fuzzers: - runs-on: [self-hosted, builder-aarch64] - needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_arm_tidy, build_amd_debug, build_amd_release, build_amd_asan, build_amd_tsan, build_amd_msan, build_amd_ubsan, build_amd_binary, build_arm_release, build_arm_asan, build_arm_coverage, build_arm_binary] - if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'QnVpbGQgKGZ1enplcnMp') }} - name: "Build (fuzzers)" + stateless_tests_amd_asan_distributed_plan_parallel_2_2: + runs-on: [self-hosted, altinity-on-demand, altinity-func-tester] + needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_amd_asan] + if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhbWRfYXNhbiwgZGlzdHJpYnV0ZWQgcGxhbiwgcGFyYWxsZWwsIDIvMik=') }} + name: "Stateless tests (amd_asan, distributed plan, parallel, 2/2)" outputs: data: ${{ steps.run.outputs.DATA }} steps: @@ -1016,6 +1188,13 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Stateless tests (amd_asan, distributed plan, parallel, 2/2)" + - name: Prepare env script run: | rm -rf ./ci/tmp ./ci/tmp ./ci/tmp @@ -1036,16 +1215,16 @@ jobs: . 
./ci/tmp/praktika_setup_env.sh set -o pipefail if command -v ts &> /dev/null; then - python3 -m praktika run 'Build (fuzzers)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log + python3 -m praktika run 'Stateless tests (amd_asan, distributed plan, parallel, 2/2)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log else - python3 -m praktika run 'Build (fuzzers)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log + python3 -m praktika run 'Stateless tests (amd_asan, distributed plan, parallel, 2/2)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log fi - unit_tests_asan: - runs-on: [self-hosted, builder] + stateless_tests_amd_asan_distributed_plan_sequential: + runs-on: [self-hosted, altinity-on-demand, altinity-func-tester] needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_amd_asan] - if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'VW5pdCB0ZXN0cyAoYXNhbik=') }} - name: "Unit tests (asan)" + if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhbWRfYXNhbiwgZGlzdHJpYnV0ZWQgcGxhbiwgc2VxdWVudGlhbCk=') }} + name: "Stateless tests (amd_asan, distributed plan, sequential)" outputs: data: ${{ steps.run.outputs.DATA }} steps: @@ -1054,6 +1233,13 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Stateless tests (amd_asan, distributed plan, sequential)" + - name: Prepare env script run: | rm -rf ./ci/tmp ./ci/tmp ./ci/tmp @@ -1074,16 +1260,16 @@ jobs: . ./ci/tmp/praktika_setup_env.sh set -o pipefail if command -v ts &> /dev/null; then - python3 -m praktika run 'Unit tests (asan)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log + python3 -m praktika run 'Stateless tests (amd_asan, distributed plan, sequential)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log else - python3 -m praktika run 'Unit tests (asan)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log + python3 -m praktika run 'Stateless tests (amd_asan, distributed plan, sequential)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log fi - unit_tests_tsan: - runs-on: [self-hosted, builder] - needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_amd_tsan] - if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'VW5pdCB0ZXN0cyAodHNhbik=') }} - name: "Unit tests (tsan)" + stateless_tests_amd_binary_old_analyzer_s3_storage_databasereplicated_parallel: + runs-on: [self-hosted, altinity-on-demand, altinity-func-tester] + needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_amd_binary] + if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhbWRfYmluYXJ5LCBvbGQgYW5hbHl6ZXIsIHMzIHN0b3JhZ2UsIERhdGFiYXNlUmVwbGljYXRlZCwgcGFyYWxsZWwp') }} + name: "Stateless tests (amd_binary, old analyzer, s3 storage, DatabaseReplicated, parallel)" outputs: data: ${{ steps.run.outputs.DATA }} steps: @@ -1092,6 +1278,13 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: 
./.github/actions/docker_setup + with: + test_name: "Stateless tests (amd_binary, old analyzer, s3 storage, DatabaseReplicated, parallel)" + - name: Prepare env script run: | rm -rf ./ci/tmp ./ci/tmp ./ci/tmp @@ -1112,16 +1305,16 @@ jobs: . ./ci/tmp/praktika_setup_env.sh set -o pipefail if command -v ts &> /dev/null; then - python3 -m praktika run 'Unit tests (tsan)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log + python3 -m praktika run 'Stateless tests (amd_binary, old analyzer, s3 storage, DatabaseReplicated, parallel)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log else - python3 -m praktika run 'Unit tests (tsan)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log + python3 -m praktika run 'Stateless tests (amd_binary, old analyzer, s3 storage, DatabaseReplicated, parallel)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log fi - unit_tests_msan: - runs-on: [self-hosted, builder] - needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_amd_msan] - if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'VW5pdCB0ZXN0cyAobXNhbik=') }} - name: "Unit tests (msan)" + stateless_tests_amd_binary_old_analyzer_s3_storage_databasereplicated_sequential: + runs-on: [self-hosted, altinity-on-demand, altinity-func-tester] + needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_amd_binary] + if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhbWRfYmluYXJ5LCBvbGQgYW5hbHl6ZXIsIHMzIHN0b3JhZ2UsIERhdGFiYXNlUmVwbGljYXRlZCwgc2VxdWVudGlhbCk=') }} + name: "Stateless tests (amd_binary, old analyzer, s3 storage, DatabaseReplicated, sequential)" outputs: data: ${{ steps.run.outputs.DATA }} steps: @@ -1130,6 +1323,13 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Stateless tests (amd_binary, old analyzer, s3 storage, DatabaseReplicated, sequential)" + - name: Prepare env script run: | rm -rf ./ci/tmp ./ci/tmp ./ci/tmp @@ -1150,16 +1350,16 @@ jobs: . 
./ci/tmp/praktika_setup_env.sh set -o pipefail if command -v ts &> /dev/null; then - python3 -m praktika run 'Unit tests (msan)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log + python3 -m praktika run 'Stateless tests (amd_binary, old analyzer, s3 storage, DatabaseReplicated, sequential)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log else - python3 -m praktika run 'Unit tests (msan)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log + python3 -m praktika run 'Stateless tests (amd_binary, old analyzer, s3 storage, DatabaseReplicated, sequential)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log fi - unit_tests_ubsan: - runs-on: [self-hosted, builder] - needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_amd_ubsan] - if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'VW5pdCB0ZXN0cyAodWJzYW4p') }} - name: "Unit tests (ubsan)" + stateless_tests_amd_binary_parallelreplicas_s3_storage_parallel: + runs-on: [self-hosted, altinity-on-demand, altinity-func-tester] + needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_amd_binary] + if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhbWRfYmluYXJ5LCBQYXJhbGxlbFJlcGxpY2FzLCBzMyBzdG9yYWdlLCBwYXJhbGxlbCk=') }} + name: "Stateless tests (amd_binary, ParallelReplicas, s3 storage, parallel)" outputs: data: ${{ steps.run.outputs.DATA }} steps: @@ -1168,6 +1368,13 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Stateless tests (amd_binary, ParallelReplicas, s3 storage, parallel)" + - name: Prepare env script run: | rm -rf ./ci/tmp ./ci/tmp ./ci/tmp @@ -1188,16 +1395,16 @@ jobs: . 
./ci/tmp/praktika_setup_env.sh set -o pipefail if command -v ts &> /dev/null; then - python3 -m praktika run 'Unit tests (ubsan)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log + python3 -m praktika run 'Stateless tests (amd_binary, ParallelReplicas, s3 storage, parallel)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log else - python3 -m praktika run 'Unit tests (ubsan)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log + python3 -m praktika run 'Stateless tests (amd_binary, ParallelReplicas, s3 storage, parallel)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log fi - docker_server_image: - runs-on: [self-hosted, style-checker] - needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_amd_release, build_arm_release] - if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'RG9ja2VyIHNlcnZlciBpbWFnZQ==') }} - name: "Docker server image" + stateless_tests_amd_binary_parallelreplicas_s3_storage_sequential: + runs-on: [self-hosted, altinity-on-demand, altinity-func-tester] + needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_amd_binary] + if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhbWRfYmluYXJ5LCBQYXJhbGxlbFJlcGxpY2FzLCBzMyBzdG9yYWdlLCBzZXF1ZW50aWFsKQ==') }} + name: "Stateless tests (amd_binary, ParallelReplicas, s3 storage, sequential)" outputs: data: ${{ steps.run.outputs.DATA }} steps: @@ -1206,6 +1413,13 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Stateless tests (amd_binary, ParallelReplicas, s3 storage, sequential)" + - name: Prepare env script run: | rm -rf ./ci/tmp ./ci/tmp ./ci/tmp @@ -1226,1042 +1440,16 @@ jobs: . 
./ci/tmp/praktika_setup_env.sh set -o pipefail if command -v ts &> /dev/null; then - python3 -m praktika run 'Docker server image' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log + python3 -m praktika run 'Stateless tests (amd_binary, ParallelReplicas, s3 storage, sequential)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log else - python3 -m praktika run 'Docker server image' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log + python3 -m praktika run 'Stateless tests (amd_binary, ParallelReplicas, s3 storage, sequential)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log fi - docker_keeper_image: - runs-on: [self-hosted, style-checker] - needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_amd_release, build_arm_release] - if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'RG9ja2VyIGtlZXBlciBpbWFnZQ==') }} - name: "Docker keeper image" - outputs: - data: ${{ steps.run.outputs.DATA }} - steps: - - name: Checkout code - uses: actions/checkout@v4 - with: - ref: ${{ env.CHECKOUT_REF }} - - - name: Prepare env script - run: | - rm -rf ./ci/tmp ./ci/tmp ./ci/tmp - mkdir -p ./ci/tmp ./ci/tmp ./ci/tmp - cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF' - export PYTHONPATH=./ci:.: - cat > ./ci/tmp/workflow_config_masterci.json << 'EOF' - ${{ needs.config_workflow.outputs.data }} - EOF - cat > ./ci/tmp/workflow_status.json << 'EOF' - ${{ toJson(needs) }} - EOF - ENV_SETUP_SCRIPT_EOF - - - name: Run - id: run - run: | - . ./ci/tmp/praktika_setup_env.sh - set -o pipefail - if command -v ts &> /dev/null; then - python3 -m praktika run 'Docker keeper image' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log - else - python3 -m praktika run 'Docker keeper image' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log - fi - - install_packages_amd_release: - runs-on: [self-hosted, style-checker] - needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_amd_release] - if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'SW5zdGFsbCBwYWNrYWdlcyAoYW1kX3JlbGVhc2Up') }} - name: "Install packages (amd_release)" - outputs: - data: ${{ steps.run.outputs.DATA }} - steps: - - name: Checkout code - uses: actions/checkout@v4 - with: - ref: ${{ env.CHECKOUT_REF }} - - - name: Prepare env script - run: | - rm -rf ./ci/tmp ./ci/tmp ./ci/tmp - mkdir -p ./ci/tmp ./ci/tmp ./ci/tmp - cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF' - export PYTHONPATH=./ci:.: - cat > ./ci/tmp/workflow_config_masterci.json << 'EOF' - ${{ needs.config_workflow.outputs.data }} - EOF - cat > ./ci/tmp/workflow_status.json << 'EOF' - ${{ toJson(needs) }} - EOF - ENV_SETUP_SCRIPT_EOF - - - name: Run - id: run - run: | - . 
./ci/tmp/praktika_setup_env.sh - set -o pipefail - if command -v ts &> /dev/null; then - python3 -m praktika run 'Install packages (amd_release)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log - else - python3 -m praktika run 'Install packages (amd_release)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log - fi - - install_packages_arm_release: - runs-on: [self-hosted, style-checker-aarch64] - needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_arm_release] - if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'SW5zdGFsbCBwYWNrYWdlcyAoYXJtX3JlbGVhc2Up') }} - name: "Install packages (arm_release)" - outputs: - data: ${{ steps.run.outputs.DATA }} - steps: - - name: Checkout code - uses: actions/checkout@v4 - with: - ref: ${{ env.CHECKOUT_REF }} - - - name: Prepare env script - run: | - rm -rf ./ci/tmp ./ci/tmp ./ci/tmp - mkdir -p ./ci/tmp ./ci/tmp ./ci/tmp - cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF' - export PYTHONPATH=./ci:.: - cat > ./ci/tmp/workflow_config_masterci.json << 'EOF' - ${{ needs.config_workflow.outputs.data }} - EOF - cat > ./ci/tmp/workflow_status.json << 'EOF' - ${{ toJson(needs) }} - EOF - ENV_SETUP_SCRIPT_EOF - - - name: Run - id: run - run: | - . ./ci/tmp/praktika_setup_env.sh - set -o pipefail - if command -v ts &> /dev/null; then - python3 -m praktika run 'Install packages (arm_release)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log - else - python3 -m praktika run 'Install packages (arm_release)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log - fi - - compatibility_check_release: - runs-on: [self-hosted, style-checker] - needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_amd_release] - if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'Q29tcGF0aWJpbGl0eSBjaGVjayAocmVsZWFzZSk=') }} - name: "Compatibility check (release)" - outputs: - data: ${{ steps.run.outputs.DATA }} - steps: - - name: Checkout code - uses: actions/checkout@v4 - with: - ref: ${{ env.CHECKOUT_REF }} - - - name: Prepare env script - run: | - rm -rf ./ci/tmp ./ci/tmp ./ci/tmp - mkdir -p ./ci/tmp ./ci/tmp ./ci/tmp - cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF' - export PYTHONPATH=./ci:.: - cat > ./ci/tmp/workflow_config_masterci.json << 'EOF' - ${{ needs.config_workflow.outputs.data }} - EOF - cat > ./ci/tmp/workflow_status.json << 'EOF' - ${{ toJson(needs) }} - EOF - ENV_SETUP_SCRIPT_EOF - - - name: Run - id: run - run: | - . 
./ci/tmp/praktika_setup_env.sh - set -o pipefail - if command -v ts &> /dev/null; then - python3 -m praktika run 'Compatibility check (release)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log - else - python3 -m praktika run 'Compatibility check (release)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log - fi - - compatibility_check_aarch64: - runs-on: [self-hosted, style-checker-aarch64] - needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_arm_release] - if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'Q29tcGF0aWJpbGl0eSBjaGVjayAoYWFyY2g2NCk=') }} - name: "Compatibility check (aarch64)" - outputs: - data: ${{ steps.run.outputs.DATA }} - steps: - - name: Checkout code - uses: actions/checkout@v4 - with: - ref: ${{ env.CHECKOUT_REF }} - - - name: Prepare env script - run: | - rm -rf ./ci/tmp ./ci/tmp ./ci/tmp - mkdir -p ./ci/tmp ./ci/tmp ./ci/tmp - cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF' - export PYTHONPATH=./ci:.: - cat > ./ci/tmp/workflow_config_masterci.json << 'EOF' - ${{ needs.config_workflow.outputs.data }} - EOF - cat > ./ci/tmp/workflow_status.json << 'EOF' - ${{ toJson(needs) }} - EOF - ENV_SETUP_SCRIPT_EOF - - - name: Run - id: run - run: | - . ./ci/tmp/praktika_setup_env.sh - set -o pipefail - if command -v ts &> /dev/null; then - python3 -m praktika run 'Compatibility check (aarch64)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log - else - python3 -m praktika run 'Compatibility check (aarch64)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log - fi - - stateless_tests_amd_asan_distributed_plan_parallel_1_2: - runs-on: [self-hosted, amd-medium-cpu] - needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_amd_asan] - if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhbWRfYXNhbiwgZGlzdHJpYnV0ZWQgcGxhbiwgcGFyYWxsZWwsIDEvMik=') }} - name: "Stateless tests (amd_asan, distributed plan, parallel, 1/2)" - outputs: - data: ${{ steps.run.outputs.DATA }} - steps: - - name: Checkout code - uses: actions/checkout@v4 - with: - ref: ${{ env.CHECKOUT_REF }} - - - name: Prepare env script - run: | - rm -rf ./ci/tmp ./ci/tmp ./ci/tmp - mkdir -p ./ci/tmp ./ci/tmp ./ci/tmp - cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF' - export PYTHONPATH=./ci:.: - cat > ./ci/tmp/workflow_config_masterci.json << 'EOF' - ${{ needs.config_workflow.outputs.data }} - EOF - cat > ./ci/tmp/workflow_status.json << 'EOF' - ${{ toJson(needs) }} - EOF - ENV_SETUP_SCRIPT_EOF - - - name: Run - id: run - run: | - . 
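# Sketch of the quoting pattern every "Prepare env script" step relies on,
# with a hypothetical JSON payload standing in for ${{ toJson(needs) }}:
# the outer heredoc delimiter is quoted, so the inner 'EOF' heredocs are
# written out literally and only execute when the Run step sources the
# script; the ${{ ... }} expressions were already expanded by the Actions
# runner before the shell ever saw this text.
cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF'
export PYTHONPATH=./ci:.:
cat > ./ci/tmp/workflow_status.json << 'EOF'
{"config_workflow": {"result": "success"}}
EOF
ENV_SETUP_SCRIPT_EOF
. ./ci/tmp/praktika_setup_env.sh   # materializes workflow_status.json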
./ci/tmp/praktika_setup_env.sh - set -o pipefail - if command -v ts &> /dev/null; then - python3 -m praktika run 'Stateless tests (amd_asan, distributed plan, parallel, 1/2)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log - else - python3 -m praktika run 'Stateless tests (amd_asan, distributed plan, parallel, 1/2)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log - fi - - stateless_tests_amd_asan_distributed_plan_parallel_2_2: - runs-on: [self-hosted, amd-medium-cpu] - needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_amd_asan] - if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhbWRfYXNhbiwgZGlzdHJpYnV0ZWQgcGxhbiwgcGFyYWxsZWwsIDIvMik=') }} - name: "Stateless tests (amd_asan, distributed plan, parallel, 2/2)" - outputs: - data: ${{ steps.run.outputs.DATA }} - steps: - - name: Checkout code - uses: actions/checkout@v4 - with: - ref: ${{ env.CHECKOUT_REF }} - - - name: Prepare env script - run: | - rm -rf ./ci/tmp ./ci/tmp ./ci/tmp - mkdir -p ./ci/tmp ./ci/tmp ./ci/tmp - cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF' - export PYTHONPATH=./ci:.: - cat > ./ci/tmp/workflow_config_masterci.json << 'EOF' - ${{ needs.config_workflow.outputs.data }} - EOF - cat > ./ci/tmp/workflow_status.json << 'EOF' - ${{ toJson(needs) }} - EOF - ENV_SETUP_SCRIPT_EOF - - - name: Run - id: run - run: | - . ./ci/tmp/praktika_setup_env.sh - set -o pipefail - if command -v ts &> /dev/null; then - python3 -m praktika run 'Stateless tests (amd_asan, distributed plan, parallel, 2/2)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log - else - python3 -m praktika run 'Stateless tests (amd_asan, distributed plan, parallel, 2/2)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log - fi - - stateless_tests_amd_asan_distributed_plan_sequential: - runs-on: [self-hosted, amd-small-mem] - needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_amd_asan] - if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhbWRfYXNhbiwgZGlzdHJpYnV0ZWQgcGxhbiwgc2VxdWVudGlhbCk=') }} - name: "Stateless tests (amd_asan, distributed plan, sequential)" - outputs: - data: ${{ steps.run.outputs.DATA }} - steps: - - name: Checkout code - uses: actions/checkout@v4 - with: - ref: ${{ env.CHECKOUT_REF }} - - - name: Prepare env script - run: | - rm -rf ./ci/tmp ./ci/tmp ./ci/tmp - mkdir -p ./ci/tmp ./ci/tmp ./ci/tmp - cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF' - export PYTHONPATH=./ci:.: - cat > ./ci/tmp/workflow_config_masterci.json << 'EOF' - ${{ needs.config_workflow.outputs.data }} - EOF - cat > ./ci/tmp/workflow_status.json << 'EOF' - ${{ toJson(needs) }} - EOF - ENV_SETUP_SCRIPT_EOF - - - name: Run - id: run - run: | - . 
./ci/tmp/praktika_setup_env.sh - set -o pipefail - if command -v ts &> /dev/null; then - python3 -m praktika run 'Stateless tests (amd_asan, distributed plan, sequential)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log - else - python3 -m praktika run 'Stateless tests (amd_asan, distributed plan, sequential)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log - fi - - stateless_tests_amd_binary_old_analyzer_s3_storage_databasereplicated_parallel: - runs-on: [self-hosted, amd-medium] - needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_amd_binary] - if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhbWRfYmluYXJ5LCBvbGQgYW5hbHl6ZXIsIHMzIHN0b3JhZ2UsIERhdGFiYXNlUmVwbGljYXRlZCwgcGFyYWxsZWwp') }} - name: "Stateless tests (amd_binary, old analyzer, s3 storage, DatabaseReplicated, parallel)" - outputs: - data: ${{ steps.run.outputs.DATA }} - steps: - - name: Checkout code - uses: actions/checkout@v4 - with: - ref: ${{ env.CHECKOUT_REF }} - - - name: Prepare env script - run: | - rm -rf ./ci/tmp ./ci/tmp ./ci/tmp - mkdir -p ./ci/tmp ./ci/tmp ./ci/tmp - cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF' - export PYTHONPATH=./ci:.: - cat > ./ci/tmp/workflow_config_masterci.json << 'EOF' - ${{ needs.config_workflow.outputs.data }} - EOF - cat > ./ci/tmp/workflow_status.json << 'EOF' - ${{ toJson(needs) }} - EOF - ENV_SETUP_SCRIPT_EOF - - - name: Run - id: run - run: | - . ./ci/tmp/praktika_setup_env.sh - set -o pipefail - if command -v ts &> /dev/null; then - python3 -m praktika run 'Stateless tests (amd_binary, old analyzer, s3 storage, DatabaseReplicated, parallel)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log - else - python3 -m praktika run 'Stateless tests (amd_binary, old analyzer, s3 storage, DatabaseReplicated, parallel)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log - fi - - stateless_tests_amd_binary_old_analyzer_s3_storage_databasereplicated_sequential: - runs-on: [self-hosted, amd-small] - needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_amd_binary] - if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhbWRfYmluYXJ5LCBvbGQgYW5hbHl6ZXIsIHMzIHN0b3JhZ2UsIERhdGFiYXNlUmVwbGljYXRlZCwgc2VxdWVudGlhbCk=') }} - name: "Stateless tests (amd_binary, old analyzer, s3 storage, DatabaseReplicated, sequential)" - outputs: - data: ${{ steps.run.outputs.DATA }} - steps: - - name: Checkout code - uses: actions/checkout@v4 - with: - ref: ${{ env.CHECKOUT_REF }} - - - name: Prepare env script - run: | - rm -rf ./ci/tmp ./ci/tmp ./ci/tmp - mkdir -p ./ci/tmp ./ci/tmp ./ci/tmp - cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF' - export PYTHONPATH=./ci:.: - cat > ./ci/tmp/workflow_config_masterci.json << 'EOF' - ${{ needs.config_workflow.outputs.data }} - EOF - cat > ./ci/tmp/workflow_status.json << 'EOF' - ${{ toJson(needs) }} - EOF - ENV_SETUP_SCRIPT_EOF - - - name: Run - id: run - run: | - . 
./ci/tmp/praktika_setup_env.sh - set -o pipefail - if command -v ts &> /dev/null; then - python3 -m praktika run 'Stateless tests (amd_binary, old analyzer, s3 storage, DatabaseReplicated, sequential)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log - else - python3 -m praktika run 'Stateless tests (amd_binary, old analyzer, s3 storage, DatabaseReplicated, sequential)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log - fi - - stateless_tests_amd_binary_parallelreplicas_s3_storage_parallel: - runs-on: [self-hosted, amd-medium] - needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_amd_binary] - if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhbWRfYmluYXJ5LCBQYXJhbGxlbFJlcGxpY2FzLCBzMyBzdG9yYWdlLCBwYXJhbGxlbCk=') }} - name: "Stateless tests (amd_binary, ParallelReplicas, s3 storage, parallel)" - outputs: - data: ${{ steps.run.outputs.DATA }} - steps: - - name: Checkout code - uses: actions/checkout@v4 - with: - ref: ${{ env.CHECKOUT_REF }} - - - name: Prepare env script - run: | - rm -rf ./ci/tmp ./ci/tmp ./ci/tmp - mkdir -p ./ci/tmp ./ci/tmp ./ci/tmp - cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF' - export PYTHONPATH=./ci:.: - cat > ./ci/tmp/workflow_config_masterci.json << 'EOF' - ${{ needs.config_workflow.outputs.data }} - EOF - cat > ./ci/tmp/workflow_status.json << 'EOF' - ${{ toJson(needs) }} - EOF - ENV_SETUP_SCRIPT_EOF - - - name: Run - id: run - run: | - . ./ci/tmp/praktika_setup_env.sh - set -o pipefail - if command -v ts &> /dev/null; then - python3 -m praktika run 'Stateless tests (amd_binary, ParallelReplicas, s3 storage, parallel)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log - else - python3 -m praktika run 'Stateless tests (amd_binary, ParallelReplicas, s3 storage, parallel)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log - fi - - stateless_tests_amd_binary_parallelreplicas_s3_storage_sequential: - runs-on: [self-hosted, amd-small] - needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_amd_binary] - if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhbWRfYmluYXJ5LCBQYXJhbGxlbFJlcGxpY2FzLCBzMyBzdG9yYWdlLCBzZXF1ZW50aWFsKQ==') }} - name: "Stateless tests (amd_binary, ParallelReplicas, s3 storage, sequential)" - outputs: - data: ${{ steps.run.outputs.DATA }} - steps: - - name: Checkout code - uses: actions/checkout@v4 - with: - ref: ${{ env.CHECKOUT_REF }} - - - name: Prepare env script - run: | - rm -rf ./ci/tmp ./ci/tmp ./ci/tmp - mkdir -p ./ci/tmp ./ci/tmp ./ci/tmp - cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF' - export PYTHONPATH=./ci:.: - cat > ./ci/tmp/workflow_config_masterci.json << 'EOF' - ${{ needs.config_workflow.outputs.data }} - EOF - cat > ./ci/tmp/workflow_status.json << 'EOF' - ${{ toJson(needs) }} - EOF - ENV_SETUP_SCRIPT_EOF - - - name: Run - id: run - run: | - . 
./ci/tmp/praktika_setup_env.sh - set -o pipefail - if command -v ts &> /dev/null; then - python3 -m praktika run 'Stateless tests (amd_binary, ParallelReplicas, s3 storage, sequential)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log - else - python3 -m praktika run 'Stateless tests (amd_binary, ParallelReplicas, s3 storage, sequential)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log - fi - - stateless_tests_amd_debug_asyncinsert_s3_storage_parallel: - runs-on: [self-hosted, amd-medium] - needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_amd_debug] - if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhbWRfZGVidWcsIEFzeW5jSW5zZXJ0LCBzMyBzdG9yYWdlLCBwYXJhbGxlbCk=') }} - name: "Stateless tests (amd_debug, AsyncInsert, s3 storage, parallel)" - outputs: - data: ${{ steps.run.outputs.DATA }} - steps: - - name: Checkout code - uses: actions/checkout@v4 - with: - ref: ${{ env.CHECKOUT_REF }} - - - name: Prepare env script - run: | - rm -rf ./ci/tmp ./ci/tmp ./ci/tmp - mkdir -p ./ci/tmp ./ci/tmp ./ci/tmp - cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF' - export PYTHONPATH=./ci:.: - cat > ./ci/tmp/workflow_config_masterci.json << 'EOF' - ${{ needs.config_workflow.outputs.data }} - EOF - cat > ./ci/tmp/workflow_status.json << 'EOF' - ${{ toJson(needs) }} - EOF - ENV_SETUP_SCRIPT_EOF - - - name: Run - id: run - run: | - . ./ci/tmp/praktika_setup_env.sh - set -o pipefail - if command -v ts &> /dev/null; then - python3 -m praktika run 'Stateless tests (amd_debug, AsyncInsert, s3 storage, parallel)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log - else - python3 -m praktika run 'Stateless tests (amd_debug, AsyncInsert, s3 storage, parallel)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log - fi - - stateless_tests_amd_debug_asyncinsert_s3_storage_sequential: - runs-on: [self-hosted, amd-small] - needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_amd_debug] - if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhbWRfZGVidWcsIEFzeW5jSW5zZXJ0LCBzMyBzdG9yYWdlLCBzZXF1ZW50aWFsKQ==') }} - name: "Stateless tests (amd_debug, AsyncInsert, s3 storage, sequential)" - outputs: - data: ${{ steps.run.outputs.DATA }} - steps: - - name: Checkout code - uses: actions/checkout@v4 - with: - ref: ${{ env.CHECKOUT_REF }} - - - name: Prepare env script - run: | - rm -rf ./ci/tmp ./ci/tmp ./ci/tmp - mkdir -p ./ci/tmp ./ci/tmp ./ci/tmp - cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF' - export PYTHONPATH=./ci:.: - cat > ./ci/tmp/workflow_config_masterci.json << 'EOF' - ${{ needs.config_workflow.outputs.data }} - EOF - cat > ./ci/tmp/workflow_status.json << 'EOF' - ${{ toJson(needs) }} - EOF - ENV_SETUP_SCRIPT_EOF - - - name: Run - id: run - run: | - . 
./ci/tmp/praktika_setup_env.sh - set -o pipefail - if command -v ts &> /dev/null; then - python3 -m praktika run 'Stateless tests (amd_debug, AsyncInsert, s3 storage, sequential)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log - else - python3 -m praktika run 'Stateless tests (amd_debug, AsyncInsert, s3 storage, sequential)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log - fi - - stateless_tests_amd_debug_parallel: - runs-on: [self-hosted, amd-medium-cpu] - needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_amd_debug] - if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhbWRfZGVidWcsIHBhcmFsbGVsKQ==') }} - name: "Stateless tests (amd_debug, parallel)" - outputs: - data: ${{ steps.run.outputs.DATA }} - steps: - - name: Checkout code - uses: actions/checkout@v4 - with: - ref: ${{ env.CHECKOUT_REF }} - - - name: Prepare env script - run: | - rm -rf ./ci/tmp ./ci/tmp ./ci/tmp - mkdir -p ./ci/tmp ./ci/tmp ./ci/tmp - cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF' - export PYTHONPATH=./ci:.: - cat > ./ci/tmp/workflow_config_masterci.json << 'EOF' - ${{ needs.config_workflow.outputs.data }} - EOF - cat > ./ci/tmp/workflow_status.json << 'EOF' - ${{ toJson(needs) }} - EOF - ENV_SETUP_SCRIPT_EOF - - - name: Run - id: run - run: | - . ./ci/tmp/praktika_setup_env.sh - set -o pipefail - if command -v ts &> /dev/null; then - python3 -m praktika run 'Stateless tests (amd_debug, parallel)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log - else - python3 -m praktika run 'Stateless tests (amd_debug, parallel)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log - fi - - stateless_tests_amd_debug_sequential: - runs-on: [self-hosted, amd-small] - needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_amd_debug] - if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhbWRfZGVidWcsIHNlcXVlbnRpYWwp') }} - name: "Stateless tests (amd_debug, sequential)" - outputs: - data: ${{ steps.run.outputs.DATA }} - steps: - - name: Checkout code - uses: actions/checkout@v4 - with: - ref: ${{ env.CHECKOUT_REF }} - - - name: Prepare env script - run: | - rm -rf ./ci/tmp ./ci/tmp ./ci/tmp - mkdir -p ./ci/tmp ./ci/tmp ./ci/tmp - cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF' - export PYTHONPATH=./ci:.: - cat > ./ci/tmp/workflow_config_masterci.json << 'EOF' - ${{ needs.config_workflow.outputs.data }} - EOF - cat > ./ci/tmp/workflow_status.json << 'EOF' - ${{ toJson(needs) }} - EOF - ENV_SETUP_SCRIPT_EOF - - - name: Run - id: run - run: | - . 
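# Every generated Run step shares one logging shape; stripped to its
# essentials: `set -o pipefail` propagates the praktika exit code through
# the pipe (so tee cannot mask a failure), `|&` forwards stderr as well as
# stdout, and moreutils' `ts` prefixes each line with a timestamp when it
# is installed, with a plain `tee` fallback otherwise.
set -o pipefail
if command -v ts &> /dev/null; then
    python3 -m praktika run 'Stateless tests (amd_debug, parallel)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
else
    python3 -m praktika run 'Stateless tests (amd_debug, parallel)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log
fi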
./ci/tmp/praktika_setup_env.sh - set -o pipefail - if command -v ts &> /dev/null; then - python3 -m praktika run 'Stateless tests (amd_debug, sequential)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log - else - python3 -m praktika run 'Stateless tests (amd_debug, sequential)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log - fi - - stateless_tests_amd_tsan_parallel_1_2: - runs-on: [self-hosted, amd-large] - needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_amd_tsan] - if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhbWRfdHNhbiwgcGFyYWxsZWwsIDEvMik=') }} - name: "Stateless tests (amd_tsan, parallel, 1/2)" - outputs: - data: ${{ steps.run.outputs.DATA }} - steps: - - name: Checkout code - uses: actions/checkout@v4 - with: - ref: ${{ env.CHECKOUT_REF }} - - - name: Prepare env script - run: | - rm -rf ./ci/tmp ./ci/tmp ./ci/tmp - mkdir -p ./ci/tmp ./ci/tmp ./ci/tmp - cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF' - export PYTHONPATH=./ci:.: - cat > ./ci/tmp/workflow_config_masterci.json << 'EOF' - ${{ needs.config_workflow.outputs.data }} - EOF - cat > ./ci/tmp/workflow_status.json << 'EOF' - ${{ toJson(needs) }} - EOF - ENV_SETUP_SCRIPT_EOF - - - name: Run - id: run - run: | - . ./ci/tmp/praktika_setup_env.sh - set -o pipefail - if command -v ts &> /dev/null; then - python3 -m praktika run 'Stateless tests (amd_tsan, parallel, 1/2)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log - else - python3 -m praktika run 'Stateless tests (amd_tsan, parallel, 1/2)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log - fi - - stateless_tests_amd_tsan_parallel_2_2: - runs-on: [self-hosted, amd-large] - needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_amd_tsan] - if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhbWRfdHNhbiwgcGFyYWxsZWwsIDIvMik=') }} - name: "Stateless tests (amd_tsan, parallel, 2/2)" - outputs: - data: ${{ steps.run.outputs.DATA }} - steps: - - name: Checkout code - uses: actions/checkout@v4 - with: - ref: ${{ env.CHECKOUT_REF }} - - - name: Prepare env script - run: | - rm -rf ./ci/tmp ./ci/tmp ./ci/tmp - mkdir -p ./ci/tmp ./ci/tmp ./ci/tmp - cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF' - export PYTHONPATH=./ci:.: - cat > ./ci/tmp/workflow_config_masterci.json << 'EOF' - ${{ needs.config_workflow.outputs.data }} - EOF - cat > ./ci/tmp/workflow_status.json << 'EOF' - ${{ toJson(needs) }} - EOF - ENV_SETUP_SCRIPT_EOF - - - name: Run - id: run - run: | - . 
./ci/tmp/praktika_setup_env.sh - set -o pipefail - if command -v ts &> /dev/null; then - python3 -m praktika run 'Stateless tests (amd_tsan, parallel, 2/2)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log - else - python3 -m praktika run 'Stateless tests (amd_tsan, parallel, 2/2)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log - fi - - stateless_tests_amd_tsan_sequential_1_2: - runs-on: [self-hosted, amd-small] - needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_amd_tsan] - if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhbWRfdHNhbiwgc2VxdWVudGlhbCwgMS8yKQ==') }} - name: "Stateless tests (amd_tsan, sequential, 1/2)" - outputs: - data: ${{ steps.run.outputs.DATA }} - steps: - - name: Checkout code - uses: actions/checkout@v4 - with: - ref: ${{ env.CHECKOUT_REF }} - - - name: Prepare env script - run: | - rm -rf ./ci/tmp ./ci/tmp ./ci/tmp - mkdir -p ./ci/tmp ./ci/tmp ./ci/tmp - cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF' - export PYTHONPATH=./ci:.: - cat > ./ci/tmp/workflow_config_masterci.json << 'EOF' - ${{ needs.config_workflow.outputs.data }} - EOF - cat > ./ci/tmp/workflow_status.json << 'EOF' - ${{ toJson(needs) }} - EOF - ENV_SETUP_SCRIPT_EOF - - - name: Run - id: run - run: | - . ./ci/tmp/praktika_setup_env.sh - set -o pipefail - if command -v ts &> /dev/null; then - python3 -m praktika run 'Stateless tests (amd_tsan, sequential, 1/2)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log - else - python3 -m praktika run 'Stateless tests (amd_tsan, sequential, 1/2)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log - fi - - stateless_tests_amd_tsan_sequential_2_2: - runs-on: [self-hosted, amd-small] - needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_amd_tsan] - if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhbWRfdHNhbiwgc2VxdWVudGlhbCwgMi8yKQ==') }} - name: "Stateless tests (amd_tsan, sequential, 2/2)" - outputs: - data: ${{ steps.run.outputs.DATA }} - steps: - - name: Checkout code - uses: actions/checkout@v4 - with: - ref: ${{ env.CHECKOUT_REF }} - - - name: Prepare env script - run: | - rm -rf ./ci/tmp ./ci/tmp ./ci/tmp - mkdir -p ./ci/tmp ./ci/tmp ./ci/tmp - cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF' - export PYTHONPATH=./ci:.: - cat > ./ci/tmp/workflow_config_masterci.json << 'EOF' - ${{ needs.config_workflow.outputs.data }} - EOF - cat > ./ci/tmp/workflow_status.json << 'EOF' - ${{ toJson(needs) }} - EOF - ENV_SETUP_SCRIPT_EOF - - - name: Run - id: run - run: | - . 
./ci/tmp/praktika_setup_env.sh - set -o pipefail - if command -v ts &> /dev/null; then - python3 -m praktika run 'Stateless tests (amd_tsan, sequential, 2/2)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log - else - python3 -m praktika run 'Stateless tests (amd_tsan, sequential, 2/2)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log - fi - - stateless_tests_amd_msan_parallel_1_2: - runs-on: [self-hosted, amd-large] - needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_amd_msan] - if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhbWRfbXNhbiwgcGFyYWxsZWwsIDEvMik=') }} - name: "Stateless tests (amd_msan, parallel, 1/2)" - outputs: - data: ${{ steps.run.outputs.DATA }} - steps: - - name: Checkout code - uses: actions/checkout@v4 - with: - ref: ${{ env.CHECKOUT_REF }} - - - name: Prepare env script - run: | - rm -rf ./ci/tmp ./ci/tmp ./ci/tmp - mkdir -p ./ci/tmp ./ci/tmp ./ci/tmp - cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF' - export PYTHONPATH=./ci:.: - cat > ./ci/tmp/workflow_config_masterci.json << 'EOF' - ${{ needs.config_workflow.outputs.data }} - EOF - cat > ./ci/tmp/workflow_status.json << 'EOF' - ${{ toJson(needs) }} - EOF - ENV_SETUP_SCRIPT_EOF - - - name: Run - id: run - run: | - . ./ci/tmp/praktika_setup_env.sh - set -o pipefail - if command -v ts &> /dev/null; then - python3 -m praktika run 'Stateless tests (amd_msan, parallel, 1/2)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log - else - python3 -m praktika run 'Stateless tests (amd_msan, parallel, 1/2)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log - fi - - stateless_tests_amd_msan_parallel_2_2: - runs-on: [self-hosted, amd-large] - needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_amd_msan] - if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhbWRfbXNhbiwgcGFyYWxsZWwsIDIvMik=') }} - name: "Stateless tests (amd_msan, parallel, 2/2)" - outputs: - data: ${{ steps.run.outputs.DATA }} - steps: - - name: Checkout code - uses: actions/checkout@v4 - with: - ref: ${{ env.CHECKOUT_REF }} - - - name: Prepare env script - run: | - rm -rf ./ci/tmp ./ci/tmp ./ci/tmp - mkdir -p ./ci/tmp ./ci/tmp ./ci/tmp - cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF' - export PYTHONPATH=./ci:.: - cat > ./ci/tmp/workflow_config_masterci.json << 'EOF' - ${{ needs.config_workflow.outputs.data }} - EOF - cat > ./ci/tmp/workflow_status.json << 'EOF' - ${{ toJson(needs) }} - EOF - ENV_SETUP_SCRIPT_EOF - - - name: Run - id: run - run: | - . 
./ci/tmp/praktika_setup_env.sh - set -o pipefail - if command -v ts &> /dev/null; then - python3 -m praktika run 'Stateless tests (amd_msan, parallel, 2/2)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log - else - python3 -m praktika run 'Stateless tests (amd_msan, parallel, 2/2)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log - fi - - stateless_tests_amd_msan_sequential_1_2: - runs-on: [self-hosted, amd-small-mem] - needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_amd_msan] - if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhbWRfbXNhbiwgc2VxdWVudGlhbCwgMS8yKQ==') }} - name: "Stateless tests (amd_msan, sequential, 1/2)" - outputs: - data: ${{ steps.run.outputs.DATA }} - steps: - - name: Checkout code - uses: actions/checkout@v4 - with: - ref: ${{ env.CHECKOUT_REF }} - - - name: Prepare env script - run: | - rm -rf ./ci/tmp ./ci/tmp ./ci/tmp - mkdir -p ./ci/tmp ./ci/tmp ./ci/tmp - cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF' - export PYTHONPATH=./ci:.: - cat > ./ci/tmp/workflow_config_masterci.json << 'EOF' - ${{ needs.config_workflow.outputs.data }} - EOF - cat > ./ci/tmp/workflow_status.json << 'EOF' - ${{ toJson(needs) }} - EOF - ENV_SETUP_SCRIPT_EOF - - - name: Run - id: run - run: | - . ./ci/tmp/praktika_setup_env.sh - set -o pipefail - if command -v ts &> /dev/null; then - python3 -m praktika run 'Stateless tests (amd_msan, sequential, 1/2)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log - else - python3 -m praktika run 'Stateless tests (amd_msan, sequential, 1/2)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log - fi - - stateless_tests_amd_msan_sequential_2_2: - runs-on: [self-hosted, amd-small-mem] - needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_amd_msan] - if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhbWRfbXNhbiwgc2VxdWVudGlhbCwgMi8yKQ==') }} - name: "Stateless tests (amd_msan, sequential, 2/2)" - outputs: - data: ${{ steps.run.outputs.DATA }} - steps: - - name: Checkout code - uses: actions/checkout@v4 - with: - ref: ${{ env.CHECKOUT_REF }} - - - name: Prepare env script - run: | - rm -rf ./ci/tmp ./ci/tmp ./ci/tmp - mkdir -p ./ci/tmp ./ci/tmp ./ci/tmp - cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF' - export PYTHONPATH=./ci:.: - cat > ./ci/tmp/workflow_config_masterci.json << 'EOF' - ${{ needs.config_workflow.outputs.data }} - EOF - cat > ./ci/tmp/workflow_status.json << 'EOF' - ${{ toJson(needs) }} - EOF - ENV_SETUP_SCRIPT_EOF - - - name: Run - id: run - run: | - . 
./ci/tmp/praktika_setup_env.sh - set -o pipefail - if command -v ts &> /dev/null; then - python3 -m praktika run 'Stateless tests (amd_msan, sequential, 2/2)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log - else - python3 -m praktika run 'Stateless tests (amd_msan, sequential, 2/2)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log - fi - - stateless_tests_amd_ubsan_parallel: - runs-on: [self-hosted, amd-medium-cpu] - needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_amd_ubsan] - if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhbWRfdWJzYW4sIHBhcmFsbGVsKQ==') }} - name: "Stateless tests (amd_ubsan, parallel)" - outputs: - data: ${{ steps.run.outputs.DATA }} - steps: - - name: Checkout code - uses: actions/checkout@v4 - with: - ref: ${{ env.CHECKOUT_REF }} - - - name: Prepare env script - run: | - rm -rf ./ci/tmp ./ci/tmp ./ci/tmp - mkdir -p ./ci/tmp ./ci/tmp ./ci/tmp - cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF' - export PYTHONPATH=./ci:.: - cat > ./ci/tmp/workflow_config_masterci.json << 'EOF' - ${{ needs.config_workflow.outputs.data }} - EOF - cat > ./ci/tmp/workflow_status.json << 'EOF' - ${{ toJson(needs) }} - EOF - ENV_SETUP_SCRIPT_EOF - - - name: Run - id: run - run: | - . ./ci/tmp/praktika_setup_env.sh - set -o pipefail - if command -v ts &> /dev/null; then - python3 -m praktika run 'Stateless tests (amd_ubsan, parallel)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log - else - python3 -m praktika run 'Stateless tests (amd_ubsan, parallel)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log - fi - - stateless_tests_amd_ubsan_sequential: - runs-on: [self-hosted, amd-small-mem] - needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_amd_ubsan] - if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhbWRfdWJzYW4sIHNlcXVlbnRpYWwp') }} - name: "Stateless tests (amd_ubsan, sequential)" - outputs: - data: ${{ steps.run.outputs.DATA }} - steps: - - name: Checkout code - uses: actions/checkout@v4 - with: - ref: ${{ env.CHECKOUT_REF }} - - - name: Prepare env script - run: | - rm -rf ./ci/tmp ./ci/tmp ./ci/tmp - mkdir -p ./ci/tmp ./ci/tmp ./ci/tmp - cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF' - export PYTHONPATH=./ci:.: - cat > ./ci/tmp/workflow_config_masterci.json << 'EOF' - ${{ needs.config_workflow.outputs.data }} - EOF - cat > ./ci/tmp/workflow_status.json << 'EOF' - ${{ toJson(needs) }} - EOF - ENV_SETUP_SCRIPT_EOF - - - name: Run - id: run - run: | - . 
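# The "Prepare env script" steps begin by resetting the job scratch area.
# The same ./ci/tmp path appears three times because the generator appears
# to fill one template slot per logical directory (assumption: input,
# output, and temp dirs, all resolving to ./ci/tmp here); rm -rf and
# mkdir -p are both idempotent, so the repetition is harmless.
rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
mkdir -p ./ci/tmp ./ci/tmp ./ci/tmp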
./ci/tmp/praktika_setup_env.sh - set -o pipefail - if command -v ts &> /dev/null; then - python3 -m praktika run 'Stateless tests (amd_ubsan, sequential)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log - else - python3 -m praktika run 'Stateless tests (amd_ubsan, sequential)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log - fi - - stateless_tests_amd_debug_distributed_plan_s3_storage_parallel: - runs-on: [self-hosted, amd-medium] - needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_amd_debug] - if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhbWRfZGVidWcsIGRpc3RyaWJ1dGVkIHBsYW4sIHMzIHN0b3JhZ2UsIHBhcmFsbGVsKQ==') }} - name: "Stateless tests (amd_debug, distributed plan, s3 storage, parallel)" - outputs: - data: ${{ steps.run.outputs.DATA }} - steps: - - name: Checkout code - uses: actions/checkout@v4 - with: - ref: ${{ env.CHECKOUT_REF }} - - - name: Prepare env script - run: | - rm -rf ./ci/tmp ./ci/tmp ./ci/tmp - mkdir -p ./ci/tmp ./ci/tmp ./ci/tmp - cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF' - export PYTHONPATH=./ci:.: - cat > ./ci/tmp/workflow_config_masterci.json << 'EOF' - ${{ needs.config_workflow.outputs.data }} - EOF - cat > ./ci/tmp/workflow_status.json << 'EOF' - ${{ toJson(needs) }} - EOF - ENV_SETUP_SCRIPT_EOF - - - name: Run - id: run - run: | - . ./ci/tmp/praktika_setup_env.sh - set -o pipefail - if command -v ts &> /dev/null; then - python3 -m praktika run 'Stateless tests (amd_debug, distributed plan, s3 storage, parallel)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log - else - python3 -m praktika run 'Stateless tests (amd_debug, distributed plan, s3 storage, parallel)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log - fi - - stateless_tests_amd_debug_distributed_plan_s3_storage_sequential: - runs-on: [self-hosted, amd-small] + stateless_tests_amd_debug_asyncinsert_s3_storage_parallel: + runs-on: [self-hosted, altinity-on-demand, altinity-func-tester] needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_amd_debug] - if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhbWRfZGVidWcsIGRpc3RyaWJ1dGVkIHBsYW4sIHMzIHN0b3JhZ2UsIHNlcXVlbnRpYWwp') }} - name: "Stateless tests (amd_debug, distributed plan, s3 storage, sequential)" + if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhbWRfZGVidWcsIEFzeW5jSW5zZXJ0LCBzMyBzdG9yYWdlLCBwYXJhbGxlbCk=') }} + name: "Stateless tests (amd_debug, AsyncInsert, s3 storage, parallel)" outputs: data: ${{ steps.run.outputs.DATA }} steps: @@ -2270,43 +1458,12 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} - - name: Prepare env script - run: | - rm -rf ./ci/tmp ./ci/tmp ./ci/tmp - mkdir -p ./ci/tmp ./ci/tmp ./ci/tmp - cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF' - export PYTHONPATH=./ci:.: - cat > ./ci/tmp/workflow_config_masterci.json << 'EOF' - ${{ needs.config_workflow.outputs.data }} - EOF - cat > ./ci/tmp/workflow_status.json << 'EOF' - ${{ toJson(needs) }} - EOF - ENV_SETUP_SCRIPT_EOF - - - name: Run - id: run - run: | - . 
./ci/tmp/praktika_setup_env.sh - set -o pipefail - if command -v ts &> /dev/null; then - python3 -m praktika run 'Stateless tests (amd_debug, distributed plan, s3 storage, sequential)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log - else - python3 -m praktika run 'Stateless tests (amd_debug, distributed plan, s3 storage, sequential)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log - fi - - stateless_tests_amd_tsan_s3_storage_parallel: - runs-on: [self-hosted, amd-medium-cpu] - needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_amd_tsan] - if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhbWRfdHNhbiwgczMgc3RvcmFnZSwgcGFyYWxsZWwp') }} - name: "Stateless tests (amd_tsan, s3 storage, parallel)" - outputs: - data: ${{ steps.run.outputs.DATA }} - steps: - - name: Checkout code - uses: actions/checkout@v4 + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup with: - ref: ${{ env.CHECKOUT_REF }} + test_name: "Stateless tests (amd_debug, AsyncInsert, s3 storage, parallel)" - name: Prepare env script run: | @@ -2328,16 +1485,16 @@ jobs: . ./ci/tmp/praktika_setup_env.sh set -o pipefail if command -v ts &> /dev/null; then - python3 -m praktika run 'Stateless tests (amd_tsan, s3 storage, parallel)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log + python3 -m praktika run 'Stateless tests (amd_debug, AsyncInsert, s3 storage, parallel)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log else - python3 -m praktika run 'Stateless tests (amd_tsan, s3 storage, parallel)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log + python3 -m praktika run 'Stateless tests (amd_debug, AsyncInsert, s3 storage, parallel)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log fi - stateless_tests_amd_tsan_s3_storage_sequential_1_2: - runs-on: [self-hosted, amd-small-mem] - needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_amd_tsan] - if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhbWRfdHNhbiwgczMgc3RvcmFnZSwgc2VxdWVudGlhbCwgMS8yKQ==') }} - name: "Stateless tests (amd_tsan, s3 storage, sequential, 1/2)" + stateless_tests_amd_debug_asyncinsert_s3_storage_sequential: + runs-on: [self-hosted, altinity-on-demand, altinity-func-tester] + needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_amd_debug] + if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhbWRfZGVidWcsIEFzeW5jSW5zZXJ0LCBzMyBzdG9yYWdlLCBzZXF1ZW50aWFsKQ==') }} + name: "Stateless tests (amd_debug, AsyncInsert, s3 storage, sequential)" outputs: data: ${{ steps.run.outputs.DATA }} steps: @@ -2346,6 +1503,13 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Stateless tests (amd_debug, AsyncInsert, s3 storage, sequential)" + - name: Prepare env script run: | rm -rf ./ci/tmp ./ci/tmp ./ci/tmp @@ -2366,16 +1530,16 @@ jobs: . 
./ci/tmp/praktika_setup_env.sh set -o pipefail if command -v ts &> /dev/null; then - python3 -m praktika run 'Stateless tests (amd_tsan, s3 storage, sequential, 1/2)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log + python3 -m praktika run 'Stateless tests (amd_debug, AsyncInsert, s3 storage, sequential)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log else - python3 -m praktika run 'Stateless tests (amd_tsan, s3 storage, sequential, 1/2)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log + python3 -m praktika run 'Stateless tests (amd_debug, AsyncInsert, s3 storage, sequential)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log fi - stateless_tests_amd_tsan_s3_storage_sequential_2_2: - runs-on: [self-hosted, amd-small-mem] - needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_amd_tsan] - if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhbWRfdHNhbiwgczMgc3RvcmFnZSwgc2VxdWVudGlhbCwgMi8yKQ==') }} - name: "Stateless tests (amd_tsan, s3 storage, sequential, 2/2)" + stateless_tests_amd_debug_parallel: + runs-on: [self-hosted, altinity-on-demand, altinity-func-tester] + needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_amd_debug] + if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhbWRfZGVidWcsIHBhcmFsbGVsKQ==') }} + name: "Stateless tests (amd_debug, parallel)" outputs: data: ${{ steps.run.outputs.DATA }} steps: @@ -2384,6 +1548,13 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Stateless tests (amd_debug, parallel)" + - name: Prepare env script run: | rm -rf ./ci/tmp ./ci/tmp ./ci/tmp @@ -2404,16 +1575,16 @@ jobs: . 
./ci/tmp/praktika_setup_env.sh set -o pipefail if command -v ts &> /dev/null; then - python3 -m praktika run 'Stateless tests (amd_tsan, s3 storage, sequential, 2/2)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log + python3 -m praktika run 'Stateless tests (amd_debug, parallel)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log else - python3 -m praktika run 'Stateless tests (amd_tsan, s3 storage, sequential, 2/2)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log + python3 -m praktika run 'Stateless tests (amd_debug, parallel)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log fi - stateless_tests_arm_binary_parallel: - runs-on: [self-hosted, arm-medium-cpu] - needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_arm_binary] - if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhcm1fYmluYXJ5LCBwYXJhbGxlbCk=') }} - name: "Stateless tests (arm_binary, parallel)" + stateless_tests_amd_debug_sequential: + runs-on: [self-hosted, altinity-on-demand, altinity-func-tester] + needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_amd_debug] + if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhbWRfZGVidWcsIHNlcXVlbnRpYWwp') }} + name: "Stateless tests (amd_debug, sequential)" outputs: data: ${{ steps.run.outputs.DATA }} steps: @@ -2422,6 +1593,13 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Stateless tests (amd_debug, sequential)" + - name: Prepare env script run: | rm -rf ./ci/tmp ./ci/tmp ./ci/tmp @@ -2442,16 +1620,16 @@ jobs: . 
./ci/tmp/praktika_setup_env.sh set -o pipefail if command -v ts &> /dev/null; then - python3 -m praktika run 'Stateless tests (arm_binary, parallel)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log + python3 -m praktika run 'Stateless tests (amd_debug, sequential)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log else - python3 -m praktika run 'Stateless tests (arm_binary, parallel)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log + python3 -m praktika run 'Stateless tests (amd_debug, sequential)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log fi - stateless_tests_arm_binary_sequential: - runs-on: [self-hosted, arm-small] - needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_arm_binary] - if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhcm1fYmluYXJ5LCBzZXF1ZW50aWFsKQ==') }} - name: "Stateless tests (arm_binary, sequential)" + stateless_tests_amd_tsan_parallel_1_2: + runs-on: [self-hosted, altinity-on-demand, altinity-func-tester] + needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_amd_tsan] + if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhbWRfdHNhbiwgcGFyYWxsZWwsIDEvMik=') }} + name: "Stateless tests (amd_tsan, parallel, 1/2)" outputs: data: ${{ steps.run.outputs.DATA }} steps: @@ -2460,6 +1638,13 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Stateless tests (amd_tsan, parallel, 1/2)" + - name: Prepare env script run: | rm -rf ./ci/tmp ./ci/tmp ./ci/tmp @@ -2480,16 +1665,16 @@ jobs: . 
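# Hypothetical post-migration audit, not part of this patch: the jobs in
# this file move off the upstream runner pools (builder, style-checker,
# amd-*, arm-*) onto the altinity-on-demand/altinity-func-tester labels,
# and a grep over the generated workflows is a quick way to catch any
# stragglers after regeneration.
grep -rnE 'runs-on:.*\[self-hosted, (builder|style-checker|amd-|arm-)' .github/workflows/ \
  && echo 'upstream runner labels still present' \
  || echo 'all generated jobs target altinity-* runners'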
./ci/tmp/praktika_setup_env.sh set -o pipefail if command -v ts &> /dev/null; then - python3 -m praktika run 'Stateless tests (arm_binary, sequential)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log + python3 -m praktika run 'Stateless tests (amd_tsan, parallel, 1/2)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log else - python3 -m praktika run 'Stateless tests (arm_binary, sequential)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log + python3 -m praktika run 'Stateless tests (amd_tsan, parallel, 1/2)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log fi - stateless_tests_arm_asan_azure_parallel: - runs-on: [self-hosted, arm-medium] - needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_arm_asan] - if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhcm1fYXNhbiwgYXp1cmUsIHBhcmFsbGVsKQ==') }} - name: "Stateless tests (arm_asan, azure, parallel)" + stateless_tests_amd_tsan_parallel_2_2: + runs-on: [self-hosted, altinity-on-demand, altinity-func-tester] + needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_amd_tsan] + if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhbWRfdHNhbiwgcGFyYWxsZWwsIDIvMik=') }} + name: "Stateless tests (amd_tsan, parallel, 2/2)" outputs: data: ${{ steps.run.outputs.DATA }} steps: @@ -2498,6 +1683,13 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Stateless tests (amd_tsan, parallel, 2/2)" + - name: Prepare env script run: | rm -rf ./ci/tmp ./ci/tmp ./ci/tmp @@ -2518,16 +1710,16 @@ jobs: . 
./ci/tmp/praktika_setup_env.sh set -o pipefail if command -v ts &> /dev/null; then - python3 -m praktika run 'Stateless tests (arm_asan, azure, parallel)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log + python3 -m praktika run 'Stateless tests (amd_tsan, parallel, 2/2)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log else - python3 -m praktika run 'Stateless tests (arm_asan, azure, parallel)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log + python3 -m praktika run 'Stateless tests (amd_tsan, parallel, 2/2)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log fi - stateless_tests_arm_asan_azure_sequential: - runs-on: [self-hosted, arm-small-mem] - needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_arm_asan] - if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhcm1fYXNhbiwgYXp1cmUsIHNlcXVlbnRpYWwp') }} - name: "Stateless tests (arm_asan, azure, sequential)" + stateless_tests_amd_tsan_sequential_1_2: + runs-on: [self-hosted, altinity-on-demand, altinity-func-tester] + needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_amd_tsan] + if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhbWRfdHNhbiwgc2VxdWVudGlhbCwgMS8yKQ==') }} + name: "Stateless tests (amd_tsan, sequential, 1/2)" outputs: data: ${{ steps.run.outputs.DATA }} steps: @@ -2536,6 +1728,13 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Stateless tests (amd_tsan, sequential, 1/2)" + - name: Prepare env script run: | rm -rf ./ci/tmp ./ci/tmp ./ci/tmp @@ -2556,16 +1755,16 @@ jobs: . 
./ci/tmp/praktika_setup_env.sh set -o pipefail if command -v ts &> /dev/null; then - python3 -m praktika run 'Stateless tests (arm_asan, azure, sequential)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log + python3 -m praktika run 'Stateless tests (amd_tsan, sequential, 1/2)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log else - python3 -m praktika run 'Stateless tests (arm_asan, azure, sequential)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log + python3 -m praktika run 'Stateless tests (amd_tsan, sequential, 1/2)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log fi - integration_tests_amd_asan_old_analyzer_1_6: - runs-on: [self-hosted, amd-medium] - needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_amd_asan] - if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKGFtZF9hc2FuLCBvbGQgYW5hbHl6ZXIsIDEvNik=') }} - name: "Integration tests (amd_asan, old analyzer, 1/6)" + stateless_tests_amd_tsan_sequential_2_2: + runs-on: [self-hosted, altinity-on-demand, altinity-func-tester] + needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_amd_tsan] + if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhbWRfdHNhbiwgc2VxdWVudGlhbCwgMi8yKQ==') }} + name: "Stateless tests (amd_tsan, sequential, 2/2)" outputs: data: ${{ steps.run.outputs.DATA }} steps: @@ -2574,6 +1773,13 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Stateless tests (amd_tsan, sequential, 2/2)" + - name: Prepare env script run: | rm -rf ./ci/tmp ./ci/tmp ./ci/tmp @@ -2594,16 +1800,16 @@ jobs: . 
./ci/tmp/praktika_setup_env.sh set -o pipefail if command -v ts &> /dev/null; then - python3 -m praktika run 'Integration tests (amd_asan, old analyzer, 1/6)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log + python3 -m praktika run 'Stateless tests (amd_tsan, sequential, 2/2)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log else - python3 -m praktika run 'Integration tests (amd_asan, old analyzer, 1/6)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log + python3 -m praktika run 'Stateless tests (amd_tsan, sequential, 2/2)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log fi - integration_tests_amd_asan_old_analyzer_2_6: - runs-on: [self-hosted, amd-medium] - needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_amd_asan] - if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKGFtZF9hc2FuLCBvbGQgYW5hbHl6ZXIsIDIvNik=') }} - name: "Integration tests (amd_asan, old analyzer, 2/6)" + stateless_tests_amd_msan_parallel_1_2: + runs-on: [self-hosted, altinity-on-demand, altinity-func-tester] + needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_amd_msan] + if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhbWRfbXNhbiwgcGFyYWxsZWwsIDEvMik=') }} + name: "Stateless tests (amd_msan, parallel, 1/2)" outputs: data: ${{ steps.run.outputs.DATA }} steps: @@ -2612,6 +1818,13 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Stateless tests (amd_msan, parallel, 1/2)" + - name: Prepare env script run: | rm -rf ./ci/tmp ./ci/tmp ./ci/tmp @@ -2632,16 +1845,16 @@ jobs: . 
./ci/tmp/praktika_setup_env.sh set -o pipefail if command -v ts &> /dev/null; then - python3 -m praktika run 'Integration tests (amd_asan, old analyzer, 2/6)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log + python3 -m praktika run 'Stateless tests (amd_msan, parallel, 1/2)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log else - python3 -m praktika run 'Integration tests (amd_asan, old analyzer, 2/6)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log + python3 -m praktika run 'Stateless tests (amd_msan, parallel, 1/2)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log fi - integration_tests_amd_asan_old_analyzer_3_6: - runs-on: [self-hosted, amd-medium] - needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_amd_asan] - if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKGFtZF9hc2FuLCBvbGQgYW5hbHl6ZXIsIDMvNik=') }} - name: "Integration tests (amd_asan, old analyzer, 3/6)" + stateless_tests_amd_msan_parallel_2_2: + runs-on: [self-hosted, altinity-on-demand, altinity-func-tester] + needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_amd_msan] + if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhbWRfbXNhbiwgcGFyYWxsZWwsIDIvMik=') }} + name: "Stateless tests (amd_msan, parallel, 2/2)" outputs: data: ${{ steps.run.outputs.DATA }} steps: @@ -2650,6 +1863,13 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Stateless tests (amd_msan, parallel, 2/2)" + - name: Prepare env script run: | rm -rf ./ci/tmp ./ci/tmp ./ci/tmp @@ -2670,16 +1890,16 @@ jobs: . 
./ci/tmp/praktika_setup_env.sh set -o pipefail if command -v ts &> /dev/null; then - python3 -m praktika run 'Integration tests (amd_asan, old analyzer, 3/6)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log + python3 -m praktika run 'Stateless tests (amd_msan, parallel, 2/2)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log else - python3 -m praktika run 'Integration tests (amd_asan, old analyzer, 3/6)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log + python3 -m praktika run 'Stateless tests (amd_msan, parallel, 2/2)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log fi - integration_tests_amd_asan_old_analyzer_4_6: - runs-on: [self-hosted, amd-medium] - needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_amd_asan] - if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKGFtZF9hc2FuLCBvbGQgYW5hbHl6ZXIsIDQvNik=') }} - name: "Integration tests (amd_asan, old analyzer, 4/6)" + stateless_tests_amd_msan_sequential_1_2: + runs-on: [self-hosted, altinity-on-demand, altinity-func-tester] + needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_amd_msan] + if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhbWRfbXNhbiwgc2VxdWVudGlhbCwgMS8yKQ==') }} + name: "Stateless tests (amd_msan, sequential, 1/2)" outputs: data: ${{ steps.run.outputs.DATA }} steps: @@ -2688,6 +1908,13 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Stateless tests (amd_msan, sequential, 1/2)" + - name: Prepare env script run: | rm -rf ./ci/tmp ./ci/tmp ./ci/tmp @@ -2708,16 +1935,16 @@ jobs: . 
./ci/tmp/praktika_setup_env.sh set -o pipefail if command -v ts &> /dev/null; then - python3 -m praktika run 'Integration tests (amd_asan, old analyzer, 4/6)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log + python3 -m praktika run 'Stateless tests (amd_msan, sequential, 1/2)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log else - python3 -m praktika run 'Integration tests (amd_asan, old analyzer, 4/6)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log + python3 -m praktika run 'Stateless tests (amd_msan, sequential, 1/2)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log fi - integration_tests_amd_asan_old_analyzer_5_6: - runs-on: [self-hosted, amd-medium] - needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_amd_asan] - if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKGFtZF9hc2FuLCBvbGQgYW5hbHl6ZXIsIDUvNik=') }} - name: "Integration tests (amd_asan, old analyzer, 5/6)" + stateless_tests_amd_msan_sequential_2_2: + runs-on: [self-hosted, altinity-on-demand, altinity-func-tester] + needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_amd_msan] + if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhbWRfbXNhbiwgc2VxdWVudGlhbCwgMi8yKQ==') }} + name: "Stateless tests (amd_msan, sequential, 2/2)" outputs: data: ${{ steps.run.outputs.DATA }} steps: @@ -2726,6 +1953,13 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Stateless tests (amd_msan, sequential, 2/2)" + - name: Prepare env script run: | rm -rf ./ci/tmp ./ci/tmp ./ci/tmp @@ -2746,16 +1980,16 @@ jobs: . 
./ci/tmp/praktika_setup_env.sh set -o pipefail if command -v ts &> /dev/null; then - python3 -m praktika run 'Integration tests (amd_asan, old analyzer, 5/6)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log + python3 -m praktika run 'Stateless tests (amd_msan, sequential, 2/2)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log else - python3 -m praktika run 'Integration tests (amd_asan, old analyzer, 5/6)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log + python3 -m praktika run 'Stateless tests (amd_msan, sequential, 2/2)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log fi - integration_tests_amd_asan_old_analyzer_6_6: - runs-on: [self-hosted, amd-medium] - needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_amd_asan] - if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKGFtZF9hc2FuLCBvbGQgYW5hbHl6ZXIsIDYvNik=') }} - name: "Integration tests (amd_asan, old analyzer, 6/6)" + stateless_tests_amd_ubsan_parallel: + runs-on: [self-hosted, altinity-on-demand, altinity-func-tester] + needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_amd_ubsan] + if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhbWRfdWJzYW4sIHBhcmFsbGVsKQ==') }} + name: "Stateless tests (amd_ubsan, parallel)" outputs: data: ${{ steps.run.outputs.DATA }} steps: @@ -2764,6 +1998,13 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Stateless tests (amd_ubsan, parallel)" + - name: Prepare env script run: | rm -rf ./ci/tmp ./ci/tmp ./ci/tmp @@ -2784,16 +2025,16 @@ jobs: . 
./ci/tmp/praktika_setup_env.sh set -o pipefail if command -v ts &> /dev/null; then - python3 -m praktika run 'Integration tests (amd_asan, old analyzer, 6/6)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log + python3 -m praktika run 'Stateless tests (amd_ubsan, parallel)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log else - python3 -m praktika run 'Integration tests (amd_asan, old analyzer, 6/6)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log + python3 -m praktika run 'Stateless tests (amd_ubsan, parallel)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log fi - integration_tests_amd_binary_1_5: - runs-on: [self-hosted, amd-medium] - needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_amd_binary] - if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKGFtZF9iaW5hcnksIDEvNSk=') }} - name: "Integration tests (amd_binary, 1/5)" + stateless_tests_amd_ubsan_sequential: + runs-on: [self-hosted, altinity-on-demand, altinity-func-tester] + needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_amd_ubsan] + if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhbWRfdWJzYW4sIHNlcXVlbnRpYWwp') }} + name: "Stateless tests (amd_ubsan, sequential)" outputs: data: ${{ steps.run.outputs.DATA }} steps: @@ -2802,6 +2043,13 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Stateless tests (amd_ubsan, sequential)" + - name: Prepare env script run: | rm -rf ./ci/tmp ./ci/tmp ./ci/tmp @@ -2822,16 +2070,16 @@ jobs: . 
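(Editor's note on the run-step logging pattern repeated above: `set -o pipefail` keeps the praktika exit code from being masked by `tee`, and `|&` — bash shorthand for `2>&1 |` — sends stderr into `./ci/tmp/job.log` as well. Timestamping is best-effort: if `ts` from moreutils is present on the runner it prefixes each line, otherwise the log is written untimestamped. A rough stand-in for the `ts '[%Y-%m-%d %H:%M:%S]'` filter for runners without moreutils; sketch only, and the file name `ts_fallback.py` is hypothetical:

#!/usr/bin/env python3
# Prefix every stdin line with a wall-clock timestamp, approximating
# moreutils' `ts '[%Y-%m-%d %H:%M:%S]'` as used in the run steps above.
import sys
from datetime import datetime

for line in sys.stdin:
    sys.stdout.write(datetime.now().strftime("[%Y-%m-%d %H:%M:%S] ") + line)
    sys.stdout.flush()  # keep the log live while piped into `tee`

Usage would mirror the generated step, e.g. `python3 -m praktika run '<job>' --workflow "MasterCI" --ci |& python3 ts_fallback.py | tee ./ci/tmp/job.log`.)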
./ci/tmp/praktika_setup_env.sh set -o pipefail if command -v ts &> /dev/null; then - python3 -m praktika run 'Integration tests (amd_binary, 1/5)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log + python3 -m praktika run 'Stateless tests (amd_ubsan, sequential)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log else - python3 -m praktika run 'Integration tests (amd_binary, 1/5)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log + python3 -m praktika run 'Stateless tests (amd_ubsan, sequential)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log fi - integration_tests_amd_binary_2_5: - runs-on: [self-hosted, amd-medium] - needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_amd_binary] - if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKGFtZF9iaW5hcnksIDIvNSk=') }} - name: "Integration tests (amd_binary, 2/5)" + stateless_tests_amd_debug_distributed_plan_s3_storage_parallel: + runs-on: [self-hosted, altinity-on-demand, altinity-func-tester] + needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_amd_debug] + if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhbWRfZGVidWcsIGRpc3RyaWJ1dGVkIHBsYW4sIHMzIHN0b3JhZ2UsIHBhcmFsbGVsKQ==') }} + name: "Stateless tests (amd_debug, distributed plan, s3 storage, parallel)" outputs: data: ${{ steps.run.outputs.DATA }} steps: @@ -2840,6 +2088,13 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Stateless tests (amd_debug, distributed plan, s3 storage, parallel)" + - name: Prepare env script run: | rm -rf ./ci/tmp ./ci/tmp ./ci/tmp @@ -2860,16 +2115,16 @@ jobs: . 
./ci/tmp/praktika_setup_env.sh set -o pipefail if command -v ts &> /dev/null; then - python3 -m praktika run 'Integration tests (amd_binary, 2/5)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log + python3 -m praktika run 'Stateless tests (amd_debug, distributed plan, s3 storage, parallel)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log else - python3 -m praktika run 'Integration tests (amd_binary, 2/5)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log + python3 -m praktika run 'Stateless tests (amd_debug, distributed plan, s3 storage, parallel)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log fi - integration_tests_amd_binary_3_5: - runs-on: [self-hosted, amd-medium] - needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_amd_binary] - if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKGFtZF9iaW5hcnksIDMvNSk=') }} - name: "Integration tests (amd_binary, 3/5)" + stateless_tests_amd_debug_distributed_plan_s3_storage_sequential: + runs-on: [self-hosted, altinity-on-demand, altinity-func-tester] + needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_amd_debug] + if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhbWRfZGVidWcsIGRpc3RyaWJ1dGVkIHBsYW4sIHMzIHN0b3JhZ2UsIHNlcXVlbnRpYWwp') }} + name: "Stateless tests (amd_debug, distributed plan, s3 storage, sequential)" outputs: data: ${{ steps.run.outputs.DATA }} steps: @@ -2878,6 +2133,13 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Stateless tests (amd_debug, distributed plan, s3 storage, sequential)" + - name: Prepare env script run: | rm -rf ./ci/tmp ./ci/tmp ./ci/tmp @@ -2898,16 +2160,16 @@ jobs: . 
./ci/tmp/praktika_setup_env.sh set -o pipefail if command -v ts &> /dev/null; then - python3 -m praktika run 'Integration tests (amd_binary, 3/5)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log + python3 -m praktika run 'Stateless tests (amd_debug, distributed plan, s3 storage, sequential)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log else - python3 -m praktika run 'Integration tests (amd_binary, 3/5)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log + python3 -m praktika run 'Stateless tests (amd_debug, distributed plan, s3 storage, sequential)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log fi - integration_tests_amd_binary_4_5: - runs-on: [self-hosted, amd-medium] - needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_amd_binary] - if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKGFtZF9iaW5hcnksIDQvNSk=') }} - name: "Integration tests (amd_binary, 4/5)" + stateless_tests_amd_tsan_s3_storage_parallel: + runs-on: [self-hosted, altinity-on-demand, altinity-func-tester] + needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_amd_tsan] + if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhbWRfdHNhbiwgczMgc3RvcmFnZSwgcGFyYWxsZWwp') }} + name: "Stateless tests (amd_tsan, s3 storage, parallel)" outputs: data: ${{ steps.run.outputs.DATA }} steps: @@ -2916,6 +2178,13 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Stateless tests (amd_tsan, s3 storage, parallel)" + - name: Prepare env script run: | rm -rf ./ci/tmp ./ci/tmp ./ci/tmp @@ -2936,16 +2205,16 @@ jobs: . 
./ci/tmp/praktika_setup_env.sh set -o pipefail if command -v ts &> /dev/null; then - python3 -m praktika run 'Integration tests (amd_binary, 4/5)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log + python3 -m praktika run 'Stateless tests (amd_tsan, s3 storage, parallel)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log else - python3 -m praktika run 'Integration tests (amd_binary, 4/5)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log + python3 -m praktika run 'Stateless tests (amd_tsan, s3 storage, parallel)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log fi - integration_tests_amd_binary_5_5: - runs-on: [self-hosted, amd-medium] - needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_amd_binary] - if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKGFtZF9iaW5hcnksIDUvNSk=') }} - name: "Integration tests (amd_binary, 5/5)" + stateless_tests_amd_tsan_s3_storage_sequential_1_2: + runs-on: [self-hosted, altinity-on-demand, altinity-func-tester] + needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_amd_tsan] + if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhbWRfdHNhbiwgczMgc3RvcmFnZSwgc2VxdWVudGlhbCwgMS8yKQ==') }} + name: "Stateless tests (amd_tsan, s3 storage, sequential, 1/2)" outputs: data: ${{ steps.run.outputs.DATA }} steps: @@ -2954,6 +2223,13 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Stateless tests (amd_tsan, s3 storage, sequential, 1/2)" + - name: Prepare env script run: | rm -rf ./ci/tmp ./ci/tmp ./ci/tmp @@ -2974,16 +2250,16 @@ jobs: . 
./ci/tmp/praktika_setup_env.sh set -o pipefail if command -v ts &> /dev/null; then - python3 -m praktika run 'Integration tests (amd_binary, 5/5)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log + python3 -m praktika run 'Stateless tests (amd_tsan, s3 storage, sequential, 1/2)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log else - python3 -m praktika run 'Integration tests (amd_binary, 5/5)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log + python3 -m praktika run 'Stateless tests (amd_tsan, s3 storage, sequential, 1/2)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log fi - integration_tests_arm_binary_distributed_plan_1_4: - runs-on: [self-hosted, arm-medium] - needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_arm_binary] - if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKGFybV9iaW5hcnksIGRpc3RyaWJ1dGVkIHBsYW4sIDEvNCk=') }} - name: "Integration tests (arm_binary, distributed plan, 1/4)" + stateless_tests_amd_tsan_s3_storage_sequential_2_2: + runs-on: [self-hosted, altinity-on-demand, altinity-func-tester] + needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_amd_tsan] + if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhbWRfdHNhbiwgczMgc3RvcmFnZSwgc2VxdWVudGlhbCwgMi8yKQ==') }} + name: "Stateless tests (amd_tsan, s3 storage, sequential, 2/2)" outputs: data: ${{ steps.run.outputs.DATA }} steps: @@ -2992,6 +2268,13 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Stateless tests (amd_tsan, s3 storage, sequential, 2/2)" + - name: Prepare env script run: | rm -rf ./ci/tmp ./ci/tmp ./ci/tmp @@ -3012,16 +2295,16 @@ jobs: . 
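(Editor's note on the `1/2`, `2/2` — and, below, `1/6` … `6/6` — suffixes in the job names: these are shard indices, splitting one logical test suite across several jobs so the slices run on separate runners. The actual batching logic lives in praktika and the ci/ tree, not in this YAML; the following is only an illustrative round-robin sketch with a hypothetical `shard` helper, assuming a deterministic ordering of test names, and is not the project's real implementation:

def shard(tests: list[str], index: int, total: int) -> list[str]:
    # Deterministically select this job's slice of the suite;
    # `index` is 1-based, matching the "N/M" in the job names above.
    assert 1 <= index <= total
    return [t for i, t in enumerate(sorted(tests)) if i % total == index - 1]

suite = ["00001_select", "00002_insert", "00003_join", "00004_alter"]
print(shard(suite, 1, 2))  # -> ['00001_select', '00003_join']
print(shard(suite, 2, 2))  # -> ['00002_insert', '00004_alter']

Because every shard applies the same deterministic rule, the union of the M jobs covers the whole suite with no coordination between runners.)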
./ci/tmp/praktika_setup_env.sh set -o pipefail if command -v ts &> /dev/null; then - python3 -m praktika run 'Integration tests (arm_binary, distributed plan, 1/4)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log + python3 -m praktika run 'Stateless tests (amd_tsan, s3 storage, sequential, 2/2)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log else - python3 -m praktika run 'Integration tests (arm_binary, distributed plan, 1/4)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log + python3 -m praktika run 'Stateless tests (amd_tsan, s3 storage, sequential, 2/2)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log fi - integration_tests_arm_binary_distributed_plan_2_4: - runs-on: [self-hosted, arm-medium] + stateless_tests_arm_binary_parallel: + runs-on: [self-hosted, altinity-on-demand, altinity-func-tester-aarch64] needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_arm_binary] - if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKGFybV9iaW5hcnksIGRpc3RyaWJ1dGVkIHBsYW4sIDIvNCk=') }} - name: "Integration tests (arm_binary, distributed plan, 2/4)" + if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhcm1fYmluYXJ5LCBwYXJhbGxlbCk=') }} + name: "Stateless tests (arm_binary, parallel)" outputs: data: ${{ steps.run.outputs.DATA }} steps: @@ -3030,6 +2313,13 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Stateless tests (arm_binary, parallel)" + - name: Prepare env script run: | rm -rf ./ci/tmp ./ci/tmp ./ci/tmp @@ -3050,16 +2340,16 @@ jobs: . 
./ci/tmp/praktika_setup_env.sh set -o pipefail if command -v ts &> /dev/null; then - python3 -m praktika run 'Integration tests (arm_binary, distributed plan, 2/4)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log + python3 -m praktika run 'Stateless tests (arm_binary, parallel)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log else - python3 -m praktika run 'Integration tests (arm_binary, distributed plan, 2/4)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log + python3 -m praktika run 'Stateless tests (arm_binary, parallel)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log fi - integration_tests_arm_binary_distributed_plan_3_4: - runs-on: [self-hosted, arm-medium] + stateless_tests_arm_binary_sequential: + runs-on: [self-hosted, altinity-on-demand, altinity-func-tester-aarch64] needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_arm_binary] - if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKGFybV9iaW5hcnksIGRpc3RyaWJ1dGVkIHBsYW4sIDMvNCk=') }} - name: "Integration tests (arm_binary, distributed plan, 3/4)" + if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhcm1fYmluYXJ5LCBzZXF1ZW50aWFsKQ==') }} + name: "Stateless tests (arm_binary, sequential)" outputs: data: ${{ steps.run.outputs.DATA }} steps: @@ -3068,6 +2358,13 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Stateless tests (arm_binary, sequential)" + - name: Prepare env script run: | rm -rf ./ci/tmp ./ci/tmp ./ci/tmp @@ -3088,16 +2385,16 @@ jobs: . 
./ci/tmp/praktika_setup_env.sh set -o pipefail if command -v ts &> /dev/null; then - python3 -m praktika run 'Integration tests (arm_binary, distributed plan, 3/4)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log + python3 -m praktika run 'Stateless tests (arm_binary, sequential)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log else - python3 -m praktika run 'Integration tests (arm_binary, distributed plan, 3/4)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log + python3 -m praktika run 'Stateless tests (arm_binary, sequential)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log fi - integration_tests_arm_binary_distributed_plan_4_4: - runs-on: [self-hosted, arm-medium] - needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_arm_binary] - if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKGFybV9iaW5hcnksIGRpc3RyaWJ1dGVkIHBsYW4sIDQvNCk=') }} - name: "Integration tests (arm_binary, distributed plan, 4/4)" + integration_tests_amd_asan_old_analyzer_1_6: + runs-on: [self-hosted, altinity-on-demand, altinity-func-tester] + needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_amd_asan] + if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKGFtZF9hc2FuLCBvbGQgYW5hbHl6ZXIsIDEvNik=') }} + name: "Integration tests (amd_asan, old analyzer, 1/6)" outputs: data: ${{ steps.run.outputs.DATA }} steps: @@ -3106,6 +2403,13 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Integration tests (amd_asan, old analyzer, 1/6)" + - name: Prepare env script run: | rm -rf ./ci/tmp ./ci/tmp ./ci/tmp @@ -3126,16 +2430,16 @@ jobs: . 
./ci/tmp/praktika_setup_env.sh set -o pipefail if command -v ts &> /dev/null; then - python3 -m praktika run 'Integration tests (arm_binary, distributed plan, 4/4)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log + python3 -m praktika run 'Integration tests (amd_asan, old analyzer, 1/6)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log else - python3 -m praktika run 'Integration tests (arm_binary, distributed plan, 4/4)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log + python3 -m praktika run 'Integration tests (amd_asan, old analyzer, 1/6)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log fi - integration_tests_amd_tsan_1_6: - runs-on: [self-hosted, amd-medium] - needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_amd_tsan] - if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKGFtZF90c2FuLCAxLzYp') }} - name: "Integration tests (amd_tsan, 1/6)" + integration_tests_amd_asan_old_analyzer_2_6: + runs-on: [self-hosted, altinity-on-demand, altinity-func-tester] + needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_amd_asan] + if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKGFtZF9hc2FuLCBvbGQgYW5hbHl6ZXIsIDIvNik=') }} + name: "Integration tests (amd_asan, old analyzer, 2/6)" outputs: data: ${{ steps.run.outputs.DATA }} steps: @@ -3144,6 +2448,13 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Integration tests (amd_asan, old analyzer, 2/6)" + - name: Prepare env script run: | rm -rf ./ci/tmp ./ci/tmp ./ci/tmp @@ -3164,16 +2475,16 @@ jobs: . 
./ci/tmp/praktika_setup_env.sh set -o pipefail if command -v ts &> /dev/null; then - python3 -m praktika run 'Integration tests (amd_tsan, 1/6)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log + python3 -m praktika run 'Integration tests (amd_asan, old analyzer, 2/6)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log else - python3 -m praktika run 'Integration tests (amd_tsan, 1/6)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log + python3 -m praktika run 'Integration tests (amd_asan, old analyzer, 2/6)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log fi - integration_tests_amd_tsan_2_6: - runs-on: [self-hosted, amd-medium] - needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_amd_tsan] - if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKGFtZF90c2FuLCAyLzYp') }} - name: "Integration tests (amd_tsan, 2/6)" + integration_tests_amd_asan_old_analyzer_3_6: + runs-on: [self-hosted, altinity-on-demand, altinity-func-tester] + needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_amd_asan] + if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKGFtZF9hc2FuLCBvbGQgYW5hbHl6ZXIsIDMvNik=') }} + name: "Integration tests (amd_asan, old analyzer, 3/6)" outputs: data: ${{ steps.run.outputs.DATA }} steps: @@ -3182,6 +2493,13 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Integration tests (amd_asan, old analyzer, 3/6)" + - name: Prepare env script run: | rm -rf ./ci/tmp ./ci/tmp ./ci/tmp @@ -3202,16 +2520,16 @@ jobs: . 
./ci/tmp/praktika_setup_env.sh set -o pipefail if command -v ts &> /dev/null; then - python3 -m praktika run 'Integration tests (amd_tsan, 2/6)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log + python3 -m praktika run 'Integration tests (amd_asan, old analyzer, 3/6)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log else - python3 -m praktika run 'Integration tests (amd_tsan, 2/6)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log + python3 -m praktika run 'Integration tests (amd_asan, old analyzer, 3/6)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log fi - integration_tests_amd_tsan_3_6: - runs-on: [self-hosted, amd-medium] - needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_amd_tsan] - if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKGFtZF90c2FuLCAzLzYp') }} - name: "Integration tests (amd_tsan, 3/6)" + integration_tests_amd_asan_old_analyzer_4_6: + runs-on: [self-hosted, altinity-on-demand, altinity-func-tester] + needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_amd_asan] + if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKGFtZF9hc2FuLCBvbGQgYW5hbHl6ZXIsIDQvNik=') }} + name: "Integration tests (amd_asan, old analyzer, 4/6)" outputs: data: ${{ steps.run.outputs.DATA }} steps: @@ -3220,6 +2538,13 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Integration tests (amd_asan, old analyzer, 4/6)" + - name: Prepare env script run: | rm -rf ./ci/tmp ./ci/tmp ./ci/tmp @@ -3240,16 +2565,16 @@ jobs: . 
./ci/tmp/praktika_setup_env.sh set -o pipefail if command -v ts &> /dev/null; then - python3 -m praktika run 'Integration tests (amd_tsan, 3/6)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log + python3 -m praktika run 'Integration tests (amd_asan, old analyzer, 4/6)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log else - python3 -m praktika run 'Integration tests (amd_tsan, 3/6)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log + python3 -m praktika run 'Integration tests (amd_asan, old analyzer, 4/6)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log fi - integration_tests_amd_tsan_4_6: - runs-on: [self-hosted, amd-medium] - needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_amd_tsan] - if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKGFtZF90c2FuLCA0LzYp') }} - name: "Integration tests (amd_tsan, 4/6)" + integration_tests_amd_asan_old_analyzer_5_6: + runs-on: [self-hosted, altinity-on-demand, altinity-func-tester] + needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_amd_asan] + if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKGFtZF9hc2FuLCBvbGQgYW5hbHl6ZXIsIDUvNik=') }} + name: "Integration tests (amd_asan, old analyzer, 5/6)" outputs: data: ${{ steps.run.outputs.DATA }} steps: @@ -3258,6 +2583,13 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Integration tests (amd_asan, old analyzer, 5/6)" + - name: Prepare env script run: | rm -rf ./ci/tmp ./ci/tmp ./ci/tmp @@ -3278,16 +2610,16 @@ jobs: . 
./ci/tmp/praktika_setup_env.sh set -o pipefail if command -v ts &> /dev/null; then - python3 -m praktika run 'Integration tests (amd_tsan, 4/6)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log + python3 -m praktika run 'Integration tests (amd_asan, old analyzer, 5/6)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log else - python3 -m praktika run 'Integration tests (amd_tsan, 4/6)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log + python3 -m praktika run 'Integration tests (amd_asan, old analyzer, 5/6)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log fi - integration_tests_amd_tsan_5_6: - runs-on: [self-hosted, amd-medium] - needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_amd_tsan] - if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKGFtZF90c2FuLCA1LzYp') }} - name: "Integration tests (amd_tsan, 5/6)" + integration_tests_amd_asan_old_analyzer_6_6: + runs-on: [self-hosted, altinity-on-demand, altinity-func-tester] + needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_amd_asan] + if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKGFtZF9hc2FuLCBvbGQgYW5hbHl6ZXIsIDYvNik=') }} + name: "Integration tests (amd_asan, old analyzer, 6/6)" outputs: data: ${{ steps.run.outputs.DATA }} steps: @@ -3296,6 +2628,13 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Integration tests (amd_asan, old analyzer, 6/6)" + - name: Prepare env script run: | rm -rf ./ci/tmp ./ci/tmp ./ci/tmp @@ -3316,16 +2655,16 @@ jobs: . 
./ci/tmp/praktika_setup_env.sh set -o pipefail if command -v ts &> /dev/null; then - python3 -m praktika run 'Integration tests (amd_tsan, 5/6)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log + python3 -m praktika run 'Integration tests (amd_asan, old analyzer, 6/6)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log else - python3 -m praktika run 'Integration tests (amd_tsan, 5/6)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log + python3 -m praktika run 'Integration tests (amd_asan, old analyzer, 6/6)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log fi - integration_tests_amd_tsan_6_6: - runs-on: [self-hosted, amd-medium] - needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_amd_tsan] - if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKGFtZF90c2FuLCA2LzYp') }} - name: "Integration tests (amd_tsan, 6/6)" + integration_tests_amd_binary_1_5: + runs-on: [self-hosted, altinity-on-demand, altinity-func-tester] + needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_amd_binary] + if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKGFtZF9iaW5hcnksIDEvNSk=') }} + name: "Integration tests (amd_binary, 1/5)" outputs: data: ${{ steps.run.outputs.DATA }} steps: @@ -3334,6 +2673,13 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Integration tests (amd_binary, 1/5)" + - name: Prepare env script run: | rm -rf ./ci/tmp ./ci/tmp ./ci/tmp @@ -3354,16 +2700,16 @@ jobs: . 
./ci/tmp/praktika_setup_env.sh set -o pipefail if command -v ts &> /dev/null; then - python3 -m praktika run 'Integration tests (amd_tsan, 6/6)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log + python3 -m praktika run 'Integration tests (amd_binary, 1/5)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log else - python3 -m praktika run 'Integration tests (amd_tsan, 6/6)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log + python3 -m praktika run 'Integration tests (amd_binary, 1/5)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log fi - stateless_tests_arm_coverage_parallel: - runs-on: [self-hosted, arm-medium] - needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_arm_coverage] - if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhcm1fY292ZXJhZ2UsIHBhcmFsbGVsKQ==') }} - name: "Stateless tests (arm_coverage, parallel)" + integration_tests_amd_binary_2_5: + runs-on: [self-hosted, altinity-on-demand, altinity-func-tester] + needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_amd_binary] + if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKGFtZF9iaW5hcnksIDIvNSk=') }} + name: "Integration tests (amd_binary, 2/5)" outputs: data: ${{ steps.run.outputs.DATA }} steps: @@ -3372,6 +2718,13 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Integration tests (amd_binary, 2/5)" + - name: Prepare env script run: | rm -rf ./ci/tmp ./ci/tmp ./ci/tmp @@ -3392,16 +2745,16 @@ jobs: . 
./ci/tmp/praktika_setup_env.sh set -o pipefail if command -v ts &> /dev/null; then - python3 -m praktika run 'Stateless tests (arm_coverage, parallel)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log + python3 -m praktika run 'Integration tests (amd_binary, 2/5)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log else - python3 -m praktika run 'Stateless tests (arm_coverage, parallel)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log + python3 -m praktika run 'Integration tests (amd_binary, 2/5)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log fi - stateless_tests_arm_coverage_sequential: - runs-on: [self-hosted, arm-small-mem] - needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_arm_coverage] - if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhcm1fY292ZXJhZ2UsIHNlcXVlbnRpYWwp') }} - name: "Stateless tests (arm_coverage, sequential)" + integration_tests_amd_binary_3_5: + runs-on: [self-hosted, altinity-on-demand, altinity-func-tester] + needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_amd_binary] + if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKGFtZF9iaW5hcnksIDMvNSk=') }} + name: "Integration tests (amd_binary, 3/5)" outputs: data: ${{ steps.run.outputs.DATA }} steps: @@ -3410,6 +2763,13 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Integration tests (amd_binary, 3/5)" + - name: Prepare env script run: | rm -rf ./ci/tmp ./ci/tmp ./ci/tmp @@ -3430,16 +2790,16 @@ jobs: . 
./ci/tmp/praktika_setup_env.sh set -o pipefail if command -v ts &> /dev/null; then - python3 -m praktika run 'Stateless tests (arm_coverage, sequential)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log + python3 -m praktika run 'Integration tests (amd_binary, 3/5)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log else - python3 -m praktika run 'Stateless tests (arm_coverage, sequential)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log + python3 -m praktika run 'Integration tests (amd_binary, 3/5)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log fi - stress_test_amd_debug: - runs-on: [self-hosted, amd-medium] - needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_amd_debug] - if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'U3RyZXNzIHRlc3QgKGFtZF9kZWJ1Zyk=') }} - name: "Stress test (amd_debug)" + integration_tests_amd_binary_4_5: + runs-on: [self-hosted, altinity-on-demand, altinity-func-tester] + needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_amd_binary] + if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKGFtZF9iaW5hcnksIDQvNSk=') }} + name: "Integration tests (amd_binary, 4/5)" outputs: data: ${{ steps.run.outputs.DATA }} steps: @@ -3448,6 +2808,13 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Integration tests (amd_binary, 4/5)" + - name: Prepare env script run: | rm -rf ./ci/tmp ./ci/tmp ./ci/tmp @@ -3468,16 +2835,16 @@ jobs: . 
./ci/tmp/praktika_setup_env.sh set -o pipefail if command -v ts &> /dev/null; then - python3 -m praktika run 'Stress test (amd_debug)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log + python3 -m praktika run 'Integration tests (amd_binary, 4/5)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log else - python3 -m praktika run 'Stress test (amd_debug)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log + python3 -m praktika run 'Integration tests (amd_binary, 4/5)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log fi - stress_test_amd_tsan: - runs-on: [self-hosted, amd-medium] - needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_amd_tsan] - if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'U3RyZXNzIHRlc3QgKGFtZF90c2FuKQ==') }} - name: "Stress test (amd_tsan)" + integration_tests_amd_binary_5_5: + runs-on: [self-hosted, altinity-on-demand, altinity-func-tester] + needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_amd_binary] + if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKGFtZF9iaW5hcnksIDUvNSk=') }} + name: "Integration tests (amd_binary, 5/5)" outputs: data: ${{ steps.run.outputs.DATA }} steps: @@ -3486,6 +2853,13 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Integration tests (amd_binary, 5/5)" + - name: Prepare env script run: | rm -rf ./ci/tmp ./ci/tmp ./ci/tmp @@ -3506,16 +2880,16 @@ jobs: . 
./ci/tmp/praktika_setup_env.sh set -o pipefail if command -v ts &> /dev/null; then - python3 -m praktika run 'Stress test (amd_tsan)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log + python3 -m praktika run 'Integration tests (amd_binary, 5/5)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log else - python3 -m praktika run 'Stress test (amd_tsan)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log + python3 -m praktika run 'Integration tests (amd_binary, 5/5)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log fi - stress_test_arm_asan: - runs-on: [self-hosted, arm-medium] - needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_arm_asan] - if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'U3RyZXNzIHRlc3QgKGFybV9hc2FuKQ==') }} - name: "Stress test (arm_asan)" + integration_tests_arm_binary_distributed_plan_1_4: + runs-on: [self-hosted, altinity-on-demand, altinity-func-tester-aarch64] + needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_arm_binary] + if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKGFybV9iaW5hcnksIGRpc3RyaWJ1dGVkIHBsYW4sIDEvNCk=') }} + name: "Integration tests (arm_binary, distributed plan, 1/4)" outputs: data: ${{ steps.run.outputs.DATA }} steps: @@ -3524,6 +2898,13 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Integration tests (arm_binary, distributed plan, 1/4)" + - name: Prepare env script run: | rm -rf ./ci/tmp ./ci/tmp ./ci/tmp @@ -3544,16 +2925,16 @@ jobs: . 
./ci/tmp/praktika_setup_env.sh set -o pipefail if command -v ts &> /dev/null; then - python3 -m praktika run 'Stress test (arm_asan)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log + python3 -m praktika run 'Integration tests (arm_binary, distributed plan, 1/4)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log else - python3 -m praktika run 'Stress test (arm_asan)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log + python3 -m praktika run 'Integration tests (arm_binary, distributed plan, 1/4)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log fi - stress_test_amd_ubsan: - runs-on: [self-hosted, amd-medium] - needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_amd_ubsan] - if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'U3RyZXNzIHRlc3QgKGFtZF91YnNhbik=') }} - name: "Stress test (amd_ubsan)" + integration_tests_arm_binary_distributed_plan_2_4: + runs-on: [self-hosted, altinity-on-demand, altinity-func-tester-aarch64] + needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_arm_binary] + if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKGFybV9iaW5hcnksIGRpc3RyaWJ1dGVkIHBsYW4sIDIvNCk=') }} + name: "Integration tests (arm_binary, distributed plan, 2/4)" outputs: data: ${{ steps.run.outputs.DATA }} steps: @@ -3562,6 +2943,13 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Integration tests (arm_binary, distributed plan, 2/4)" + - name: Prepare env script run: | rm -rf ./ci/tmp ./ci/tmp ./ci/tmp @@ -3582,16 +2970,16 @@ jobs: . 
./ci/tmp/praktika_setup_env.sh set -o pipefail if command -v ts &> /dev/null; then - python3 -m praktika run 'Stress test (amd_ubsan)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log + python3 -m praktika run 'Integration tests (arm_binary, distributed plan, 2/4)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log else - python3 -m praktika run 'Stress test (amd_ubsan)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log + python3 -m praktika run 'Integration tests (arm_binary, distributed plan, 2/4)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log fi - stress_test_amd_msan: - runs-on: [self-hosted, amd-medium] - needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_amd_msan] - if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'U3RyZXNzIHRlc3QgKGFtZF9tc2FuKQ==') }} - name: "Stress test (amd_msan)" + integration_tests_arm_binary_distributed_plan_3_4: + runs-on: [self-hosted, altinity-on-demand, altinity-func-tester-aarch64] + needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_arm_binary] + if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKGFybV9iaW5hcnksIGRpc3RyaWJ1dGVkIHBsYW4sIDMvNCk=') }} + name: "Integration tests (arm_binary, distributed plan, 3/4)" outputs: data: ${{ steps.run.outputs.DATA }} steps: @@ -3600,6 +2988,13 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Integration tests (arm_binary, distributed plan, 3/4)" + - name: Prepare env script run: | rm -rf ./ci/tmp ./ci/tmp ./ci/tmp @@ -3620,16 +3015,16 @@ jobs: . 
./ci/tmp/praktika_setup_env.sh set -o pipefail if command -v ts &> /dev/null; then - python3 -m praktika run 'Stress test (amd_msan)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log + python3 -m praktika run 'Integration tests (arm_binary, distributed plan, 3/4)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log else - python3 -m praktika run 'Stress test (amd_msan)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log + python3 -m praktika run 'Integration tests (arm_binary, distributed plan, 3/4)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log fi - stress_test_azure_tsan: - runs-on: [self-hosted, amd-medium] - needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_amd_tsan] - if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'U3RyZXNzIHRlc3QgKGF6dXJlLCB0c2FuKQ==') }} - name: "Stress test (azure, tsan)" + integration_tests_arm_binary_distributed_plan_4_4: + runs-on: [self-hosted, altinity-on-demand, altinity-func-tester-aarch64] + needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_arm_binary] + if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKGFybV9iaW5hcnksIGRpc3RyaWJ1dGVkIHBsYW4sIDQvNCk=') }} + name: "Integration tests (arm_binary, distributed plan, 4/4)" outputs: data: ${{ steps.run.outputs.DATA }} steps: @@ -3638,6 +3033,13 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Integration tests (arm_binary, distributed plan, 4/4)" + - name: Prepare env script run: | rm -rf ./ci/tmp ./ci/tmp ./ci/tmp @@ -3658,16 +3060,16 @@ jobs: . 
./ci/tmp/praktika_setup_env.sh set -o pipefail if command -v ts &> /dev/null; then - python3 -m praktika run 'Stress test (azure, tsan)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log + python3 -m praktika run 'Integration tests (arm_binary, distributed plan, 4/4)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log else - python3 -m praktika run 'Stress test (azure, tsan)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log + python3 -m praktika run 'Integration tests (arm_binary, distributed plan, 4/4)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log fi - stress_test_azure_msan: - runs-on: [self-hosted, amd-medium] - needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_amd_msan] - if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'U3RyZXNzIHRlc3QgKGF6dXJlLCBtc2FuKQ==') }} - name: "Stress test (azure, msan)" + integration_tests_amd_tsan_1_6: + runs-on: [self-hosted, altinity-on-demand, altinity-func-tester] + needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_amd_tsan] + if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKGFtZF90c2FuLCAxLzYp') }} + name: "Integration tests (amd_tsan, 1/6)" outputs: data: ${{ steps.run.outputs.DATA }} steps: @@ -3676,6 +3078,13 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Integration tests (amd_tsan, 1/6)" + - name: Prepare env script run: | rm -rf ./ci/tmp ./ci/tmp ./ci/tmp @@ -3696,16 +3105,16 @@ jobs: . 
           set -o pipefail
           if command -v ts &> /dev/null; then
-            python3 -m praktika run 'Stress test (azure, msan)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
+            python3 -m praktika run 'Integration tests (amd_tsan, 1/6)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
           else
-            python3 -m praktika run 'Stress test (azure, msan)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log
+            python3 -m praktika run 'Integration tests (amd_tsan, 1/6)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log
           fi

-  ast_fuzzer_amd_debug:
-    runs-on: [self-hosted, amd-medium]
-    needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_amd_debug]
-    if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'QVNUIGZ1enplciAoYW1kX2RlYnVnKQ==') }}
-    name: "AST fuzzer (amd_debug)"
+  integration_tests_amd_tsan_2_6:
+    runs-on: [self-hosted, altinity-on-demand, altinity-func-tester]
+    needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_amd_tsan]
+    if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKGFtZF90c2FuLCAyLzYp') }}
+    name: "Integration tests (amd_tsan, 2/6)"
     outputs:
       data: ${{ steps.run.outputs.DATA }}
     steps:
@@ -3714,6 +3123,13 @@ jobs:
         with:
           ref: ${{ env.CHECKOUT_REF }}

+      - name: Setup
+        uses: ./.github/actions/runner_setup
+      - name: Docker setup
+        uses: ./.github/actions/docker_setup
+        with:
+          test_name: "Integration tests (amd_tsan, 2/6)"
+
       - name: Prepare env script
         run: |
           rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
@@ -3734,16 +3150,16 @@ jobs:
           . ./ci/tmp/praktika_setup_env.sh
           set -o pipefail
           if command -v ts &> /dev/null; then
-            python3 -m praktika run 'AST fuzzer (amd_debug)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
+            python3 -m praktika run 'Integration tests (amd_tsan, 2/6)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
           else
-            python3 -m praktika run 'AST fuzzer (amd_debug)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log
+            python3 -m praktika run 'Integration tests (amd_tsan, 2/6)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log
           fi

-  ast_fuzzer_arm_asan:
-    runs-on: [self-hosted, arm-medium]
-    needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_arm_asan]
-    if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'QVNUIGZ1enplciAoYXJtX2FzYW4p') }}
-    name: "AST fuzzer (arm_asan)"
+  integration_tests_amd_tsan_3_6:
+    runs-on: [self-hosted, altinity-on-demand, altinity-func-tester]
+    needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_amd_tsan]
+    if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKGFtZF90c2FuLCAzLzYp') }}
+    name: "Integration tests (amd_tsan, 3/6)"
     outputs:
       data: ${{ steps.run.outputs.DATA }}
     steps:
@@ -3752,6 +3168,13 @@ jobs:
         with:
           ref: ${{ env.CHECKOUT_REF }}

+      - name: Setup
+        uses: ./.github/actions/runner_setup
+      - name: Docker setup
+        uses: ./.github/actions/docker_setup
+        with:
+          test_name: "Integration tests (amd_tsan, 3/6)"
+
       - name: Prepare env script
         run: |
           rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
@@ -3772,16 +3195,16 @@ jobs:
           . ./ci/tmp/praktika_setup_env.sh
           set -o pipefail
           if command -v ts &> /dev/null; then
-            python3 -m praktika run 'AST fuzzer (arm_asan)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
+            python3 -m praktika run 'Integration tests (amd_tsan, 3/6)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
           else
-            python3 -m praktika run 'AST fuzzer (arm_asan)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log
+            python3 -m praktika run 'Integration tests (amd_tsan, 3/6)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log
           fi

-  ast_fuzzer_amd_tsan:
-    runs-on: [self-hosted, amd-medium]
+  integration_tests_amd_tsan_4_6:
+    runs-on: [self-hosted, altinity-on-demand, altinity-func-tester]
     needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_amd_tsan]
-    if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'QVNUIGZ1enplciAoYW1kX3RzYW4p') }}
-    name: "AST fuzzer (amd_tsan)"
+    if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKGFtZF90c2FuLCA0LzYp') }}
+    name: "Integration tests (amd_tsan, 4/6)"
     outputs:
       data: ${{ steps.run.outputs.DATA }}
     steps:
@@ -3790,6 +3213,13 @@ jobs:
         with:
           ref: ${{ env.CHECKOUT_REF }}

+      - name: Setup
+        uses: ./.github/actions/runner_setup
+      - name: Docker setup
+        uses: ./.github/actions/docker_setup
+        with:
+          test_name: "Integration tests (amd_tsan, 4/6)"
+
       - name: Prepare env script
         run: |
           rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
@@ -3810,16 +3240,16 @@ jobs:
           . ./ci/tmp/praktika_setup_env.sh
           set -o pipefail
           if command -v ts &> /dev/null; then
-            python3 -m praktika run 'AST fuzzer (amd_tsan)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
+            python3 -m praktika run 'Integration tests (amd_tsan, 4/6)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
           else
-            python3 -m praktika run 'AST fuzzer (amd_tsan)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log
+            python3 -m praktika run 'Integration tests (amd_tsan, 4/6)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log
           fi

-  ast_fuzzer_amd_msan:
-    runs-on: [self-hosted, amd-medium]
-    needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_amd_msan]
-    if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'QVNUIGZ1enplciAoYW1kX21zYW4p') }}
-    name: "AST fuzzer (amd_msan)"
+  integration_tests_amd_tsan_5_6:
+    runs-on: [self-hosted, altinity-on-demand, altinity-func-tester]
+    needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_amd_tsan]
+    if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKGFtZF90c2FuLCA1LzYp') }}
+    name: "Integration tests (amd_tsan, 5/6)"
     outputs:
       data: ${{ steps.run.outputs.DATA }}
     steps:
@@ -3828,6 +3258,13 @@ jobs:
         with:
           ref: ${{ env.CHECKOUT_REF }}

+      - name: Setup
+        uses: ./.github/actions/runner_setup
+      - name: Docker setup
+        uses: ./.github/actions/docker_setup
+        with:
+          test_name: "Integration tests (amd_tsan, 5/6)"
+
       - name: Prepare env script
         run: |
           rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
@@ -3848,16 +3285,16 @@ jobs:
           . ./ci/tmp/praktika_setup_env.sh
           set -o pipefail
           if command -v ts &> /dev/null; then
-            python3 -m praktika run 'AST fuzzer (amd_msan)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
+            python3 -m praktika run 'Integration tests (amd_tsan, 5/6)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
           else
-            python3 -m praktika run 'AST fuzzer (amd_msan)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log
+            python3 -m praktika run 'Integration tests (amd_tsan, 5/6)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log
           fi

-  ast_fuzzer_amd_ubsan:
-    runs-on: [self-hosted, amd-medium]
-    needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_amd_ubsan]
-    if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'QVNUIGZ1enplciAoYW1kX3Vic2FuKQ==') }}
-    name: "AST fuzzer (amd_ubsan)"
+  integration_tests_amd_tsan_6_6:
+    runs-on: [self-hosted, altinity-on-demand, altinity-func-tester]
+    needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_amd_tsan]
+    if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKGFtZF90c2FuLCA2LzYp') }}
+    name: "Integration tests (amd_tsan, 6/6)"
     outputs:
       data: ${{ steps.run.outputs.DATA }}
     steps:
@@ -3866,6 +3303,13 @@ jobs:
         with:
           ref: ${{ env.CHECKOUT_REF }}

+      - name: Setup
+        uses: ./.github/actions/runner_setup
+      - name: Docker setup
+        uses: ./.github/actions/docker_setup
+        with:
+          test_name: "Integration tests (amd_tsan, 6/6)"
+
       - name: Prepare env script
         run: |
           rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
@@ -3886,16 +3330,16 @@ jobs:
           . ./ci/tmp/praktika_setup_env.sh
           set -o pipefail
           if command -v ts &> /dev/null; then
-            python3 -m praktika run 'AST fuzzer (amd_ubsan)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
+            python3 -m praktika run 'Integration tests (amd_tsan, 6/6)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
           else
-            python3 -m praktika run 'AST fuzzer (amd_ubsan)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log
+            python3 -m praktika run 'Integration tests (amd_tsan, 6/6)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log
           fi

-  buzzhouse_amd_debug:
-    runs-on: [self-hosted, amd-medium]
-    needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_amd_debug]
-    if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'QnV6ekhvdXNlIChhbWRfZGVidWcp') }}
-    name: "BuzzHouse (amd_debug)"
+  stateless_tests_arm_coverage_parallel:
+    runs-on: [self-hosted, altinity-on-demand, altinity-func-tester-aarch64]
+    needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_arm_coverage]
+    if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhcm1fY292ZXJhZ2UsIHBhcmFsbGVsKQ==') }}
+    name: "Stateless tests (arm_coverage, parallel)"
     outputs:
       data: ${{ steps.run.outputs.DATA }}
     steps:
@@ -3904,6 +3348,13 @@ jobs:
         with:
           ref: ${{ env.CHECKOUT_REF }}

+      - name: Setup
+        uses: ./.github/actions/runner_setup
+      - name: Docker setup
+        uses: ./.github/actions/docker_setup
+        with:
+          test_name: "Stateless tests (arm_coverage, parallel)"
+
       - name: Prepare env script
         run: |
           rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
@@ -3924,16 +3375,16 @@ jobs:
           . ./ci/tmp/praktika_setup_env.sh
           set -o pipefail
           if command -v ts &> /dev/null; then
-            python3 -m praktika run 'BuzzHouse (amd_debug)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
+            python3 -m praktika run 'Stateless tests (arm_coverage, parallel)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
           else
-            python3 -m praktika run 'BuzzHouse (amd_debug)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log
+            python3 -m praktika run 'Stateless tests (arm_coverage, parallel)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log
           fi

-  buzzhouse_arm_asan:
-    runs-on: [self-hosted, arm-medium]
-    needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_arm_asan]
-    if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'QnV6ekhvdXNlIChhcm1fYXNhbik=') }}
-    name: "BuzzHouse (arm_asan)"
+  stateless_tests_arm_coverage_sequential:
+    runs-on: [self-hosted, altinity-on-demand, altinity-func-tester-aarch64]
+    needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_arm_coverage]
+    if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhcm1fY292ZXJhZ2UsIHNlcXVlbnRpYWwp') }}
+    name: "Stateless tests (arm_coverage, sequential)"
     outputs:
       data: ${{ steps.run.outputs.DATA }}
     steps:
@@ -3942,6 +3393,13 @@ jobs:
         with:
           ref: ${{ env.CHECKOUT_REF }}

+      - name: Setup
+        uses: ./.github/actions/runner_setup
+      - name: Docker setup
+        uses: ./.github/actions/docker_setup
+        with:
+          test_name: "Stateless tests (arm_coverage, sequential)"
+
       - name: Prepare env script
         run: |
           rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
@@ -3962,16 +3420,16 @@ jobs:
           . ./ci/tmp/praktika_setup_env.sh
           set -o pipefail
           if command -v ts &> /dev/null; then
-            python3 -m praktika run 'BuzzHouse (arm_asan)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
+            python3 -m praktika run 'Stateless tests (arm_coverage, sequential)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
           else
-            python3 -m praktika run 'BuzzHouse (arm_asan)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log
+            python3 -m praktika run 'Stateless tests (arm_coverage, sequential)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log
           fi

-  buzzhouse_amd_tsan:
-    runs-on: [self-hosted, amd-medium]
-    needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_amd_tsan]
-    if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'QnV6ekhvdXNlIChhbWRfdHNhbik=') }}
-    name: "BuzzHouse (amd_tsan)"
+  stress_test_amd_debug:
+    runs-on: [self-hosted, altinity-on-demand, altinity-func-tester]
+    needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_amd_debug]
+    if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'U3RyZXNzIHRlc3QgKGFtZF9kZWJ1Zyk=') }}
+    name: "Stress test (amd_debug)"
     outputs:
       data: ${{ steps.run.outputs.DATA }}
     steps:
@@ -3980,6 +3438,13 @@ jobs:
         with:
           ref: ${{ env.CHECKOUT_REF }}

+      - name: Setup
+        uses: ./.github/actions/runner_setup
+      - name: Docker setup
+        uses: ./.github/actions/docker_setup
+        with:
+          test_name: "Stress test (amd_debug)"
+
       - name: Prepare env script
         run: |
           rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
@@ -4000,16 +3465,16 @@ jobs:
           . ./ci/tmp/praktika_setup_env.sh
           set -o pipefail
           if command -v ts &> /dev/null; then
-            python3 -m praktika run 'BuzzHouse (amd_tsan)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
+            python3 -m praktika run 'Stress test (amd_debug)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
           else
-            python3 -m praktika run 'BuzzHouse (amd_tsan)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log
+            python3 -m praktika run 'Stress test (amd_debug)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log
           fi

-  buzzhouse_amd_msan:
-    runs-on: [self-hosted, amd-medium]
-    needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_amd_msan]
-    if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'QnV6ekhvdXNlIChhbWRfbXNhbik=') }}
-    name: "BuzzHouse (amd_msan)"
+  stress_test_amd_tsan:
+    runs-on: [self-hosted, altinity-on-demand, altinity-func-tester]
+    needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_amd_tsan]
+    if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'U3RyZXNzIHRlc3QgKGFtZF90c2FuKQ==') }}
+    name: "Stress test (amd_tsan)"
     outputs:
       data: ${{ steps.run.outputs.DATA }}
     steps:
@@ -4018,6 +3483,13 @@ jobs:
         with:
           ref: ${{ env.CHECKOUT_REF }}

+      - name: Setup
+        uses: ./.github/actions/runner_setup
+      - name: Docker setup
+        uses: ./.github/actions/docker_setup
+        with:
+          test_name: "Stress test (amd_tsan)"
+
       - name: Prepare env script
         run: |
           rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
@@ -4038,16 +3510,16 @@ jobs:
           . ./ci/tmp/praktika_setup_env.sh
           set -o pipefail
           if command -v ts &> /dev/null; then
-            python3 -m praktika run 'BuzzHouse (amd_msan)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
+            python3 -m praktika run 'Stress test (amd_tsan)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
           else
-            python3 -m praktika run 'BuzzHouse (amd_msan)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log
+            python3 -m praktika run 'Stress test (amd_tsan)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log
           fi

-  buzzhouse_amd_ubsan:
-    runs-on: [self-hosted, amd-medium]
+  stress_test_amd_ubsan:
+    runs-on: [self-hosted, altinity-on-demand, altinity-func-tester]
     needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_amd_ubsan]
-    if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'QnV6ekhvdXNlIChhbWRfdWJzYW4p') }}
-    name: "BuzzHouse (amd_ubsan)"
+    if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'U3RyZXNzIHRlc3QgKGFtZF91YnNhbik=') }}
+    name: "Stress test (amd_ubsan)"
     outputs:
       data: ${{ steps.run.outputs.DATA }}
     steps:
@@ -4056,6 +3528,13 @@ jobs:
         with:
           ref: ${{ env.CHECKOUT_REF }}

+      - name: Setup
+        uses: ./.github/actions/runner_setup
+      - name: Docker setup
+        uses: ./.github/actions/docker_setup
+        with:
+          test_name: "Stress test (amd_ubsan)"
+
       - name: Prepare env script
         run: |
           rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
@@ -4076,16 +3555,16 @@ jobs:
           . ./ci/tmp/praktika_setup_env.sh
           set -o pipefail
           if command -v ts &> /dev/null; then
-            python3 -m praktika run 'BuzzHouse (amd_ubsan)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
+            python3 -m praktika run 'Stress test (amd_ubsan)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
           else
-            python3 -m praktika run 'BuzzHouse (amd_ubsan)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log
+            python3 -m praktika run 'Stress test (amd_ubsan)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log
           fi

-  performance_comparison_amd_release_master_head_1_3:
-    runs-on: [self-hosted, amd-medium]
-    needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_amd_release]
-    if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'UGVyZm9ybWFuY2UgQ29tcGFyaXNvbiAoYW1kX3JlbGVhc2UsIG1hc3Rlcl9oZWFkLCAxLzMp') }}
-    name: "Performance Comparison (amd_release, master_head, 1/3)"
+  stress_test_amd_msan:
+    runs-on: [self-hosted, altinity-on-demand, altinity-func-tester]
+    needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_amd_msan]
+    if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'U3RyZXNzIHRlc3QgKGFtZF9tc2FuKQ==') }}
+    name: "Stress test (amd_msan)"
     outputs:
       data: ${{ steps.run.outputs.DATA }}
     steps:
@@ -4094,6 +3573,13 @@ jobs:
         with:
           ref: ${{ env.CHECKOUT_REF }}

+      - name: Setup
+        uses: ./.github/actions/runner_setup
+      - name: Docker setup
+        uses: ./.github/actions/docker_setup
+        with:
+          test_name: "Stress test (amd_msan)"
+
       - name: Prepare env script
         run: |
           rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
@@ -4114,16 +3600,16 @@ jobs:
           . ./ci/tmp/praktika_setup_env.sh
           set -o pipefail
           if command -v ts &> /dev/null; then
-            python3 -m praktika run 'Performance Comparison (amd_release, master_head, 1/3)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
+            python3 -m praktika run 'Stress test (amd_msan)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
           else
-            python3 -m praktika run 'Performance Comparison (amd_release, master_head, 1/3)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log
+            python3 -m praktika run 'Stress test (amd_msan)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log
           fi

-  performance_comparison_amd_release_master_head_2_3:
-    runs-on: [self-hosted, amd-medium]
-    needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_amd_release]
-    if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'UGVyZm9ybWFuY2UgQ29tcGFyaXNvbiAoYW1kX3JlbGVhc2UsIG1hc3Rlcl9oZWFkLCAyLzMp') }}
-    name: "Performance Comparison (amd_release, master_head, 2/3)"
+  ast_fuzzer_amd_debug:
+    runs-on: [self-hosted, altinity-on-demand, altinity-func-tester]
+    needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_amd_debug]
+    if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'QVNUIGZ1enplciAoYW1kX2RlYnVnKQ==') }}
+    name: "AST fuzzer (amd_debug)"
     outputs:
       data: ${{ steps.run.outputs.DATA }}
     steps:
@@ -4132,6 +3618,13 @@ jobs:
         with:
           ref: ${{ env.CHECKOUT_REF }}

+      - name: Setup
+        uses: ./.github/actions/runner_setup
+      - name: Docker setup
+        uses: ./.github/actions/docker_setup
+        with:
+          test_name: "AST fuzzer (amd_debug)"
+
       - name: Prepare env script
         run: |
           rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
@@ -4152,16 +3645,16 @@ jobs:
           . ./ci/tmp/praktika_setup_env.sh
           set -o pipefail
           if command -v ts &> /dev/null; then
-            python3 -m praktika run 'Performance Comparison (amd_release, master_head, 2/3)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
+            python3 -m praktika run 'AST fuzzer (amd_debug)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
           else
-            python3 -m praktika run 'Performance Comparison (amd_release, master_head, 2/3)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log
+            python3 -m praktika run 'AST fuzzer (amd_debug)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log
           fi

-  performance_comparison_amd_release_master_head_3_3:
-    runs-on: [self-hosted, amd-medium]
-    needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_amd_release]
-    if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'UGVyZm9ybWFuY2UgQ29tcGFyaXNvbiAoYW1kX3JlbGVhc2UsIG1hc3Rlcl9oZWFkLCAzLzMp') }}
-    name: "Performance Comparison (amd_release, master_head, 3/3)"
+  ast_fuzzer_amd_tsan:
+    runs-on: [self-hosted, altinity-on-demand, altinity-func-tester]
+    needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_amd_tsan]
+    if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'QVNUIGZ1enplciAoYW1kX3RzYW4p') }}
+    name: "AST fuzzer (amd_tsan)"
     outputs:
       data: ${{ steps.run.outputs.DATA }}
     steps:
@@ -4170,6 +3663,13 @@ jobs:
         with:
           ref: ${{ env.CHECKOUT_REF }}

+      - name: Setup
+        uses: ./.github/actions/runner_setup
+      - name: Docker setup
+        uses: ./.github/actions/docker_setup
+        with:
+          test_name: "AST fuzzer (amd_tsan)"
+
       - name: Prepare env script
         run: |
           rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
@@ -4190,16 +3690,16 @@ jobs:
           . ./ci/tmp/praktika_setup_env.sh
           set -o pipefail
           if command -v ts &> /dev/null; then
-            python3 -m praktika run 'Performance Comparison (amd_release, master_head, 3/3)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
+            python3 -m praktika run 'AST fuzzer (amd_tsan)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
           else
-            python3 -m praktika run 'Performance Comparison (amd_release, master_head, 3/3)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log
+            python3 -m praktika run 'AST fuzzer (amd_tsan)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log
           fi

-  performance_comparison_arm_release_master_head_1_3:
-    runs-on: [self-hosted, arm-medium]
-    needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_arm_release]
-    if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'UGVyZm9ybWFuY2UgQ29tcGFyaXNvbiAoYXJtX3JlbGVhc2UsIG1hc3Rlcl9oZWFkLCAxLzMp') }}
-    name: "Performance Comparison (arm_release, master_head, 1/3)"
+  ast_fuzzer_amd_msan:
+    runs-on: [self-hosted, altinity-on-demand, altinity-func-tester]
+    needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_amd_msan]
+    if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'QVNUIGZ1enplciAoYW1kX21zYW4p') }}
+    name: "AST fuzzer (amd_msan)"
     outputs:
       data: ${{ steps.run.outputs.DATA }}
     steps:
@@ -4208,6 +3708,13 @@ jobs:
         with:
           ref: ${{ env.CHECKOUT_REF }}

+      - name: Setup
+        uses: ./.github/actions/runner_setup
+      - name: Docker setup
+        uses: ./.github/actions/docker_setup
+        with:
+          test_name: "AST fuzzer (amd_msan)"
+
       - name: Prepare env script
         run: |
           rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
@@ -4228,16 +3735,16 @@ jobs:
           . ./ci/tmp/praktika_setup_env.sh
           set -o pipefail
           if command -v ts &> /dev/null; then
-            python3 -m praktika run 'Performance Comparison (arm_release, master_head, 1/3)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
+            python3 -m praktika run 'AST fuzzer (amd_msan)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
           else
-            python3 -m praktika run 'Performance Comparison (arm_release, master_head, 1/3)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log
+            python3 -m praktika run 'AST fuzzer (amd_msan)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log
           fi

-  performance_comparison_arm_release_master_head_2_3:
-    runs-on: [self-hosted, arm-medium]
-    needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_arm_release]
-    if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'UGVyZm9ybWFuY2UgQ29tcGFyaXNvbiAoYXJtX3JlbGVhc2UsIG1hc3Rlcl9oZWFkLCAyLzMp') }}
-    name: "Performance Comparison (arm_release, master_head, 2/3)"
+  ast_fuzzer_amd_ubsan:
+    runs-on: [self-hosted, altinity-on-demand, altinity-func-tester]
+    needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_amd_ubsan]
+    if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'QVNUIGZ1enplciAoYW1kX3Vic2FuKQ==') }}
+    name: "AST fuzzer (amd_ubsan)"
     outputs:
       data: ${{ steps.run.outputs.DATA }}
     steps:
@@ -4246,6 +3753,13 @@ jobs:
         with:
           ref: ${{ env.CHECKOUT_REF }}

+      - name: Setup
+        uses: ./.github/actions/runner_setup
+      - name: Docker setup
+        uses: ./.github/actions/docker_setup
+        with:
+          test_name: "AST fuzzer (amd_ubsan)"
+
       - name: Prepare env script
         run: |
           rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
@@ -4266,16 +3780,16 @@ jobs:
           . ./ci/tmp/praktika_setup_env.sh
           set -o pipefail
           if command -v ts &> /dev/null; then
-            python3 -m praktika run 'Performance Comparison (arm_release, master_head, 2/3)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
+            python3 -m praktika run 'AST fuzzer (amd_ubsan)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
           else
-            python3 -m praktika run 'Performance Comparison (arm_release, master_head, 2/3)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log
+            python3 -m praktika run 'AST fuzzer (amd_ubsan)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log
           fi

-  performance_comparison_arm_release_master_head_3_3:
-    runs-on: [self-hosted, arm-medium]
-    needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_arm_release]
-    if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'UGVyZm9ybWFuY2UgQ29tcGFyaXNvbiAoYXJtX3JlbGVhc2UsIG1hc3Rlcl9oZWFkLCAzLzMp') }}
-    name: "Performance Comparison (arm_release, master_head, 3/3)"
+  buzzhouse_amd_debug:
+    runs-on: [self-hosted, altinity-on-demand, altinity-func-tester]
+    needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_amd_debug]
+    if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'QnV6ekhvdXNlIChhbWRfZGVidWcp') }}
+    name: "BuzzHouse (amd_debug)"
     outputs:
       data: ${{ steps.run.outputs.DATA }}
     steps:
@@ -4284,6 +3798,13 @@ jobs:
         with:
           ref: ${{ env.CHECKOUT_REF }}

+      - name: Setup
+        uses: ./.github/actions/runner_setup
+      - name: Docker setup
+        uses: ./.github/actions/docker_setup
+        with:
+          test_name: "BuzzHouse (amd_debug)"
+
       - name: Prepare env script
         run: |
           rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
@@ -4304,16 +3825,16 @@ jobs:
           . ./ci/tmp/praktika_setup_env.sh
           set -o pipefail
           if command -v ts &> /dev/null; then
-            python3 -m praktika run 'Performance Comparison (arm_release, master_head, 3/3)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
+            python3 -m praktika run 'BuzzHouse (amd_debug)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
           else
-            python3 -m praktika run 'Performance Comparison (arm_release, master_head, 3/3)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log
+            python3 -m praktika run 'BuzzHouse (amd_debug)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log
           fi

-  performance_comparison_arm_release_release_base_1_3:
-    runs-on: [self-hosted, arm-medium]
-    needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_arm_release]
-    if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'UGVyZm9ybWFuY2UgQ29tcGFyaXNvbiAoYXJtX3JlbGVhc2UsIHJlbGVhc2VfYmFzZSwgMS8zKQ==') }}
-    name: "Performance Comparison (arm_release, release_base, 1/3)"
+  buzzhouse_amd_tsan:
+    runs-on: [self-hosted, altinity-on-demand, altinity-func-tester]
+    needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_amd_tsan]
+    if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'QnV6ekhvdXNlIChhbWRfdHNhbik=') }}
+    name: "BuzzHouse (amd_tsan)"
     outputs:
       data: ${{ steps.run.outputs.DATA }}
     steps:
@@ -4322,6 +3843,13 @@ jobs:
         with:
           ref: ${{ env.CHECKOUT_REF }}

+      - name: Setup
+        uses: ./.github/actions/runner_setup
+      - name: Docker setup
+        uses: ./.github/actions/docker_setup
+        with:
+          test_name: "BuzzHouse (amd_tsan)"
+
       - name: Prepare env script
         run: |
           rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
@@ -4342,16 +3870,16 @@ jobs:
           . ./ci/tmp/praktika_setup_env.sh
           set -o pipefail
           if command -v ts &> /dev/null; then
-            python3 -m praktika run 'Performance Comparison (arm_release, release_base, 1/3)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
+            python3 -m praktika run 'BuzzHouse (amd_tsan)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
           else
-            python3 -m praktika run 'Performance Comparison (arm_release, release_base, 1/3)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log
+            python3 -m praktika run 'BuzzHouse (amd_tsan)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log
           fi

-  performance_comparison_arm_release_release_base_2_3:
-    runs-on: [self-hosted, arm-medium]
-    needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_arm_release]
-    if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'UGVyZm9ybWFuY2UgQ29tcGFyaXNvbiAoYXJtX3JlbGVhc2UsIHJlbGVhc2VfYmFzZSwgMi8zKQ==') }}
-    name: "Performance Comparison (arm_release, release_base, 2/3)"
+  buzzhouse_amd_msan:
+    runs-on: [self-hosted, altinity-on-demand, altinity-func-tester]
+    needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_amd_msan]
+    if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'QnV6ekhvdXNlIChhbWRfbXNhbik=') }}
+    name: "BuzzHouse (amd_msan)"
     outputs:
       data: ${{ steps.run.outputs.DATA }}
     steps:
@@ -4360,6 +3888,13 @@ jobs:
         with:
           ref: ${{ env.CHECKOUT_REF }}

+      - name: Setup
+        uses: ./.github/actions/runner_setup
+      - name: Docker setup
+        uses: ./.github/actions/docker_setup
+        with:
+          test_name: "BuzzHouse (amd_msan)"
+
       - name: Prepare env script
         run: |
           rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
@@ -4380,16 +3915,16 @@ jobs:
           . ./ci/tmp/praktika_setup_env.sh
           set -o pipefail
           if command -v ts &> /dev/null; then
-            python3 -m praktika run 'Performance Comparison (arm_release, release_base, 2/3)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
+            python3 -m praktika run 'BuzzHouse (amd_msan)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
           else
-            python3 -m praktika run 'Performance Comparison (arm_release, release_base, 2/3)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log
+            python3 -m praktika run 'BuzzHouse (amd_msan)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log
           fi

-  performance_comparison_arm_release_release_base_3_3:
-    runs-on: [self-hosted, arm-medium]
-    needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_arm_release]
-    if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'UGVyZm9ybWFuY2UgQ29tcGFyaXNvbiAoYXJtX3JlbGVhc2UsIHJlbGVhc2VfYmFzZSwgMy8zKQ==') }}
-    name: "Performance Comparison (arm_release, release_base, 3/3)"
+  buzzhouse_amd_ubsan:
+    runs-on: [self-hosted, altinity-on-demand, altinity-func-tester]
+    needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_amd_ubsan]
+    if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'QnV6ekhvdXNlIChhbWRfdWJzYW4p') }}
+    name: "BuzzHouse (amd_ubsan)"
     outputs:
       data: ${{ steps.run.outputs.DATA }}
     steps:
@@ -4398,6 +3933,13 @@ jobs:
         with:
           ref: ${{ env.CHECKOUT_REF }}

+      - name: Setup
+        uses: ./.github/actions/runner_setup
+      - name: Docker setup
+        uses: ./.github/actions/docker_setup
+        with:
+          test_name: "BuzzHouse (amd_ubsan)"
+
       - name: Prepare env script
         run: |
           rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
@@ -4418,13 +3960,13 @@ jobs:
           . ./ci/tmp/praktika_setup_env.sh
           set -o pipefail
           if command -v ts &> /dev/null; then
-            python3 -m praktika run 'Performance Comparison (arm_release, release_base, 3/3)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
+            python3 -m praktika run 'BuzzHouse (amd_ubsan)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
           else
-            python3 -m praktika run 'Performance Comparison (arm_release, release_base, 3/3)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log
+            python3 -m praktika run 'BuzzHouse (amd_ubsan)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log
           fi

   clickbench_amd_release:
-    runs-on: [self-hosted, amd-medium]
+    runs-on: [self-hosted, altinity-on-demand, altinity-func-tester]
     needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_amd_release]
     if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'Q2xpY2tCZW5jaCAoYW1kX3JlbGVhc2Up') }}
     name: "ClickBench (amd_release)"
@@ -4436,6 +3978,13 @@ jobs:
         with:
           ref: ${{ env.CHECKOUT_REF }}

+      - name: Setup
+        uses: ./.github/actions/runner_setup
+      - name: Docker setup
+        uses: ./.github/actions/docker_setup
+        with:
+          test_name: "ClickBench (amd_release)"
+
       - name: Prepare env script
         run: |
           rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
@@ -4462,7 +4011,7 @@ jobs:
           fi

   clickbench_arm_release:
-    runs-on: [self-hosted, arm-medium]
+    runs-on: [self-hosted, altinity-on-demand, altinity-func-tester-aarch64]
     needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_arm_release]
     if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'Q2xpY2tCZW5jaCAoYXJtX3JlbGVhc2Up') }}
     name: "ClickBench (arm_release)"
@@ -4474,43 +4023,12 @@ jobs:
         with:
           ref: ${{ env.CHECKOUT_REF }}

-      - name: Prepare env script
-        run: |
-          rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
-          mkdir -p ./ci/tmp ./ci/tmp ./ci/tmp
-          cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF'
-          export PYTHONPATH=./ci:.:
-          cat > ./ci/tmp/workflow_config_masterci.json << 'EOF'
-          ${{ needs.config_workflow.outputs.data }}
-          EOF
-          cat > ./ci/tmp/workflow_status.json << 'EOF'
-          ${{ toJson(needs) }}
-          EOF
-          ENV_SETUP_SCRIPT_EOF
-
-      - name: Run
-        id: run
-        run: |
-          . ./ci/tmp/praktika_setup_env.sh
-          set -o pipefail
-          if command -v ts &> /dev/null; then
-            python3 -m praktika run 'ClickBench (arm_release)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
-          else
-            python3 -m praktika run 'ClickBench (arm_release)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log
-          fi
-
-  sqlancer_amd_debug:
-    runs-on: [self-hosted, amd-medium]
-    needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_amd_debug]
-    if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'U1FMYW5jZXIgKGFtZF9kZWJ1Zyk=') }}
-    name: "SQLancer (amd_debug)"
-    outputs:
-      data: ${{ steps.run.outputs.DATA }}
-    steps:
-      - name: Checkout code
-        uses: actions/checkout@v4
+      - name: Setup
+        uses: ./.github/actions/runner_setup
+      - name: Docker setup
+        uses: ./.github/actions/docker_setup
         with:
-          ref: ${{ env.CHECKOUT_REF }}
+          test_name: "ClickBench (arm_release)"

       - name: Prepare env script
         run: |
@@ -4532,13 +4050,13 @@ jobs:
           . ./ci/tmp/praktika_setup_env.sh
           set -o pipefail
           if command -v ts &> /dev/null; then
-            python3 -m praktika run 'SQLancer (amd_debug)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
+            python3 -m praktika run 'ClickBench (arm_release)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
           else
-            python3 -m praktika run 'SQLancer (amd_debug)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log
+            python3 -m praktika run 'ClickBench (arm_release)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log
           fi

   sqltest:
-    runs-on: [self-hosted, arm-medium]
+    runs-on: [self-hosted, altinity-on-demand, altinity-func-tester-aarch64]
     needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_arm_release]
     if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'U1FMVGVzdA==') }}
     name: "SQLTest"
@@ -4550,6 +4068,13 @@ jobs:
         with:
           ref: ${{ env.CHECKOUT_REF }}

+      - name: Setup
+        uses: ./.github/actions/runner_setup
+      - name: Docker setup
+        uses: ./.github/actions/docker_setup
+        with:
+          test_name: "SQLTest"
+
       - name: Prepare env script
         run: |
           rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
@@ -4576,8 +4101,8 @@ jobs:
           fi

   finish_workflow:
-    runs-on: [self-hosted, style-checker-aarch64]
-    needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_arm_tidy, build_amd_debug, build_amd_release, build_amd_asan, build_amd_tsan, build_amd_msan, build_amd_ubsan, build_amd_binary, build_arm_release, build_arm_asan, build_arm_coverage, build_arm_binary, build_amd_darwin, build_arm_darwin, build_arm_v80compat, build_amd_freebsd, build_ppc64le, build_amd_compat, build_amd_musl, build_riscv64, build_s390x, build_loongarch64, build_fuzzers, unit_tests_asan, unit_tests_tsan, unit_tests_msan, unit_tests_ubsan, docker_server_image, docker_keeper_image, install_packages_amd_release, install_packages_arm_release, compatibility_check_release, compatibility_check_aarch64, stateless_tests_amd_asan_distributed_plan_parallel_1_2, stateless_tests_amd_asan_distributed_plan_parallel_2_2, stateless_tests_amd_asan_distributed_plan_sequential, stateless_tests_amd_binary_old_analyzer_s3_storage_databasereplicated_parallel, stateless_tests_amd_binary_old_analyzer_s3_storage_databasereplicated_sequential, stateless_tests_amd_binary_parallelreplicas_s3_storage_parallel, stateless_tests_amd_binary_parallelreplicas_s3_storage_sequential, stateless_tests_amd_debug_asyncinsert_s3_storage_parallel, stateless_tests_amd_debug_asyncinsert_s3_storage_sequential, stateless_tests_amd_debug_parallel, stateless_tests_amd_debug_sequential, stateless_tests_amd_tsan_parallel_1_2, stateless_tests_amd_tsan_parallel_2_2, stateless_tests_amd_tsan_sequential_1_2, stateless_tests_amd_tsan_sequential_2_2, stateless_tests_amd_msan_parallel_1_2, stateless_tests_amd_msan_parallel_2_2, stateless_tests_amd_msan_sequential_1_2, stateless_tests_amd_msan_sequential_2_2, stateless_tests_amd_ubsan_parallel, stateless_tests_amd_ubsan_sequential, stateless_tests_amd_debug_distributed_plan_s3_storage_parallel, stateless_tests_amd_debug_distributed_plan_s3_storage_sequential, stateless_tests_amd_tsan_s3_storage_parallel, stateless_tests_amd_tsan_s3_storage_sequential_1_2, stateless_tests_amd_tsan_s3_storage_sequential_2_2, stateless_tests_arm_binary_parallel, stateless_tests_arm_binary_sequential, stateless_tests_arm_asan_azure_parallel, stateless_tests_arm_asan_azure_sequential, integration_tests_amd_asan_old_analyzer_1_6, integration_tests_amd_asan_old_analyzer_2_6, integration_tests_amd_asan_old_analyzer_3_6, integration_tests_amd_asan_old_analyzer_4_6, integration_tests_amd_asan_old_analyzer_5_6, integration_tests_amd_asan_old_analyzer_6_6, integration_tests_amd_binary_1_5, integration_tests_amd_binary_2_5, integration_tests_amd_binary_3_5, integration_tests_amd_binary_4_5, integration_tests_amd_binary_5_5, integration_tests_arm_binary_distributed_plan_1_4, integration_tests_arm_binary_distributed_plan_2_4, integration_tests_arm_binary_distributed_plan_3_4, integration_tests_arm_binary_distributed_plan_4_4, integration_tests_amd_tsan_1_6, integration_tests_amd_tsan_2_6, integration_tests_amd_tsan_3_6, integration_tests_amd_tsan_4_6, integration_tests_amd_tsan_5_6, integration_tests_amd_tsan_6_6, stateless_tests_arm_coverage_parallel, stateless_tests_arm_coverage_sequential, stress_test_amd_debug, stress_test_amd_tsan, stress_test_arm_asan, stress_test_amd_ubsan, stress_test_amd_msan, stress_test_azure_tsan, stress_test_azure_msan, ast_fuzzer_amd_debug, ast_fuzzer_arm_asan, ast_fuzzer_amd_tsan, ast_fuzzer_amd_msan, ast_fuzzer_amd_ubsan, buzzhouse_amd_debug, buzzhouse_arm_asan, buzzhouse_amd_tsan, buzzhouse_amd_msan, buzzhouse_amd_ubsan, performance_comparison_amd_release_master_head_1_3, performance_comparison_amd_release_master_head_2_3, performance_comparison_amd_release_master_head_3_3, performance_comparison_arm_release_master_head_1_3, performance_comparison_arm_release_master_head_2_3, performance_comparison_arm_release_master_head_3_3, performance_comparison_arm_release_release_base_1_3, performance_comparison_arm_release_release_base_2_3, performance_comparison_arm_release_release_base_3_3, clickbench_amd_release, clickbench_arm_release, sqlancer_amd_debug, sqltest]
+    runs-on: [self-hosted, altinity-on-demand, altinity-style-checker-aarch64]
+    needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_amd_debug, build_amd_release, build_amd_asan, build_amd_tsan, build_amd_msan, build_amd_ubsan, build_amd_binary, build_arm_release, build_arm_coverage, build_arm_binary, unit_tests_asan, unit_tests_tsan, unit_tests_msan, unit_tests_ubsan, docker_server_image, docker_keeper_image, install_packages_amd_release, install_packages_arm_release, compatibility_check_release, compatibility_check_aarch64, stateless_tests_amd_asan_distributed_plan_parallel_1_2, stateless_tests_amd_asan_distributed_plan_parallel_2_2, stateless_tests_amd_asan_distributed_plan_sequential, stateless_tests_amd_binary_old_analyzer_s3_storage_databasereplicated_parallel, stateless_tests_amd_binary_old_analyzer_s3_storage_databasereplicated_sequential, stateless_tests_amd_binary_parallelreplicas_s3_storage_parallel, stateless_tests_amd_binary_parallelreplicas_s3_storage_sequential, stateless_tests_amd_debug_asyncinsert_s3_storage_parallel, stateless_tests_amd_debug_asyncinsert_s3_storage_sequential, stateless_tests_amd_debug_parallel, stateless_tests_amd_debug_sequential, stateless_tests_amd_tsan_parallel_1_2, stateless_tests_amd_tsan_parallel_2_2, stateless_tests_amd_tsan_sequential_1_2, stateless_tests_amd_tsan_sequential_2_2, stateless_tests_amd_msan_parallel_1_2, stateless_tests_amd_msan_parallel_2_2, stateless_tests_amd_msan_sequential_1_2, stateless_tests_amd_msan_sequential_2_2, stateless_tests_amd_ubsan_parallel, stateless_tests_amd_ubsan_sequential, stateless_tests_amd_debug_distributed_plan_s3_storage_parallel, stateless_tests_amd_debug_distributed_plan_s3_storage_sequential, stateless_tests_amd_tsan_s3_storage_parallel, stateless_tests_amd_tsan_s3_storage_sequential_1_2, stateless_tests_amd_tsan_s3_storage_sequential_2_2, stateless_tests_arm_binary_parallel, stateless_tests_arm_binary_sequential, integration_tests_amd_asan_old_analyzer_1_6, integration_tests_amd_asan_old_analyzer_2_6, integration_tests_amd_asan_old_analyzer_3_6, integration_tests_amd_asan_old_analyzer_4_6, integration_tests_amd_asan_old_analyzer_5_6, integration_tests_amd_asan_old_analyzer_6_6, integration_tests_amd_binary_1_5, integration_tests_amd_binary_2_5, integration_tests_amd_binary_3_5, integration_tests_amd_binary_4_5, integration_tests_amd_binary_5_5, integration_tests_arm_binary_distributed_plan_1_4, integration_tests_arm_binary_distributed_plan_2_4, integration_tests_arm_binary_distributed_plan_3_4, integration_tests_arm_binary_distributed_plan_4_4, integration_tests_amd_tsan_1_6, integration_tests_amd_tsan_2_6, integration_tests_amd_tsan_3_6, integration_tests_amd_tsan_4_6, integration_tests_amd_tsan_5_6, integration_tests_amd_tsan_6_6, stateless_tests_arm_coverage_parallel, stateless_tests_arm_coverage_sequential, stress_test_amd_debug, stress_test_amd_tsan, stress_test_amd_ubsan, stress_test_amd_msan, ast_fuzzer_amd_debug, ast_fuzzer_amd_tsan, ast_fuzzer_amd_msan, ast_fuzzer_amd_ubsan, buzzhouse_amd_debug, buzzhouse_amd_tsan, buzzhouse_amd_msan, buzzhouse_amd_ubsan, clickbench_amd_release, clickbench_arm_release, sqltest]
     if: ${{ !cancelled() }}
     name: "Finish Workflow"
     outputs:
@@ -4588,6 +4113,13 @@ jobs:
         with:
           ref: ${{ env.CHECKOUT_REF }}

+      - name: Setup
+        uses: ./.github/actions/runner_setup
+      - name: Docker setup
+        uses: ./.github/actions/docker_setup
+        with:
+          test_name: "Finish Workflow"
+
       - name: Prepare env script
         run: |
           rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
@@ -4612,3 +4144,186 @@ jobs:
           else
             python3 -m praktika run 'Finish Workflow' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log
           fi
+
+##########################################################################################
+##################################### ALTINITY JOBS ######################################
+##########################################################################################
+
+  GrypeScanServer:
+    needs: [config_workflow, docker_server_image]
+    if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'RG9ja2VyIHNlcnZlciBpbWFnZQ==') }}
+    strategy:
+      fail-fast: false
+      matrix:
+        suffix: ['', '-alpine']
+    uses: ./.github/workflows/grype_scan.yml
+    secrets: inherit
+    with:
+      docker_image: altinityinfra/clickhouse-server
+      version: ${{ fromJson(needs.config_workflow.outputs.data).custom_data.version.string }}
+      tag-suffix: ${{ matrix.suffix }}
+  GrypeScanKeeper:
+    needs: [config_workflow, docker_keeper_image]
+    if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'RG9ja2VyIGtlZXBlciBpbWFnZQ==') }}
+    uses: ./.github/workflows/grype_scan.yml
+    secrets: inherit
+    with:
+      docker_image: altinityinfra/clickhouse-keeper
+      version: ${{ fromJson(needs.config_workflow.outputs.data).custom_data.version.string }}
+
+  RegressionTestsRelease:
+    needs: [config_workflow, build_amd_release]
+    if: ${{ !failure() && !cancelled() && !contains(github.event.pull_request.body, '[x]

-
+
diff --git a/programs/server/merges.html b/programs/server/merges.html
index 119fb058b0ba..6b1686973d25 100644
--- a/programs/server/merges.html
+++ b/programs/server/merges.html
@@ -3,7 +3,7 @@
 ClickHouse Merges Visualizer
-
+