From 677b61ef6b77fe8f1a3ffc5f37d7ac18e3857a5a Mon Sep 17 00:00:00 2001 From: strtgbb <146047128+strtgbb@users.noreply.github.com> Date: Mon, 2 Feb 2026 09:31:20 -0500 Subject: [PATCH 01/16] Rebase CICD on v26.1.2.11-stable --- .../10_project-antalya-bug-report.md | 36 + .github/ISSUE_TEMPLATE/10_question.yaml | 20 - .../ISSUE_TEMPLATE/20_feature-request.yaml | 38 - .../20_project-antalya-feature-request.md | 20 + .../30_project-antalya-question.md | 16 + .../30_unexpected-behaviour.yaml | 55 - .../35_incomplete_implementation.yaml | 50 - .../40_altinity-stable-bug-report.md | 50 + .../ISSUE_TEMPLATE/45_usability-issue.yaml | 48 - .../50_altinity-stable-question.md | 16 + .github/ISSUE_TEMPLATE/50_build-issue.yaml | 50 - .../60_documentation-issue.yaml | 26 - .../ISSUE_TEMPLATE/70_performance-issue.yaml | 48 - .../80_backward-compatibility.yaml | 48 - .github/ISSUE_TEMPLATE/85_bug-report.yaml | 76 - .github/ISSUE_TEMPLATE/90_fuzzing-report.yaml | 26 - .../ISSUE_TEMPLATE/95_sanitizer-report.yaml | 26 - .../96_installation-issues.yaml | 46 - .github/PULL_REQUEST_TEMPLATE.md | 31 +- .github/actionlint.yml | 10 +- .../actions/create_workflow_report/action.yml | 52 + .../ci_run_report.html.jinja | 272 ++ .../create_workflow_report.py | 924 +++++ .../workflow_report_hook.sh | 7 + .github/actions/docker_setup/action.yml | 32 + .github/actions/runner_setup/action.yml | 19 + .github/dco.yml | 17 + .github/grype/parse_vulnerabilities_grype.py | 32 + .github/grype/run_grype_scan.sh | 18 + .../grype/transform_and_upload_results_s3.sh | 20 + .github/retry.sh | 22 + .github/workflows/README.md | 13 + .github/workflows/backport_branches.yml | 333 +- .github/workflows/cancel.yml | 19 + .github/workflows/cherry_pick.yml | 2 +- .github/workflows/compare_fails.yml | 110 + .github/workflows/create_release.yml | 2 +- .github/workflows/docker_publish.yml | 150 + .github/workflows/grype_scan.yml | 154 + .github/workflows/hourly.yml | 40 +- .github/workflows/init_praktika.yml | 27 + .github/workflows/master.yml | 3359 +++++++--------- .github/workflows/merge_queue.yml | 123 +- .github/workflows/nightly_coverage.yml | 130 +- .github/workflows/nightly_fuzzers.yml | 67 +- .github/workflows/nightly_jepsen.yml | 67 +- .github/workflows/nightly_statistics.yml | 31 +- .github/workflows/pull_request.yml | 3364 ++++++++--------- .../workflows/regression-reusable-suite.yml | 192 + .github/workflows/regression.yml | 488 +++ .github/workflows/release_branches.yml | 421 ++- .github/workflows/release_builds.yml | 1317 +++++++ .github/workflows/repo-sanity-checks.yml | 150 + .github/workflows/reusable_sign.yml | 166 + .github/workflows/scheduled_runs.yml | 55 + .github/workflows/sign_and_release.yml | 567 +++ .github/workflows/vectorsearchstress.yml | 58 +- ci/defs/defs.py | 214 +- ci/defs/job_configs.py | 116 +- ci/docker/binary-builder/Dockerfile | 8 +- ci/docker/cctools/Dockerfile | 4 +- ci/docker/docs-builder/Dockerfile | 2 +- ci/docker/fasttest/Dockerfile | 4 +- ci/docker/fuzzer/Dockerfile | 6 +- ci/docker/integration/arrowflight/Dockerfile | 2 +- ci/docker/integration/base/Dockerfile | 8 +- .../clickhouse_with_hms_catalog/Dockerfile | 2 +- .../clickhouse_with_unity_catalog/Dockerfile | 2 +- .../integration/helper_container/Dockerfile | 2 +- ci/docker/integration/kerberos_kdc/Dockerfile | 2 +- .../mysql_dotnet_client/Dockerfile | 2 +- .../mysql_golang_client/Dockerfile | 2 +- .../integration/mysql_java_client/Dockerfile | 2 +- .../integration/mysql_js_client/Dockerfile | 2 +- .../integration/mysql_php_client/Dockerfile | 2 
+- .../postgresql_java_client/Dockerfile | 2 +- ci/docker/integration/resolver/Dockerfile | 2 +- ci/docker/integration/runner/Dockerfile | 8 +- ci/docker/integration/s3_proxy/Dockerfile | 2 +- ci/docker/keeper-jepsen-test/Dockerfile | 2 +- ci/docker/libfuzzer/Dockerfile | 2 +- ci/docker/performance-comparison/Dockerfile | 6 +- ci/docker/server-jepsen-test/Dockerfile | 2 +- ci/docker/sqlancer-test/Dockerfile | 2 +- ci/docker/stateless-test/Dockerfile | 8 +- ci/docker/stress-test/Dockerfile | 4 +- ci/docker/stress-test/README.md | 2 +- ci/docker/style-test/Dockerfile | 2 +- ci/docker/test-base/Dockerfile | 6 +- ci/jobs/ast_fuzzer_job.py | 2 +- ci/jobs/build_clickhouse.py | 13 +- ci/jobs/clickbench.py | 4 +- ci/jobs/fast_test.py | 4 +- ci/jobs/functional_tests.py | 48 +- ci/jobs/fuzzers_job.py | 5 +- ci/jobs/install_check.py | 9 +- ci/jobs/scripts/clickhouse_proc.py | 33 +- ci/jobs/scripts/clickhouse_version.py | 16 + ci/jobs/scripts/functional_tests_results.py | 124 +- ci/jobs/scripts/fuzzer/run-fuzzer.sh | 7 +- ci/jobs/scripts/integration_tests_configs.py | 36 +- ci/jobs/scripts/workflow_hooks/filter_job.py | 11 +- .../scripts/workflow_hooks/parse_ci_tags.py | 18 + ci/jobs/scripts/workflow_hooks/version_log.py | 35 +- ci/jobs/stress_job.py | 2 +- ci/praktika/_environment.py | 38 +- ci/praktika/execution/__main__.py | 4 + ci/praktika/execution/execution_settings.py | 2 +- ci/praktika/gh.py | 4 +- ci/praktika/hook_cache.py | 2 +- ci/praktika/job.py | 2 +- ci/praktika/native_jobs.py | 39 +- ci/praktika/parser.py | 6 + ci/praktika/result.py | 2 + ci/praktika/runner.py | 14 + ci/praktika/s3.py | 42 + ci/praktika/workflow.py | 2 + ci/praktika/yaml_additional_templates.py | 168 + ci/praktika/yaml_generator.py | 58 +- ci/settings/altinity_overrides.py | 55 + ci/settings/settings.py | 8 +- ci/workflows/backport_branches.py | 1 + ci/workflows/master.py | 33 +- ci/workflows/merge_queue.py | 4 +- ci/workflows/pull_request.py | 65 +- ci/workflows/release_branches.py | 1 + ci/workflows/release_builds.py | 69 + cmake/autogenerated_versions.txt | 8 +- cmake/version.cmake | 9 +- docker/keeper/Dockerfile | 2 +- docker/server/Dockerfile.ubuntu | 2 + docker/server/README.md | 2 +- docker/server/README.src/github-repo | 2 +- docker/server/README.src/license.md | 2 +- docker/server/README.src/logo.svg | 56 +- docker/server/README.src/maintainer.md | 2 +- docker/test/upgrade/Dockerfile | 29 + packages/clickhouse-client.yaml | 6 +- packages/clickhouse-common-static-dbg.yaml | 6 +- packages/clickhouse-common-static.yaml | 6 +- packages/clickhouse-keeper-dbg.yaml | 6 +- packages/clickhouse-keeper.yaml | 6 +- packages/clickhouse-server.yaml | 6 +- programs/server/binary.html | 5 +- programs/server/config.xml | 6 +- programs/server/dashboard.html | 2 +- programs/server/index.html | 109 +- programs/server/merges.html | 2 +- programs/server/play.html | 46 +- src/Common/SignalHandlers.cpp | 6 +- tests/broken_tests.yaml | 196 + tests/ci/changelog.py | 19 +- tests/ci/ci_buddy.py | 4 +- tests/ci/clickhouse_helper.py | 18 +- tests/ci/create_release.py | 2 +- tests/ci/env_helper.py | 17 +- tests/ci/get_robot_token.py | 14 +- tests/ci/git_helper.py | 44 +- tests/ci/pr_info.py | 26 +- .../packaging/ansible/inventory/localhost.yml | 73 + .../roles/get_cloudfront_info/tasks/main.yml | 34 + .../ansible/roles/publish_pkgs/tasks/main.yml | 98 + .../roles/update_bin_repo/tasks/main.yml | 52 + .../roles/update_deb_repo/tasks/main.yml | 61 + .../templates/apt-ftparchive-stable.conf | 6 + .../templates/apt-ftparchive.conf | 17 + 
.../roles/update_rpm_repo/tasks/main.yml | 51 + .../roles/update_rpm_repo/templates/repo.j2 | 7 + .../update_rpm_repo/templates/rpmmacros.j2 | 1 + .../roles/update_tar_repo/tasks/main.yml | 61 + .../packaging/ansible/sign-and-release.yml | 8 + .../release/packaging/dirindex/dirindexgen.py | 122 + .../packaging/static/bootstrap.bundle.min.js | 7 + tests/ci/s3_helper.py | 41 + tests/ci/sign_release.py | 97 + tests/ci/version_helper.py | 136 +- tests/config/config.d/azure_storage_conf.xml | 8 +- .../compose/docker_compose_arrowflight.yml | 2 +- .../compose/docker_compose_azurite.yml | 2 +- .../compose/docker_compose_clickhouse.yml | 2 +- .../compose/docker_compose_dotnet_client.yml | 2 +- .../docker_compose_iceberg_hms_catalog.yml | 2 +- .../compose/docker_compose_jdbc_bridge.yml | 1 + .../compose/docker_compose_keeper.yml | 6 +- .../docker_compose_kerberized_kafka.yml | 2 +- .../compose/docker_compose_kerberos_kdc.yml | 2 +- .../compose/docker_compose_minio.yml | 6 +- .../docker_compose_mysql_dotnet_client.yml | 2 +- .../docker_compose_mysql_golang_client.yml | 2 +- .../docker_compose_mysql_java_client.yml | 2 +- .../docker_compose_mysql_js_client.yml | 2 +- .../docker_compose_mysql_php_client.yml | 2 +- .../compose/docker_compose_nginx.yml | 2 +- .../docker_compose_postgresql_java_client.yml | 2 +- tests/integration/helpers/cluster.py | 11 +- .../test_attach_partition_using_copy/test.py | 4 +- .../test_backward_compatibility/test.py | 2 +- .../test_aggregate_fixed_key.py | 2 +- .../test_aggregate_function_state.py | 4 +- .../test_convert_ordinary.py | 2 +- .../test_cte_distributed.py | 2 +- .../test_functions.py | 2 +- .../test_insert_profile_events.py | 2 +- .../test_ip_types_binary_compatibility.py | 2 +- .../test_memory_bound_aggregation.py | 4 +- .../test_normalized_count_comparison.py | 2 +- .../test_rocksdb_upgrade.py | 2 +- .../test_select_aggregate_alias_column.py | 2 +- .../test_short_strings_aggregation.py | 12 +- ...test_vertical_merges_from_compact_parts.py | 2 +- tests/integration/test_cow_policy/test.py | 4 +- tests/integration/test_database_delta/test.py | 2 +- .../test_disk_over_web_server/test.py | 2 +- .../test.py | 2 +- .../test_lightweight_updates/test.py | 2 +- tests/integration/test_old_versions/test.py | 2 +- .../test_polymorphic_parts/test.py | 2 +- .../test.py | 4 +- .../test_replicating_constants/test.py | 4 +- .../test_trace_log_build_id/test.py | 2 +- tests/integration/test_ttl_replicated/test.py | 6 +- tests/integration/test_version_update/test.py | 2 +- .../test.py | 6 +- .../queries/0_stateless/01528_play.reference | 2 +- tests/queries/0_stateless/01528_play.sh | 2 +- utils/tests-visualizer/index.html | 2 +- 226 files changed, 11577 insertions(+), 5087 deletions(-) create mode 100644 .github/ISSUE_TEMPLATE/10_project-antalya-bug-report.md delete mode 100644 .github/ISSUE_TEMPLATE/10_question.yaml delete mode 100644 .github/ISSUE_TEMPLATE/20_feature-request.yaml create mode 100644 .github/ISSUE_TEMPLATE/20_project-antalya-feature-request.md create mode 100644 .github/ISSUE_TEMPLATE/30_project-antalya-question.md delete mode 100644 .github/ISSUE_TEMPLATE/30_unexpected-behaviour.yaml delete mode 100644 .github/ISSUE_TEMPLATE/35_incomplete_implementation.yaml create mode 100644 .github/ISSUE_TEMPLATE/40_altinity-stable-bug-report.md delete mode 100644 .github/ISSUE_TEMPLATE/45_usability-issue.yaml create mode 100644 .github/ISSUE_TEMPLATE/50_altinity-stable-question.md delete mode 100644 .github/ISSUE_TEMPLATE/50_build-issue.yaml delete mode 100644 
.github/ISSUE_TEMPLATE/60_documentation-issue.yaml delete mode 100644 .github/ISSUE_TEMPLATE/70_performance-issue.yaml delete mode 100644 .github/ISSUE_TEMPLATE/80_backward-compatibility.yaml delete mode 100644 .github/ISSUE_TEMPLATE/85_bug-report.yaml delete mode 100644 .github/ISSUE_TEMPLATE/90_fuzzing-report.yaml delete mode 100644 .github/ISSUE_TEMPLATE/95_sanitizer-report.yaml delete mode 100644 .github/ISSUE_TEMPLATE/96_installation-issues.yaml create mode 100644 .github/actions/create_workflow_report/action.yml create mode 100644 .github/actions/create_workflow_report/ci_run_report.html.jinja create mode 100755 .github/actions/create_workflow_report/create_workflow_report.py create mode 100755 .github/actions/create_workflow_report/workflow_report_hook.sh create mode 100644 .github/actions/docker_setup/action.yml create mode 100644 .github/actions/runner_setup/action.yml create mode 100644 .github/dco.yml create mode 100644 .github/grype/parse_vulnerabilities_grype.py create mode 100755 .github/grype/run_grype_scan.sh create mode 100755 .github/grype/transform_and_upload_results_s3.sh create mode 100755 .github/retry.sh create mode 100644 .github/workflows/README.md create mode 100644 .github/workflows/cancel.yml create mode 100644 .github/workflows/compare_fails.yml create mode 100644 .github/workflows/docker_publish.yml create mode 100644 .github/workflows/grype_scan.yml create mode 100644 .github/workflows/init_praktika.yml create mode 100644 .github/workflows/regression-reusable-suite.yml create mode 100644 .github/workflows/regression.yml create mode 100644 .github/workflows/release_builds.yml create mode 100644 .github/workflows/repo-sanity-checks.yml create mode 100644 .github/workflows/reusable_sign.yml create mode 100644 .github/workflows/scheduled_runs.yml create mode 100644 .github/workflows/sign_and_release.yml create mode 100644 ci/jobs/scripts/workflow_hooks/parse_ci_tags.py create mode 100644 ci/praktika/yaml_additional_templates.py create mode 100644 ci/settings/altinity_overrides.py create mode 100644 ci/workflows/release_builds.py create mode 100644 docker/test/upgrade/Dockerfile create mode 100644 tests/broken_tests.yaml create mode 100644 tests/ci/release/packaging/ansible/inventory/localhost.yml create mode 100644 tests/ci/release/packaging/ansible/roles/get_cloudfront_info/tasks/main.yml create mode 100644 tests/ci/release/packaging/ansible/roles/publish_pkgs/tasks/main.yml create mode 100644 tests/ci/release/packaging/ansible/roles/update_bin_repo/tasks/main.yml create mode 100644 tests/ci/release/packaging/ansible/roles/update_deb_repo/tasks/main.yml create mode 100644 tests/ci/release/packaging/ansible/roles/update_deb_repo/templates/apt-ftparchive-stable.conf create mode 100644 tests/ci/release/packaging/ansible/roles/update_deb_repo/templates/apt-ftparchive.conf create mode 100644 tests/ci/release/packaging/ansible/roles/update_rpm_repo/tasks/main.yml create mode 100644 tests/ci/release/packaging/ansible/roles/update_rpm_repo/templates/repo.j2 create mode 100644 tests/ci/release/packaging/ansible/roles/update_rpm_repo/templates/rpmmacros.j2 create mode 100644 tests/ci/release/packaging/ansible/roles/update_tar_repo/tasks/main.yml create mode 100644 tests/ci/release/packaging/ansible/sign-and-release.yml create mode 100755 tests/ci/release/packaging/dirindex/dirindexgen.py create mode 100644 tests/ci/release/packaging/static/bootstrap.bundle.min.js create mode 100644 tests/ci/sign_release.py diff --git 
a/.github/ISSUE_TEMPLATE/10_project-antalya-bug-report.md b/.github/ISSUE_TEMPLATE/10_project-antalya-bug-report.md new file mode 100644 index 000000000000..0c8c15a05eaf --- /dev/null +++ b/.github/ISSUE_TEMPLATE/10_project-antalya-bug-report.md @@ -0,0 +1,36 @@ +--- +name: Project Antalya Bug Report +about: Help us improve Project Antalya +title: '' +labels: antalya +assignees: '' + +--- + +**Describe the bug** +A clear and concise description of what the bug is. + + +**To Reproduce** +Steps to reproduce the behavior: +1. Go to '...' +2. Click on '....' +3. Scroll down to '....' +4. See error + +**Expected behavior** +A clear and concise description of what you expected to happen. + +**Screenshots** +If applicable, add screenshots to help explain your problem. + +**Key information** +Provide relevant runtime details. + - Project Antalya Build Version + - Cloud provider, e.g., AWS + - Kubernetes provider, e.g., GKE or Minikube + - Object storage, e.g., AWS S3 or Minio + - Iceberg catalog, e.g., Glue with REST Proxy + +**Additional context** +Add any other context about the problem here. diff --git a/.github/ISSUE_TEMPLATE/10_question.yaml b/.github/ISSUE_TEMPLATE/10_question.yaml deleted file mode 100644 index 71a3d3da6425..000000000000 --- a/.github/ISSUE_TEMPLATE/10_question.yaml +++ /dev/null @@ -1,20 +0,0 @@ -name: Question -description: Ask a question about ClickHouse -labels: ["question"] -body: - - type: markdown - attributes: - value: | - > Make sure to check documentation https://clickhouse.com/docs/ first. If the question is concise and probably has a short answer, asking it in [community Slack](https://join.slack.com/t/clickhousedb/shared_invite/zt-1gh9ds7f4-PgDhJAaF8ad5RbWBAAjzFg) is probably the fastest way to find the answer. For more complicated questions, consider asking them on StackOverflow with "clickhouse" tag https://stackoverflow.com/questions/tagged/clickhouse - - type: textarea - attributes: - label: Company or project name - description: Put your company name or project description here. - validations: - required: false - - type: textarea - attributes: - label: Question - description: Please put your question here. - validations: - required: true diff --git a/.github/ISSUE_TEMPLATE/20_feature-request.yaml b/.github/ISSUE_TEMPLATE/20_feature-request.yaml deleted file mode 100644 index 054efc2d61ee..000000000000 --- a/.github/ISSUE_TEMPLATE/20_feature-request.yaml +++ /dev/null @@ -1,38 +0,0 @@ -name: Feature request -description: Suggest an idea for ClickHouse -labels: ["feature"] -body: - - type: markdown - attributes: - value: | - > (you don't have to strictly follow this form) - - type: textarea - attributes: - label: Company or project name - description: Put your company name or project description here. - validations: - required: false - - type: textarea - attributes: - label: Use case - description: A clear and concise description of what the intended usage scenario is. - validations: - required: true - - type: textarea - attributes: - label: Describe the solution you'd like - description: A clear and concise description of what you want to happen. - validations: - required: true - - type: textarea - attributes: - label: Describe alternatives you've considered - description: A clear and concise description of any alternative solutions or features you've considered. - validations: - required: false - - type: textarea - attributes: - label: Additional context - description: Add any other context or screenshots about the feature request here. 
- validations: - required: false diff --git a/.github/ISSUE_TEMPLATE/20_project-antalya-feature-request.md b/.github/ISSUE_TEMPLATE/20_project-antalya-feature-request.md new file mode 100644 index 000000000000..603584bf4428 --- /dev/null +++ b/.github/ISSUE_TEMPLATE/20_project-antalya-feature-request.md @@ -0,0 +1,20 @@ +--- +name: Project Antalya Feature request +about: Suggest an idea for Project Antalya +title: '' +labels: antalya, enhancement +assignees: '' + +--- + +**Is your feature request related to a problem? Please describe.** +A clear and concise description of what the problem is. Ex. I'm always frustrated when [...] + +**Describe the solution you'd like** +A clear and concise description of what you want to happen. + +**Describe alternatives you've considered** +A clear and concise description of any alternative solutions or features you've considered. + +**Additional context** +Add any other context or screenshots about the feature request here. diff --git a/.github/ISSUE_TEMPLATE/30_project-antalya-question.md b/.github/ISSUE_TEMPLATE/30_project-antalya-question.md new file mode 100644 index 000000000000..c77cee4a916b --- /dev/null +++ b/.github/ISSUE_TEMPLATE/30_project-antalya-question.md @@ -0,0 +1,16 @@ +--- +name: Project Antalya Question +about: Ask a question about Project Antalya +title: '' +labels: '' +assignees: '' + +--- + +Make sure to check the [Altinity documentation](https://docs.altinity.com/) and the [Altinity Knowledge Base](https://kb.altinity.com/) first. + +If your question is concise and probably has a short answer, asking it in the [the Altinity Slack workspace](https://altinity.com/slack) is probably the fastest way to find the answer. Use the #antalya channel. + +If you'd rather file a GitHub issue, remove all this text and ask your question here. + +Please include relevant environment information as applicable. diff --git a/.github/ISSUE_TEMPLATE/30_unexpected-behaviour.yaml b/.github/ISSUE_TEMPLATE/30_unexpected-behaviour.yaml deleted file mode 100644 index 7a34c4bb7ba8..000000000000 --- a/.github/ISSUE_TEMPLATE/30_unexpected-behaviour.yaml +++ /dev/null @@ -1,55 +0,0 @@ -name: Unexpected behaviour -description: Some feature is working in non-obvious way -labels: ["unexpected behaviour"] -body: - - type: markdown - attributes: - value: | - > (you don't have to strictly follow this form) - - type: textarea - attributes: - label: Company or project name - description: Put your company name or project description here. - validations: - required: false - - type: textarea - attributes: - label: Describe the unexpected behaviour - description: A clear and concise description of what doesn't work as it is supposed to. - validations: - required: true - - type: textarea - attributes: - label: Which ClickHouse versions are affected? - validations: - required: true - - type: textarea - attributes: - label: How to reproduce - description: | - * Which ClickHouse server version to use - * Which interface to use, if matters - * Non-default settings, if any - * `CREATE TABLE` statements for all tables involved - * Sample data for all these tables, use [clickhouse-obfuscator](https://github.com/ClickHouse/ClickHouse/blob/c81bec37a58757be1e2b1ac6f20a62b3f14a31f1/programs/obfuscator/Obfuscator.cpp#L55-L95) if necessary - * Queries to run that lead to unexpected result - validations: - required: true - - type: textarea - attributes: - label: Expected behavior - description: A clear and concise description of what you expected to happen. 
- validations: - required: false - - type: textarea - attributes: - label: Error message and/or stacktrace - description: If applicable, add screenshots to help explain your problem. - validations: - required: false - - type: textarea - attributes: - label: Additional context - description: Add any other context about the problem here. - validations: - required: false diff --git a/.github/ISSUE_TEMPLATE/35_incomplete_implementation.yaml b/.github/ISSUE_TEMPLATE/35_incomplete_implementation.yaml deleted file mode 100644 index 969c1893e6f5..000000000000 --- a/.github/ISSUE_TEMPLATE/35_incomplete_implementation.yaml +++ /dev/null @@ -1,50 +0,0 @@ -name: Incomplete implementation -description: Implementation of existing feature is not finished -labels: ["unfinished code"] -body: - - type: markdown - attributes: - value: | - > (you don't have to strictly follow this form) - - type: textarea - attributes: - label: Company or project name - description: Put your company name or project description here. - validations: - required: false - - type: textarea - attributes: - label: Describe the unexpected behaviour - description: A clear and concise description of what works not as it is supposed to. - validations: - required: true - - type: textarea - attributes: - label: How to reproduce - description: | - * Which ClickHouse server version to use - * Which interface to use, if matters - * Non-default settings, if any - * `CREATE TABLE` statements for all tables involved - * Sample data for all these tables, use [clickhouse-obfuscator](https://github.com/ClickHouse/ClickHouse/blob/c81bec37a58757be1e2b1ac6f20a62b3f14a31f1/programs/obfuscator/Obfuscator.cpp#L55-L95) if necessary - * Queries to run that lead to unexpected result - validations: - required: true - - type: textarea - attributes: - label: Expected behavior - description: A clear and concise description of what you expected to happen. - validations: - required: false - - type: textarea - attributes: - label: Error message and/or stacktrace - description: If applicable, add screenshots to help explain your problem. - validations: - required: false - - type: textarea - attributes: - label: Additional context - description: Add any other context about the problem here. 
- validations: - required: false diff --git a/.github/ISSUE_TEMPLATE/40_altinity-stable-bug-report.md b/.github/ISSUE_TEMPLATE/40_altinity-stable-bug-report.md new file mode 100644 index 000000000000..90bf241dc195 --- /dev/null +++ b/.github/ISSUE_TEMPLATE/40_altinity-stable-bug-report.md @@ -0,0 +1,50 @@ +--- +name: Altinity Stable Bug report +about: Report something broken in an Altinity Stable Build +title: '' +labels: stable +assignees: '' + +--- + +✅ *I checked [the Altinity Stable Builds lifecycle table](https://docs.altinity.com/altinitystablebuilds/#altinity-stable-builds-life-cycle-table), and the Altinity Stable Build version I'm using is still supported.* + +## Type of problem +Choose one of the following items, then delete the others: + +**Bug report** - something's broken + +**Incomplete implementation** - something's not quite right + +**Performance issue** - something works, just not as quickly as it should + +**Backwards compatibility issue** - something used to work, but now it doesn't + +**Unexpected behavior** - something surprising happened, but it wasn't the good kind of surprise + +**Installation issue** - something doesn't install the way it should + +**Usability issue** - something works, but it could be a lot easier + +**Documentation issue** - something in the docs is wrong, incomplete, or confusing + +## Describe the situation +A clear, concise description of what's happening. Can you reproduce it in a ClickHouse Official build of the same version? + +## How to reproduce the behavior + +* Which Altinity Stable Build version to use +* Which interface to use, if it matters +* Non-default settings, if any +* `CREATE TABLE` statements for all tables involved +* Sample data for all these tables, use the [clickhouse-obfuscator](https://github.com/ClickHouse/ClickHouse/blob/31fd4f5eb41d5ec26724fc645c11fe4d62eae07f/programs/obfuscator/README.md) if necessary +* Queries to run that lead to an unexpected result + +## Expected behavior +A clear, concise description of what you expected to happen. + +## Logs, error messages, stacktraces, screenshots... +Add any details that might explain the issue. + +## Additional context +Add any other context about the issue here. diff --git a/.github/ISSUE_TEMPLATE/45_usability-issue.yaml b/.github/ISSUE_TEMPLATE/45_usability-issue.yaml deleted file mode 100644 index 0d2ae1a580e5..000000000000 --- a/.github/ISSUE_TEMPLATE/45_usability-issue.yaml +++ /dev/null @@ -1,48 +0,0 @@ -name: Usability improvement request -description: Report something can be made more convenient to use -labels: ["usability"] -body: - - type: markdown - attributes: - value: | - > (you don't have to strictly follow this form) - - type: textarea - attributes: - label: Company or project name - description: Put your company name or project description here. - validations: - required: false - - type: textarea - attributes: - label: Describe the improvement - description: A clear and concise description of what you want to happen - validations: - required: true - - type: textarea - attributes: - label: Which ClickHouse versions are affected? 
- validations: - required: true - - type: textarea - attributes: - label: How to reproduce - description: | - * Which interface to use, if matters - * Non-default settings, if any - * `CREATE TABLE` statements for all tables involved - * Sample data for all these tables, use [clickhouse-obfuscator](https://github.com/ClickHouse/ClickHouse/blob/c81bec37a58757be1e2b1ac6f20a62b3f14a31f1/programs/obfuscator/Obfuscator.cpp#L55-L95) if necessary - * Queries to run that lead to unexpected result - validations: - required: true - - type: textarea - attributes: - label: Error message and/or stacktrace - description: If applicable, add screenshots to help explain your problem. - validations: - required: false - - type: textarea - attributes: - label: Additional context - description: Add any other context about the problem here. - validations: - required: false diff --git a/.github/ISSUE_TEMPLATE/50_altinity-stable-question.md b/.github/ISSUE_TEMPLATE/50_altinity-stable-question.md new file mode 100644 index 000000000000..027970e25a02 --- /dev/null +++ b/.github/ISSUE_TEMPLATE/50_altinity-stable-question.md @@ -0,0 +1,16 @@ +--- +name: Altinity Stable Question +about: Ask a question about an Altinity Stable Build +title: '' +labels: question, stable +assignees: '' + +--- + +Make sure to check the [Altinity documentation](https://docs.altinity.com/) and the [Altinity Knowledge Base](https://kb.altinity.com/) first. + +If your question is concise and probably has a short answer, asking it in the [the Altinity Slack channel](https://altinity.com/slack) is probably the fastest way to find the answer. + +For more complicated questions, consider [asking them on StackOverflow with the tag "clickhouse"](https://stackoverflow.com/questions/tagged/clickhouse). + +If you'd rather file a GitHub issue, remove all this text and ask your question here. diff --git a/.github/ISSUE_TEMPLATE/50_build-issue.yaml b/.github/ISSUE_TEMPLATE/50_build-issue.yaml deleted file mode 100644 index 0549944c0bb2..000000000000 --- a/.github/ISSUE_TEMPLATE/50_build-issue.yaml +++ /dev/null @@ -1,50 +0,0 @@ -name: Build issue -description: Report failed ClickHouse build from master -labels: ["build"] -body: - - type: markdown - attributes: - value: | - > Make sure that `git diff` result is empty and you've just pulled fresh master. Try cleaning up cmake cache. Just in case, official build instructions are published here: https://clickhouse.com/docs/en/development/build/ - - type: textarea - attributes: - label: Company or project name - description: Put your company name or project description here. - validations: - required: false - - type: textarea - attributes: - label: Describe the problem - description: A clear and concise description of what doesn't work as it is supposed to. - validations: - required: true - - type: textarea - attributes: - label: Operating system - description: OS kind or distribution, specific version/release, non-standard kernel if any. If you are trying to build inside virtual machine, please mention it too. - validations: - required: false - - type: textarea - attributes: - label: CMake version - description: The output of `cmake --version`. - validations: - required: false - - type: textarea - attributes: - label: Ninja version - description: The output of `ninja --version`. - validations: - required: false - - type: textarea - attributes: - label: Compiler name and version - description: We recommend to use clang. The version can be obtained via `clang --version`. 
- validations: - required: false - - type: textarea - attributes: - label: Full cmake and/or ninja output with the error - description: Please include everything (use https://pastila.nl/ for large output)! - validations: - required: true diff --git a/.github/ISSUE_TEMPLATE/60_documentation-issue.yaml b/.github/ISSUE_TEMPLATE/60_documentation-issue.yaml deleted file mode 100644 index bba6df87a783..000000000000 --- a/.github/ISSUE_TEMPLATE/60_documentation-issue.yaml +++ /dev/null @@ -1,26 +0,0 @@ -name: Documentation issue -description: Report something incorrect or missing in documentation -labels: ["comp-documentation"] -body: - - type: markdown - attributes: - value: | - > Make sure that `git diff` result is empty and you've just pulled fresh master. Try cleaning up cmake cache. Just in case, official build instructions are published here: https://clickhouse.com/docs/en/development/build/ - - type: textarea - attributes: - label: Company or project name - description: Put your company name or project description here. - validations: - required: false - - type: textarea - attributes: - label: Describe the issue - description: A clear and concise description of what's wrong in documentation. - validations: - required: true - - type: textarea - attributes: - label: Additional context - description: Add any other context about the problem here. - validations: - required: false diff --git a/.github/ISSUE_TEMPLATE/70_performance-issue.yaml b/.github/ISSUE_TEMPLATE/70_performance-issue.yaml deleted file mode 100644 index 1df99dc76fda..000000000000 --- a/.github/ISSUE_TEMPLATE/70_performance-issue.yaml +++ /dev/null @@ -1,48 +0,0 @@ -name: Performance issue -description: Report something working slower than expected -labels: ["performance"] -body: - - type: markdown - attributes: - value: | - > (you don't have to strictly follow this form) - - type: textarea - attributes: - label: Company or project name - description: Put your company name or project description here. - validations: - required: false - - type: textarea - attributes: - label: Describe the situation - description: What exactly works slower than expected? - validations: - required: true - - type: textarea - attributes: - label: Which ClickHouse versions are affected? - validations: - required: true - - type: textarea - attributes: - label: How to reproduce - description: | - * Which interface to use, if matters - * Non-default settings, if any - * `CREATE TABLE` statements for all tables involved - * Sample data for all these tables, use [clickhouse-obfuscator](https://github.com/ClickHouse/ClickHouse/blob/c81bec37a58757be1e2b1ac6f20a62b3f14a31f1/programs/obfuscator/Obfuscator.cpp#L55-L95) if necessary - * Queries to run that lead to unexpected result - validations: - required: true - - type: textarea - attributes: - label: Expected performance - description: What are your performance expectation, why do you think they are realistic? Has it been working faster in older ClickHouse releases? Is it working faster in some specific other system? - validations: - required: false - - type: textarea - attributes: - label: Additional context - description: Add any other context about the problem here. 
- validations: - required: false diff --git a/.github/ISSUE_TEMPLATE/80_backward-compatibility.yaml b/.github/ISSUE_TEMPLATE/80_backward-compatibility.yaml deleted file mode 100644 index 72f56d781979..000000000000 --- a/.github/ISSUE_TEMPLATE/80_backward-compatibility.yaml +++ /dev/null @@ -1,48 +0,0 @@ -name: Backward compatibility issue -description: Report the case when the behaviour of a new version can break existing use cases -labels: ["backward compatibility"] -body: - - type: markdown - attributes: - value: | - > (you don't have to strictly follow this form) - - type: textarea - attributes: - label: Company or project name - description: Put your company name or project description here. - validations: - required: false - - type: textarea - attributes: - label: Describe the unexpected behaviour - description: A clear and concise description of what works not as it is supposed to. - validations: - required: true - - type: textarea - attributes: - label: Which ClickHouse versions are affected? - validations: - required: true - - type: textarea - attributes: - label: How to reproduce - description: | - * Which interface to use, if matters - * Non-default settings, if any - * `CREATE TABLE` statements for all tables involved - * Sample data for all these tables, use [clickhouse-obfuscator](https://github.com/ClickHouse/ClickHouse/blob/c81bec37a58757be1e2b1ac6f20a62b3f14a31f1/programs/obfuscator/Obfuscator.cpp#L55-L95) if necessary - * Queries to run that lead to unexpected result - validations: - required: true - - type: textarea - attributes: - label: Error message and/or stacktrace - description: If applicable, add screenshots to help explain your problem. - validations: - required: false - - type: textarea - attributes: - label: Additional context - description: Add any other context about the problem here. - validations: - required: false diff --git a/.github/ISSUE_TEMPLATE/85_bug-report.yaml b/.github/ISSUE_TEMPLATE/85_bug-report.yaml deleted file mode 100644 index 349bf82a3a4e..000000000000 --- a/.github/ISSUE_TEMPLATE/85_bug-report.yaml +++ /dev/null @@ -1,76 +0,0 @@ -name: Bug report -description: Wrong behavior (visible to users) in the official ClickHouse release. -labels: ["potential bug"] -body: - - type: markdown - attributes: - value: | - > Please make sure that the version you're using is still supported (you can find the list [here](https://github.com/ClickHouse/ClickHouse/blob/master/SECURITY.md#scope-and-supported-versions)). - > You have to provide the following information whenever possible. - - type: textarea - attributes: - label: Company or project name - description: Put your company name or project description here. - validations: - required: false - - type: textarea - attributes: - label: Describe what's wrong - description: | - * A clear and concise description of what works not as it is supposed to. - * A link to reproducer in [https://fiddle.clickhouse.com/](https://fiddle.clickhouse.com/). - validations: - required: true - - type: dropdown - attributes: - label: Does it reproduce on the most recent release? 
- description: | - [The list of releases](https://github.com/ClickHouse/ClickHouse/blob/master/utils/list-versions/version_date.tsv) - options: - - 'Yes' - - 'No' - validations: - required: true - - type: markdown - attributes: - value: | - ----- - > Change "enabled" to true in "send_crash_reports" section in `config.xml`: - ```xml - - - - true - - ``` - ----- - - type: textarea - attributes: - label: How to reproduce - description: | - * Which ClickHouse server version to use - * Which interface to use, if matters - * Non-default settings, if any - * `CREATE TABLE` statements for all tables involved - * Sample data for all these tables, use [clickhouse-obfuscator](https://github.com/ClickHouse/ClickHouse/blob/c81bec37a58757be1e2b1ac6f20a62b3f14a31f1/programs/obfuscator/Obfuscator.cpp#L55-L95) if necessary - * Queries to run that lead to unexpected result - validations: - required: true - - type: textarea - attributes: - label: Expected behavior - description: A clear and concise description of what you expected to happen. - validations: - required: false - - type: textarea - attributes: - label: Error message and/or stacktrace - description: If applicable, add screenshots to help explain your problem. - validations: - required: false - - type: textarea - attributes: - label: Additional context - description: Add any other context about the problem here. - validations: - required: false diff --git a/.github/ISSUE_TEMPLATE/90_fuzzing-report.yaml b/.github/ISSUE_TEMPLATE/90_fuzzing-report.yaml deleted file mode 100644 index 84dc8a372e5a..000000000000 --- a/.github/ISSUE_TEMPLATE/90_fuzzing-report.yaml +++ /dev/null @@ -1,26 +0,0 @@ -name: Assertion found via fuzzing -description: Potential issue has been found via Fuzzer or Stress tests -labels: ["fuzz"] -body: - - type: markdown - attributes: - value: | - > (you don't have to strictly follow this form) - - type: textarea - attributes: - label: Describe the bug - description: A link to the report. - validations: - required: true - - type: textarea - attributes: - label: How to reproduce - description: Try to reproduce the report and copy the tables and queries involved. - validations: - required: false - - type: textarea - attributes: - label: Error message and/or stacktrace - description: You can find additional information in server logs. - validations: - required: false diff --git a/.github/ISSUE_TEMPLATE/95_sanitizer-report.yaml b/.github/ISSUE_TEMPLATE/95_sanitizer-report.yaml deleted file mode 100644 index 7bb47e2b824b..000000000000 --- a/.github/ISSUE_TEMPLATE/95_sanitizer-report.yaml +++ /dev/null @@ -1,26 +0,0 @@ -name: Sanitizer alert -description: Potential issue has been found by special code instrumentation -labels: ["testing"] -body: - - type: markdown - attributes: - value: | - > (you don't have to strictly follow this form) - - type: textarea - attributes: - label: Describe the bug - description: A link to the report. - validations: - required: true - - type: textarea - attributes: - label: How to reproduce - description: Try to reproduce the report and copy the tables and queries involved. - validations: - required: false - - type: textarea - attributes: - label: Error message and/or stacktrace - description: You can find additional information in server logs. 
- validations: - required: false diff --git a/.github/ISSUE_TEMPLATE/96_installation-issues.yaml b/.github/ISSUE_TEMPLATE/96_installation-issues.yaml deleted file mode 100644 index f71f6079453e..000000000000 --- a/.github/ISSUE_TEMPLATE/96_installation-issues.yaml +++ /dev/null @@ -1,46 +0,0 @@ -name: Installation issue -description: Issue with ClickHouse installation from https://clickhouse.com/docs/en/install/ -labels: ["comp-install"] -body: - - type: markdown - attributes: - value: | - > **I have tried the following solutions**: https://clickhouse.com/docs/en/faq/troubleshooting/#troubleshooting-installation-errors - - type: textarea - attributes: - label: Company or project name - description: Put your company name or project description here. - validations: - required: false - - type: textarea - attributes: - label: Installation type - description: Packages, docker, single binary, curl? - validations: - required: true - - type: textarea - attributes: - label: Source of the ClickHouse - description: A link to the source. Or the command you've tried. - validations: - required: true - - type: textarea - attributes: - label: Describe the problem. - description: What went wrong and what is the expected result? - validations: - required: true - - type: textarea - attributes: - label: Error message and/or stacktrace - description: You can find additional information in server logs. - validations: - required: false - - type: textarea - attributes: - label: How to reproduce - description: | - * For Linux-based operating systems: provide a script for clear docker container from the official image - * For anything else: steps to reproduce on as much as possible clear system - validations: - required: false diff --git a/.github/PULL_REQUEST_TEMPLATE.md b/.github/PULL_REQUEST_TEMPLATE.md index c81bd0de0ff7..0175bef62a14 100644 --- a/.github/PULL_REQUEST_TEMPLATE.md +++ b/.github/PULL_REQUEST_TEMPLATE.md @@ -16,20 +16,19 @@ ... ### Documentation entry for user-facing changes +... 
-- [ ] Documentation is written (mandatory for new features) - - +#### Exclude tests: +- [ ] Fast test +- [ ] Integration Tests +- [ ] Stateless tests +- [ ] Stateful tests +- [ ] Performance tests +- [ ] All with ASAN +- [ ] All with TSAN +- [ ] All with MSAN +- [ ] All with UBSAN +- [ ] All with Coverage +- [ ] All with Aarch64 +- [ ] All Regression +- [ ] Disable CI Cache diff --git a/.github/actionlint.yml b/.github/actionlint.yml index cf5f575e3c74..904a548dadd5 100644 --- a/.github/actionlint.yml +++ b/.github/actionlint.yml @@ -1,9 +1,9 @@ self-hosted-runner: labels: - - builder - - func-tester - - func-tester-aarch64 + - altinity-builder + - altinity-func-tester + - altinity-func-tester-aarch64 - fuzzer-unit-tester - - style-checker - - style-checker-aarch64 + - altinity-style-checker + - altinity-style-checker-aarch64 - release-maker diff --git a/.github/actions/create_workflow_report/action.yml b/.github/actions/create_workflow_report/action.yml new file mode 100644 index 000000000000..8c975ec345c5 --- /dev/null +++ b/.github/actions/create_workflow_report/action.yml @@ -0,0 +1,52 @@ +name: Create and Upload Combined Report +description: Create and upload a combined CI report +inputs: + workflow_config: + description: "Workflow config" + required: true + final: + description: "Control whether the report is final or a preview" + required: false + default: "false" +runs: + using: "composite" + steps: + - name: Create workflow config + shell: bash + run: | + mkdir -p ./ci/tmp + cat > ./ci/tmp/workflow_config.json << 'EOF' + ${{ inputs.workflow_config }} + EOF + + - name: Create and upload workflow report + env: + PR_NUMBER: ${{ github.event.pull_request.number || 0 }} + ACTIONS_RUN_URL: ${{ github.event.repository.html_url }}/actions/runs/${{ github.run_id }} + COMMIT_SHA: ${{ steps.set_version.outputs.commit_sha || github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }} + FINAL: ${{ inputs.final }} + shell: bash + run: | + pip install clickhouse-driver==0.2.8 numpy==1.26.4 pandas==2.0.3 jinja2==3.1.5 + + CMD="python3 .github/actions/create_workflow_report/create_workflow_report.py" + ARGS="--actions-run-url $ACTIONS_RUN_URL --known-fails tests/broken_tests.yaml --cves --pr-number $PR_NUMBER" + + set +e -x + if [[ "$FINAL" == "false" ]]; then + REPORT_LINK=$($CMD $ARGS --mark-preview) + else + REPORT_LINK=$($CMD $ARGS) + fi + + echo $REPORT_LINK + + if [[ "$FINAL" == "true" ]]; then + IS_VALID_URL=$(echo $REPORT_LINK | grep -E '^https?://') + if [[ -n $IS_VALID_URL ]]; then + echo "Workflow Run Report: [View Report]($REPORT_LINK)" >> $GITHUB_STEP_SUMMARY + else + echo "Error: $REPORT_LINK" >> $GITHUB_STEP_SUMMARY + exit 1 + fi + fi diff --git a/.github/actions/create_workflow_report/ci_run_report.html.jinja b/.github/actions/create_workflow_report/ci_run_report.html.jinja new file mode 100644 index 000000000000..ac9a7a70bb01 --- /dev/null +++ b/.github/actions/create_workflow_report/ci_run_report.html.jinja @@ -0,0 +1,272 @@ + + + + + + + + {%- if is_preview %} + + {%- endif %} + + {{ title }} + + + + +

+<body>
+<h1>{{ title }}</h1>
+<table>
+    <tr><td>Pull Request</td><td>{{ pr_info_html }}</td></tr>
+    <tr><td>Workflow Run</td><td>{{ workflow_id }}</td></tr>
+    <tr><td>Commit</td><td>{{ commit_sha }}</td></tr>
+    <tr><td>Build Report</td><td>{% for job_name, link in build_report_links.items() %}<a href="{{ link }}">[{{ job_name }}]</a> {% endfor %}</td></tr>
+    <tr><td>Date</td><td>{{ date }}</td></tr>
+</table>
+{% if is_preview %}
+<p>This is a preview. The workflow is not yet finished.</p>
+{% endif %}
+<h2>Table of Contents</h2>
+{%- if pr_number != 0 -%}
+<h2>New Fails in PR</h2>
+<p>Compared with base sha {{ base_sha }}</p>
+{{ new_fails_html }}
+{%- endif %}
+<h2>CI Jobs Status</h2>
+{{ ci_jobs_status_html }}
+<h2>Checks Errors</h2>
+{{ checks_errors_html }}
+<h2>Checks New Fails</h2>
+{{ checks_fails_html }}
+<h2>Regression New Fails</h2>
+{{ regression_fails_html }}
+<h2>Docker Images CVEs</h2>
+{{ docker_images_cves_html }}
+<h2>Checks Known Fails</h2>
+<p>
+    Fail reason conventions:<br>
+    KNOWN - Accepted fail and fix is not planned<br>
+    INVESTIGATE - We don't know why it fails<br>
+    NEEDSFIX - Investigation done and a fix is needed to make it pass
+</p>
+ {{ checks_known_fails_html }} + + + + \ No newline at end of file diff --git a/.github/actions/create_workflow_report/create_workflow_report.py b/.github/actions/create_workflow_report/create_workflow_report.py new file mode 100755 index 000000000000..f5c25b3e6e36 --- /dev/null +++ b/.github/actions/create_workflow_report/create_workflow_report.py @@ -0,0 +1,924 @@ +#!/usr/bin/env python3 +import argparse +import os +from pathlib import Path +from itertools import combinations +import json +from datetime import datetime +from functools import lru_cache +from glob import glob +import urllib.parse +import re + +import pandas as pd +from jinja2 import Environment, FileSystemLoader +import requests +from clickhouse_driver import Client +import boto3 +from botocore.exceptions import NoCredentialsError +import yaml + + +DATABASE_HOST_VAR = "CHECKS_DATABASE_HOST" +DATABASE_USER_VAR = "CLICKHOUSE_TEST_STAT_LOGIN" +DATABASE_PASSWORD_VAR = "CLICKHOUSE_TEST_STAT_PASSWORD" +S3_BUCKET = "altinity-build-artifacts" +GITHUB_REPO = "Altinity/ClickHouse" +GITHUB_TOKEN = os.getenv("GITHUB_TOKEN") or os.getenv("GH_TOKEN") + +def get_commit_statuses(sha: str) -> pd.DataFrame: + """ + Fetch commit statuses for a given SHA and return as a pandas DataFrame. + Handles pagination to get all statuses. + + Args: + sha (str): Commit SHA to fetch statuses for. + + Returns: + pd.DataFrame: DataFrame containing all statuses. + """ + headers = { + "Authorization": f"token {GITHUB_TOKEN}", + "Accept": "application/vnd.github.v3+json", + } + + url = f"https://api.github.com/repos/{GITHUB_REPO}/commits/{sha}/statuses" + + all_data = [] + + while url: + response = requests.get(url, headers=headers) + + if response.status_code != 200: + raise Exception( + f"Failed to fetch statuses: {response.status_code} {response.text}" + ) + + data = response.json() + all_data.extend(data) + + # Check for pagination links in the response headers + if "Link" in response.headers: + links = response.headers["Link"].split(",") + next_url = None + + for link in links: + parts = link.strip().split(";") + if len(parts) == 2 and 'rel="next"' in parts[1]: + next_url = parts[0].strip("<>") + break + + url = next_url + else: + url = None + + # Parse relevant fields + parsed = [ + { + "job_name": item["context"], + "job_status": item["state"], + "message": item["description"], + "results_link": item["target_url"], + } + for item in all_data + ] + + # Create DataFrame + df = pd.DataFrame(parsed) + + # Drop duplicates keeping the first occurrence (newest status for each context) + # GitHub returns statuses in reverse chronological order + df = df.drop_duplicates(subset=["job_name"], keep="first") + + # Sort by status and job name + return df.sort_values( + by=["job_status", "job_name"], ascending=[True, True] + ).reset_index(drop=True) + + +def get_pr_info_from_number(pr_number: str) -> dict: + """ + Fetch pull request information for a given PR number. + + Args: + pr_number (str): Pull request number to fetch information for. + + Returns: + dict: Dictionary containing PR information. 
+ """ + headers = { + "Authorization": f"token {GITHUB_TOKEN}", + "Accept": "application/vnd.github.v3+json", + } + + url = f"https://api.github.com/repos/{GITHUB_REPO}/pulls/{pr_number}" + response = requests.get(url, headers=headers) + + if response.status_code != 200: + raise Exception( + f"Failed to fetch pull request info: {response.status_code} {response.text}" + ) + + return response.json() + + +def get_run_details(run_id: str) -> dict: + """ + Fetch run details for a given run URL. + """ + headers = { + "Authorization": f"token {GITHUB_TOKEN}", + "Accept": "application/vnd.github.v3+json", + } + + url = f"https://api.github.com/repos/{GITHUB_REPO}/actions/runs/{run_id}" + response = requests.get(url, headers=headers) + + if response.status_code != 200: + raise Exception( + f"Failed to fetch run details: {response.status_code} {response.text}" + ) + + return response.json() + + +def get_checks_fails(client: Client, commit_sha: str, branch_name: str): + """ + Get tests that did not succeed for the given commit and branch. + Exclude checks that have status 'error' as they are counted in get_checks_errors. + """ + query = f"""SELECT job_status, job_name, status as test_status, test_name, results_link + FROM ( + SELECT + argMax(check_status, check_start_time) as job_status, + check_name as job_name, + argMax(test_status, check_start_time) as status, + test_name, + report_url as results_link, + task_url + FROM `gh-data`.checks + WHERE commit_sha='{commit_sha}' AND head_ref='{branch_name}' + GROUP BY check_name, test_name, report_url, task_url + ) + WHERE test_status IN ('FAIL', 'ERROR') + AND job_status!='error' + ORDER BY job_name, test_name + """ + return client.query_dataframe(query) + + +def get_broken_tests_rules(broken_tests_file_path): + with open(broken_tests_file_path, "r", encoding="utf-8") as broken_tests_file: + broken_tests = yaml.safe_load(broken_tests_file) + + compiled_rules = {"exact": {}, "pattern": {}} + + for test in broken_tests: + regex = test.get("regex") is True + rule = { + "reason": test["reason"], + } + + if test.get("check_types"): + rule["check_types"] = test["check_types"] + + if regex: + rule["regex"] = True + compiled_rules["pattern"][re.compile(test["name"])] = rule + else: + compiled_rules["exact"][test["name"]] = rule + + return compiled_rules + + +def get_known_fail_reason(test_name: str, check_name: str, known_fails: dict): + """ + Returns the reason why a test is known to fail based on its name and build context. + + - Exact-name rules are checked first. + - Pattern-name rules are checked next (first match wins). + - Message/not_message conditions are ignored. + """ + # 1. Exact-name rules + rule_data = known_fails["exact"].get(test_name) + if rule_data: + check_types = rule_data.get("check_types", []) + if not check_types or any( + check_type in check_name for check_type in check_types + ): + return rule_data["reason"] + + # 2. Pattern-name rules + for name_re, rule_data in known_fails["pattern"].items(): + if name_re.fullmatch(test_name): + check_types = rule_data.get("check_types", []) + if not check_types or any( + check_type in check_name for check_type in check_types + ): + return rule_data["reason"] + + return "No reason given" + + +def get_checks_known_fails( + client: Client, commit_sha: str, branch_name: str, known_fails: dict +): + """ + Get tests that are known to fail for the given commit and branch. 
+ """ + if len(known_fails) == 0: + return pd.DataFrame() + + query = f"""SELECT job_name, status as test_status, test_name, results_link + FROM ( + SELECT + check_name as job_name, + argMax(test_status, check_start_time) as status, + test_name, + report_url as results_link, + task_url + FROM `gh-data`.checks + WHERE commit_sha='{commit_sha}' AND head_ref='{branch_name}' + GROUP BY check_name, test_name, report_url, task_url + ) + WHERE test_status='BROKEN' + ORDER BY job_name, test_name + """ + + df = client.query_dataframe(query) + + if df.shape[0] == 0: + return df + + df.insert( + len(df.columns) - 1, + "reason", + df.apply( + lambda row: get_known_fail_reason( + row["test_name"], row["job_name"], known_fails + ), + axis=1, + ), + ) + + return df + + +def get_checks_errors(client: Client, commit_sha: str, branch_name: str): + """ + Get checks that have status 'error' for the given commit and branch. + """ + query = f"""SELECT job_status, job_name, status as test_status, test_name, results_link + FROM ( + SELECT + argMax(check_status, check_start_time) as job_status, + check_name as job_name, + argMax(test_status, check_start_time) as status, + test_name, + report_url as results_link, + task_url + FROM `gh-data`.checks + WHERE commit_sha='{commit_sha}' AND head_ref='{branch_name}' + GROUP BY check_name, test_name, report_url, task_url + ) + WHERE job_status=='error' + ORDER BY job_name, test_name + """ + return client.query_dataframe(query) + + +def drop_prefix_rows(df, column_to_clean): + """ + Drop rows from the dataframe if: + - the row matches another row completely except for the specified column + - the specified column of that row is a prefix of the same column in another row + """ + to_drop = set() + reference_columns = [col for col in df.columns if col != column_to_clean] + for (i, row_1), (j, row_2) in combinations(df.iterrows(), 2): + if all(row_1[col] == row_2[col] for col in reference_columns): + if row_2[column_to_clean].startswith(row_1[column_to_clean]): + to_drop.add(i) + elif row_1[column_to_clean].startswith(row_2[column_to_clean]): + to_drop.add(j) + return df.drop(to_drop) + + +def get_regression_fails(client: Client, job_url: str): + """ + Get regression tests that did not succeed for the given job URL. + """ + # If you rename the alias for report_url, also update the formatters in format_results_as_html_table + # Nested SELECT handles test reruns + query = f"""SELECT arch, job_name, status, test_name, results_link + FROM ( + SELECT + architecture as arch, + test_name, + argMax(result, start_time) AS status, + job_name, + report_url as results_link, + job_url + FROM `gh-data`.clickhouse_regression_results + GROUP BY architecture, test_name, job_url, job_name, report_url + ORDER BY length(test_name) DESC + ) + WHERE job_url LIKE '{job_url}%' + AND status IN ('Fail', 'Error') + """ + df = client.query_dataframe(query) + df = drop_prefix_rows(df, "test_name") + df["job_name"] = df["job_name"].str.title() + return df + + +def get_new_fails_this_pr( + client: Client, + pr_info: dict, + checks_fails: pd.DataFrame, + regression_fails: pd.DataFrame, +): + """ + Get tests that failed in the PR but passed in the base branch. + Compares both checks and regression test results. 
+ """ + base_sha = pr_info.get("base", {}).get("sha") + if not base_sha: + raise Exception("No base SHA found for PR") + + # Modify tables to have the same columns + if len(checks_fails) > 0: + checks_fails = checks_fails.copy().drop(columns=["job_status"]) + if len(regression_fails) > 0: + regression_fails = regression_fails.copy() + regression_fails["job_name"] = regression_fails.apply( + lambda row: f"{row['arch']} {row['job_name']}".strip(), axis=1 + ) + regression_fails["test_status"] = regression_fails["status"] + + # Combine both types of fails and select only desired columns + desired_columns = ["job_name", "test_name", "test_status", "results_link"] + all_pr_fails = pd.concat([checks_fails, regression_fails], ignore_index=True)[ + desired_columns + ] + if len(all_pr_fails) == 0: + return pd.DataFrame() + + # Get all checks from the base branch that didn't fail + base_checks_query = f"""SELECT job_name, status as test_status, test_name, results_link + FROM ( + SELECT + check_name as job_name, + argMax(test_status, check_start_time) as status, + test_name, + report_url as results_link, + task_url + FROM `gh-data`.checks + WHERE commit_sha='{base_sha}' + GROUP BY check_name, test_name, report_url, task_url + ) + WHERE test_status NOT IN ('FAIL', 'ERROR') + ORDER BY job_name, test_name + """ + base_checks = client.query_dataframe(base_checks_query) + + # Get regression results from base branch that didn't fail + base_regression_query = f"""SELECT arch, job_name, status, test_name, results_link + FROM ( + SELECT + architecture as arch, + test_name, + argMax(result, start_time) AS status, + job_url, + job_name, + report_url as results_link + FROM `gh-data`.clickhouse_regression_results + WHERE results_link LIKE'%/{base_sha}/%' + GROUP BY architecture, test_name, job_url, job_name, report_url + ORDER BY length(test_name) DESC + ) + WHERE status NOT IN ('Fail', 'Error') + """ + base_regression = client.query_dataframe(base_regression_query) + if len(base_regression) > 0: + base_regression["job_name"] = base_regression.apply( + lambda row: f"{row['arch']} {row['job_name']}".strip(), axis=1 + ) + base_regression["test_status"] = base_regression["status"] + base_regression = base_regression.drop(columns=["arch", "status"]) + + # Combine base results + base_results = pd.concat([base_checks, base_regression], ignore_index=True) + + # Find tests that failed in PR but passed in base + pr_failed_tests = set(zip(all_pr_fails["job_name"], all_pr_fails["test_name"])) + base_passed_tests = set(zip(base_results["job_name"], base_results["test_name"])) + + new_fails = pr_failed_tests.intersection(base_passed_tests) + + # Filter PR results to only include new fails + mask = all_pr_fails.apply( + lambda row: (row["job_name"], row["test_name"]) in new_fails, axis=1 + ) + new_fails_df = all_pr_fails[mask] + + return new_fails_df + + +@lru_cache +def get_workflow_config() -> dict: + + # 25.12+ + if os.path.exists("./ci/tmp/workflow_status.json"): + with open("./ci/tmp/workflow_status.json", "r") as f: + return json.loads(json.load(f)["config_workflow"]["outputs"]["data"])[ + "workflow_config" + ] + + workflow_config_files = glob("./ci/tmp/workflow_config*.json") + if len(workflow_config_files) == 0: + raise Exception("No workflow config file found") + if len(workflow_config_files) > 1: + raise Exception("Multiple workflow config files found") + with open(workflow_config_files[0], "r") as f: + return json.load(f) + + +def get_cached_job(job_name: str) -> dict: + workflow_config = get_workflow_config() + 
return workflow_config["cache_jobs"].get(job_name, {}) + + +def get_cves(pr_number, commit_sha, branch): + """ + Fetch Grype results from S3. + + If no results are available for download, returns ... (Ellipsis). + """ + s3_client = boto3.client("s3", endpoint_url=os.getenv("S3_URL")) + prefixes_to_check = set() + + def format_prefix(pr_number, commit_sha, branch): + if pr_number == 0: + return f"REFs/{branch}/{commit_sha}/grype/" + else: + return f"PRs/{pr_number}/{commit_sha}/grype/" + + cached_server_job = get_cached_job("Docker server image") + if cached_server_job: + prefixes_to_check.add( + format_prefix( + cached_server_job["pr_number"], + cached_server_job["sha"], + cached_server_job["branch"], + ) + ) + cached_keeper_job = get_cached_job("Docker keeper image") + if cached_keeper_job: + prefixes_to_check.add( + format_prefix( + cached_keeper_job["pr_number"], + cached_keeper_job["sha"], + cached_keeper_job["branch"], + ) + ) + + if not prefixes_to_check: + prefixes_to_check = {format_prefix(pr_number, commit_sha, branch)} + + grype_result_dirs = [] + for s3_prefix in prefixes_to_check: + try: + response = s3_client.list_objects_v2( + Bucket=S3_BUCKET, Prefix=s3_prefix, Delimiter="/" + ) + grype_result_dirs.extend( + content["Prefix"] for content in response.get("CommonPrefixes", []) + ) + except Exception as e: + print(f"Error listing S3 objects at {s3_prefix}: {e}") + continue + + if len(grype_result_dirs) == 0: + # We were asked to check the CVE data, but none was found, + # maybe this is a preview report and grype results are not available yet + return ... + + results = [] + for path in grype_result_dirs: + file_key = f"{path}result.json" + try: + file_response = s3_client.get_object(Bucket=S3_BUCKET, Key=file_key) + content = file_response["Body"].read().decode("utf-8") + results.append(json.loads(content)) + except Exception as e: + print(f"Error getting S3 object at {file_key}: {e}") + continue + + rows = [] + for scan_result in results: + for match in scan_result["matches"]: + rows.append( + { + "docker_image": scan_result["source"]["target"]["userInput"], + "severity": match["vulnerability"]["severity"], + "identifier": match["vulnerability"]["id"], + "namespace": match["vulnerability"]["namespace"], + } + ) + + if len(rows) == 0: + return pd.DataFrame() + + df = pd.DataFrame(rows).drop_duplicates() + df = df.sort_values( + by="severity", + key=lambda col: col.str.lower().map( + {"critical": 1, "high": 2, "medium": 3, "low": 4, "negligible": 5} + ), + ) + return df + + +def url_to_html_link(url: str) -> str: + if not url: + return "" + text = url.split("/")[-1].split("?")[0] + if not text: + text = "results" + return f'{text}' + + +def format_test_name_for_linewrap(text: str) -> str: + """Tweak the test name to improve line wrapping.""" + return f'{text}' + + +def format_test_status(text: str) -> str: + """Format the test status for better readability.""" + if text.lower().startswith("fail"): + color = "red" + elif text.lower() == "skipped": + color = "grey" + elif text.lower() in ("success", "ok", "passed", "pass"): + color = "green" + else: + color = "orange" + + return f'{text}' + + +def format_results_as_html_table(results) -> str: + if len(results) == 0: + return "
" + results.columns = [col.replace("_", " ").title() for col in results.columns] + html = results.to_html( + index=False, + formatters={ + "Results Link": url_to_html_link, + "Test Name": format_test_name_for_linewrap, + "Test Status": format_test_status, + "Job Status": format_test_status, + "Status": format_test_status, + "Message": lambda m: m.replace("\n", " "), + "Identifier": lambda i: url_to_html_link( + "https://nvd.nist.gov/vuln/detail/" + i + ), + }, + escape=False, + border=0, + classes=["test-results-table"], + ) + return html + + +def backfill_skipped_statuses( + job_statuses: pd.DataFrame, pr_number: int, branch: str, commit_sha: str +): + """ + Fill in the job statuses for skipped jobs. + """ + + if pr_number == 0: + ref_param = f"REF={branch}" + workflow_name = "MasterCI" + else: + ref_param = f"PR={pr_number}" + workflow_name = "PR" + + status_file = f"result_{workflow_name.lower()}.json" + s3_path = f"https://{S3_BUCKET}.s3.amazonaws.com/{ref_param.replace('=', 's/')}/{commit_sha}/{status_file}" + response = requests.get(s3_path) + + if response.status_code != 200: + return job_statuses + + status_data = response.json() + skipped_jobs = [] + for job in status_data["results"]: + if job["status"] == "skipped" and len(job["links"]) > 0: + skipped_jobs.append( + { + "job_name": job["name"], + "job_status": job["status"], + "message": job["info"], + "results_link": job["links"][0], + } + ) + + return pd.concat([job_statuses, pd.DataFrame(skipped_jobs)], ignore_index=True) + + +def get_build_report_links( + job_statuses: pd.DataFrame, pr_number: int, branch: str, commit_sha: str +): + """ + Get the build report links for the given PR number, branch, and commit SHA. + + First checks if a build job submitted a success or skipped status. + If not available, it guesses the links. + """ + build_job_names = [ + "Build (amd_release)", + "Build (arm_release)", + "Docker server image", + "Docker keeper image", + ] + build_report_links = {} + + for job in job_statuses.itertuples(): + if ( + job.job_name in build_job_names + and job.job_status + in ( + "success", + "skipped", + ) + and job.results_link + ): + build_report_links[job.job_name] = job.results_link + + if 0 < len(build_report_links) < len(build_job_names): + # Only have some of the build jobs, guess the rest. + # (It was straightforward to force the build jobs to always appear in the cache, + # however doing the same for the docker image jobs is difficult.) 
+ ref_job, ref_link = list(build_report_links.items())[0] + link_template = ref_link.replace( + urllib.parse.quote(ref_job, safe=""), "{job_name}" + ) + for job in build_job_names: + if job not in build_report_links: + build_report_links[job] = link_template.format(job_name=job) + + if len(build_report_links) > 0: + return build_report_links + + # No cache or build result was found, guess the links + if pr_number == 0: + ref_param = f"REF={branch}" + workflow_name = "MasterCI" + else: + ref_param = f"PR={pr_number}" + workflow_name = "PR" + + build_report_link_base = f"https://{S3_BUCKET}.s3.amazonaws.com/json.html?{ref_param}&sha={commit_sha}&name_0={urllib.parse.quote(workflow_name, safe='')}" + build_report_links = { + job_name: f"{build_report_link_base}&name_1={urllib.parse.quote(job_name, safe='')}" + for job_name in build_job_names + } + return build_report_links + + +def parse_args() -> argparse.Namespace: + parser = argparse.ArgumentParser(description="Create a combined CI report.") + parser.add_argument( # Need the full URL rather than just the ID to query the databases + "--actions-run-url", required=True, help="URL of the actions run" + ) + parser.add_argument( + "--pr-number", help="Pull request number for the S3 path", type=int + ) + parser.add_argument("--commit-sha", help="Commit SHA for the S3 path") + parser.add_argument( + "--no-upload", action="store_true", help="Do not upload the report" + ) + parser.add_argument( + "--known-fails", type=str, help="Path to the file with known fails" + ) + parser.add_argument( + "--cves", action="store_true", help="Get CVEs from Grype results" + ) + parser.add_argument( + "--mark-preview", action="store_true", help="Mark the report as a preview" + ) + return parser.parse_args() + + +def create_workflow_report( + actions_run_url: str, + pr_number: int = None, + commit_sha: str = None, + no_upload: bool = False, + known_fails_file_path: str = None, + check_cves: bool = False, + mark_preview: bool = False, +) -> str: + + host = os.getenv(DATABASE_HOST_VAR) + if not host: + print(f"{DATABASE_HOST_VAR} is not set") + user = os.getenv(DATABASE_USER_VAR) + if not user: + print(f"{DATABASE_USER_VAR} is not set") + password = os.getenv(DATABASE_PASSWORD_VAR) + if not password: + print(f"{DATABASE_PASSWORD_VAR} is not set") + if not GITHUB_TOKEN: + print("GITHUB_TOKEN is not set") + if not all([host, user, password, GITHUB_TOKEN]): + raise Exception("Required environment variables are not set") + + run_id = actions_run_url.split("/")[-1] + + run_details = get_run_details(run_id) + branch_name = run_details.get("head_branch", "unknown branch") + if pr_number is None or commit_sha is None: + if pr_number is None: + if len(run_details["pull_requests"]) > 0: + pr_number = run_details["pull_requests"][0]["number"] + else: + pr_number = 0 + if commit_sha is None: + commit_sha = run_details["head_commit"]["id"] + + db_client = Client( + host=host, + user=user, + password=password, + port=9440, + secure="y", + verify=False, + settings={"use_numpy": True}, + ) + + fail_results = { + "job_statuses": get_commit_statuses(commit_sha), + "checks_fails": get_checks_fails(db_client, commit_sha, branch_name), + "checks_known_fails": [], + "pr_new_fails": [], + "checks_errors": get_checks_errors(db_client, commit_sha, branch_name), + "regression_fails": get_regression_fails(db_client, actions_run_url), + "docker_images_cves": ( + [] if not check_cves else get_cves(pr_number, commit_sha, branch_name) + ), + } + + # get_cves returns ... 
in the case where no Grype result files were found. + # This might occur when run in preview mode. + cves_not_checked = not check_cves or fail_results["docker_images_cves"] is ... + + if known_fails_file_path: + if not os.path.exists(known_fails_file_path): + print(f"WARNING:Known fails file {known_fails_file_path} not found.") + else: + known_fails = get_broken_tests_rules(known_fails_file_path) + + fail_results["checks_known_fails"] = get_checks_known_fails( + db_client, commit_sha, branch_name, known_fails + ) + + if pr_number == 0: + pr_info_html = f"Release ({branch_name})" + else: + try: + pr_info = get_pr_info_from_number(pr_number) + pr_info_html = f""" + #{pr_info.get("number")} ({pr_info.get("base", {}).get('ref')} <- {pr_info.get("head", {}).get('ref')}) {pr_info.get("title")} + """ + fail_results["pr_new_fails"] = get_new_fails_this_pr( + db_client, + pr_info, + fail_results["checks_fails"], + fail_results["regression_fails"], + ) + except Exception as e: + pr_info_html = e + + fail_results["job_statuses"] = backfill_skipped_statuses( + fail_results["job_statuses"], pr_number, branch_name, commit_sha + ) + + high_cve_count = 0 + if not cves_not_checked and len(fail_results["docker_images_cves"]) > 0: + high_cve_count = ( + fail_results["docker_images_cves"]["severity"] + .str.lower() + .isin(("high", "critical")) + .sum() + ) + + # Load the template + template = Environment( + loader=FileSystemLoader(os.path.dirname(__file__)) + ).get_template("ci_run_report.html.jinja") + + # Define the context for rendering + context = { + "title": "ClickHouse® CI Workflow Run Report", + "github_repo": GITHUB_REPO, + "s3_bucket": S3_BUCKET, + "pr_info_html": pr_info_html, + "pr_number": pr_number, + "workflow_id": run_id, + "commit_sha": commit_sha, + "base_sha": "" if pr_number == 0 else pr_info.get("base", {}).get("sha"), + "date": f"{datetime.utcnow().strftime('%Y-%m-%d %H:%M:%S')} UTC", + "is_preview": mark_preview, + "counts": { + "jobs_status": f"{sum(fail_results['job_statuses']['job_status'].value_counts().get(x, 0) for x in ('failure', 'error'))} fail/error", + "checks_errors": len(fail_results["checks_errors"]), + "checks_new_fails": len(fail_results["checks_fails"]), + "regression_new_fails": len(fail_results["regression_fails"]), + "cves": "N/A" if cves_not_checked else f"{high_cve_count} high/critical", + "checks_known_fails": ( + "N/A" if not known_fails else len(fail_results["checks_known_fails"]) + ), + "pr_new_fails": len(fail_results["pr_new_fails"]), + }, + "build_report_links": get_build_report_links( + fail_results["job_statuses"], pr_number, branch_name, commit_sha + ), + "ci_jobs_status_html": format_results_as_html_table( + fail_results["job_statuses"] + ), + "checks_errors_html": format_results_as_html_table( + fail_results["checks_errors"] + ), + "checks_fails_html": format_results_as_html_table(fail_results["checks_fails"]), + "regression_fails_html": format_results_as_html_table( + fail_results["regression_fails"] + ), + "docker_images_cves_html": ( + "
" + if cves_not_checked + else format_results_as_html_table(fail_results["docker_images_cves"]) + ), + "checks_known_fails_html": ( + "
" + if not known_fails + else format_results_as_html_table(fail_results["checks_known_fails"]) + ), + "new_fails_html": format_results_as_html_table(fail_results["pr_new_fails"]), + } + + # Render the template with the context + rendered_html = template.render(context) + + report_name = "ci_run_report.html" + report_path = Path(report_name) + report_path.write_text(rendered_html, encoding="utf-8") + + if no_upload: + print(f"Report saved to {report_path}") + exit(0) + + if pr_number == 0: + report_destination_key = f"REFs/{branch_name}/{commit_sha}" + else: + report_destination_key = f"PRs/{pr_number}/{commit_sha}" + + report_destination_key += f"/{run_id}/{report_name}" + + # Upload the report to S3 + s3_client = boto3.client("s3", endpoint_url=os.getenv("S3_URL")) + + try: + s3_client.put_object( + Bucket=S3_BUCKET, + Key=report_destination_key, + Body=rendered_html, + ContentType="text/html; charset=utf-8", + ) + except NoCredentialsError: + print("Credentials not available for S3 upload.") + + return f"https://s3.amazonaws.com/{S3_BUCKET}/" + report_destination_key + + +def main(): + args = parse_args() + + report_url = create_workflow_report( + args.actions_run_url, + args.pr_number, + args.commit_sha, + args.no_upload, + args.known_fails, + args.cves, + args.mark_preview, + ) + + print(report_url) + + +if __name__ == "__main__": + main() diff --git a/.github/actions/create_workflow_report/workflow_report_hook.sh b/.github/actions/create_workflow_report/workflow_report_hook.sh new file mode 100755 index 000000000000..04a09a9ee3ca --- /dev/null +++ b/.github/actions/create_workflow_report/workflow_report_hook.sh @@ -0,0 +1,7 @@ +#!/bin/bash +# This script is for generating preview reports when invoked as a post-hook from a praktika job +pip install clickhouse-driver==0.2.8 numpy==1.26.4 pandas==2.0.3 jinja2==3.1.5 +ARGS="--mark-preview --known-fails tests/broken_tests.yaml --cves --actions-run-url $GITHUB_SERVER_URL/$GITHUB_REPOSITORY/actions/runs/$GITHUB_RUN_ID --pr-number $PR_NUMBER" +CMD="python3 .github/actions/create_workflow_report/create_workflow_report.py" +$CMD $ARGS + diff --git a/.github/actions/docker_setup/action.yml b/.github/actions/docker_setup/action.yml new file mode 100644 index 000000000000..56f713fa59d1 --- /dev/null +++ b/.github/actions/docker_setup/action.yml @@ -0,0 +1,32 @@ +name: Docker setup +description: Setup docker +inputs: + test_name: + description: name of the test, used in determining ipv6 configs. 
+ default: None + type: string +runs: + using: "composite" + steps: + - name: Docker IPv6 configuration + shell: bash + if: ${{ contains(inputs.test_name, 'Stateless') }} + env: + ipv6_subnet: 2001:3984:3989::/64 + run: | + # make sure docker uses proper IPv6 config + sudo touch /etc/docker/daemon.json + sudo chown ubuntu:ubuntu /etc/docker/daemon.json + sudo cat < /etc/docker/daemon.json + { + "ipv6": true, + "fixed-cidr-v6": "${{ env.ipv6_subnet }}" + } + EOT + sudo chown root:root /etc/docker/daemon.json + sudo systemctl restart docker + sudo systemctl status docker + - name: Docker info + shell: bash + run: | + docker info diff --git a/.github/actions/runner_setup/action.yml b/.github/actions/runner_setup/action.yml new file mode 100644 index 000000000000..5a229fdd47e7 --- /dev/null +++ b/.github/actions/runner_setup/action.yml @@ -0,0 +1,19 @@ +name: Setup +description: Setup environment +runs: + using: "composite" + steps: + - name: Setup zram + shell: bash + run: | + sudo modprobe zram + MemTotal=$(grep -Po "(?<=MemTotal:)\s+\d+" /proc/meminfo) # KiB + Percent=200 + ZRAM_SIZE=$(($MemTotal / 1024 / 1024 * $Percent / 100)) # Convert to GiB + .github/retry.sh 30 2 sudo zramctl --size ${ZRAM_SIZE}GiB --algorithm zstd /dev/zram0 + sudo mkswap /dev/zram0 && sudo swapon -p 100 /dev/zram0 + sudo sysctl vm.swappiness=200 + - name: Install awscli + shell: bash + run: | + .github/retry.sh 10 30 sudo apt-get install -y awscli diff --git a/.github/dco.yml b/.github/dco.yml new file mode 100644 index 000000000000..a0a2aae8f55a --- /dev/null +++ b/.github/dco.yml @@ -0,0 +1,17 @@ +# The configuration file must be named `dco.yml` and placed in the `.github` directory in the default branch of the repository. +# +# This configuration file is backwards compatible with the *dcoapp/app* (https://github.com/dcoapp/app) configuration file. 
+ +# https://github.com/cncf/dco2?#remediation-commits +allowRemediationCommits: + # Allow individual remediation commits + # https://github.com/cncf/dco2?#individual + individual: true + # Allow third-party remediation commits + # https://github.com/cncf/dco2?#third-party + thirdParty: false + +require: + # Members are required to sign-off commits + # https://github.com/cncf/dco2?#skipping-sign-off-for-organization-members + members: false diff --git a/.github/grype/parse_vulnerabilities_grype.py b/.github/grype/parse_vulnerabilities_grype.py new file mode 100644 index 000000000000..fec2ef3bfac7 --- /dev/null +++ b/.github/grype/parse_vulnerabilities_grype.py @@ -0,0 +1,32 @@ +#!/usr/bin/env python3 +import json + +from testflows.core import * + +xfails = {} + + +@Name("docker vulnerabilities") +@XFails(xfails) +@TestModule +def docker_vulnerabilities(self): + with Given("I gather grype scan results"): + with open("./result.json", "r") as f: + results = json.load(f) + + for vulnerability in results["matches"]: + with Test( + f"{vulnerability['vulnerability']['id']}@{vulnerability['vulnerability']['namespace']},{vulnerability['vulnerability']['severity']}", + flags=TE, + ): + note(vulnerability) + critical_levels = set(["HIGH", "CRITICAL"]) + if vulnerability['vulnerability']["severity"].upper() in critical_levels: + with Then( + f"Found vulnerability of {vulnerability['vulnerability']['severity']} severity" + ): + result(Fail) + + +if main(): + docker_vulnerabilities() diff --git a/.github/grype/run_grype_scan.sh b/.github/grype/run_grype_scan.sh new file mode 100755 index 000000000000..af428e37d669 --- /dev/null +++ b/.github/grype/run_grype_scan.sh @@ -0,0 +1,18 @@ +set -x +set -e + +IMAGE=$1 + +GRYPE_VERSION=${GRYPE_VERSION:-"v0.92.2"} + +docker pull $IMAGE +docker pull anchore/grype:${GRYPE_VERSION} + +docker run \ + --rm --volume /var/run/docker.sock:/var/run/docker.sock \ + --name Grype anchore/grype:${GRYPE_VERSION} \ + --scope all-layers \ + -o json \ + $IMAGE > result.json + +ls -sh diff --git a/.github/grype/transform_and_upload_results_s3.sh b/.github/grype/transform_and_upload_results_s3.sh new file mode 100755 index 000000000000..38674d7a2a26 --- /dev/null +++ b/.github/grype/transform_and_upload_results_s3.sh @@ -0,0 +1,20 @@ +DOCKER_IMAGE=$(echo "$DOCKER_IMAGE" | sed 's/[\/:]/_/g') + +if [ "$PR_NUMBER" -eq 0 ]; then + PREFIX="REFs/$GITHUB_REF_NAME/$COMMIT_SHA/grype/$DOCKER_IMAGE" +else + PREFIX="PRs/$PR_NUMBER/$COMMIT_SHA/grype/$DOCKER_IMAGE" +fi + +S3_PATH="s3://$S3_BUCKET/$PREFIX" +HTTPS_RESULTS_PATH="https://$S3_BUCKET.s3.amazonaws.com/index.html#$PREFIX/" +HTTPS_REPORT_PATH="https://s3.amazonaws.com/$S3_BUCKET/$PREFIX/results.html" +echo "https_report_path=$HTTPS_REPORT_PATH" >> $GITHUB_OUTPUT + +tfs --no-colors transform nice raw.log nice.log.txt +tfs --no-colors report results -a $HTTPS_RESULTS_PATH raw.log - --copyright "Altinity LTD" | tfs --no-colors document convert > results.html + +aws s3 cp --no-progress nice.log.txt $S3_PATH/nice.log.txt --content-type "text/plain; charset=utf-8" || echo "nice log file not found". +aws s3 cp --no-progress results.html $S3_PATH/results.html || echo "results file not found". +aws s3 cp --no-progress raw.log $S3_PATH/raw.log || echo "raw.log file not found". +aws s3 cp --no-progress result.json $S3_PATH/result.json --content-type "text/plain; charset=utf-8" || echo "result.json not found". 
\ No newline at end of file diff --git a/.github/retry.sh b/.github/retry.sh new file mode 100755 index 000000000000..566c2cf11315 --- /dev/null +++ b/.github/retry.sh @@ -0,0 +1,22 @@ +#!/bin/bash +# Execute command until exitcode is 0 or +# maximum number of retries is reached +# Example: +# ./retry +retries=$1 +delay=$2 +command="${@:3}" +exitcode=0 +try=0 +until [ "$try" -ge $retries ] +do + echo "$command" + eval "$command" + exitcode=$? + if [ $exitcode -eq 0 ]; then + break + fi + try=$((try+1)) + sleep $2 +done +exit $exitcode diff --git a/.github/workflows/README.md b/.github/workflows/README.md new file mode 100644 index 000000000000..56415c2a7478 --- /dev/null +++ b/.github/workflows/README.md @@ -0,0 +1,13 @@ +## Scheduled Build Run Results + +Results for **the latest** release_workflow scheduled runs. + +| Branch | Status | +| ------------ | - | +| **`antalya`** | [![antalya](https://github.com/Altinity/ClickHouse/actions/workflows/release_branches.yml/badge.svg?branch=antalya&event=workflow_dispatch)](https://github.com/Altinity/ClickHouse/actions/workflows/release_branches.yml?query=branch%3Aantalya) | +| **`project-antalya-24.12.2`** | [![project-antalya-24.12.2](https://github.com/Altinity/ClickHouse/actions/workflows/release_branches.yml/badge.svg?branch=project-antalya-24.12.2&event=workflow_dispatch)](https://github.com/Altinity/ClickHouse/actions/workflows/release_branches.yml?query=branch%3Aproject-antalya-24.12.2) | +| **`customizations/22.8.21`** | [![customizations/22.8.21](https://github.com/Altinity/ClickHouse/actions/workflows/release_branches.yml/badge.svg?branch=customizations/22.8.21&event=workflow_dispatch)](https://github.com/Altinity/ClickHouse/actions/workflows/release_branches.yml?query=branch%3Acustomizations/22.8.21) | +| **`customizations/23.3.19`** | [![customizations/23.3.19](https://github.com/Altinity/ClickHouse/actions/workflows/release_branches.yml/badge.svg?branch=customizations/23.3.19&event=workflow_dispatch)](https://github.com/Altinity/ClickHouse/actions/workflows/release_branches.yml?query=branch%3Acustomizations/23.3.19) | +| **`customizations/23.8.16`** | [![customizations/23.8.16](https://github.com/Altinity/ClickHouse/actions/workflows/release_branches.yml/badge.svg?branch=customizations/23.8.16&event=workflow_dispatch)](https://github.com/Altinity/ClickHouse/actions/workflows/release_branches.yml?query=branch%3Acustomizations/23.8.16) | +| **`customizations/24.3.14`** | [![customizations/24.3.14](https://github.com/Altinity/ClickHouse/actions/workflows/release_branches.yml/badge.svg?branch=customizations/24.3.14&event=workflow_dispatch)](https://github.com/Altinity/ClickHouse/actions/workflows/release_branches.yml?query=branch%3Acustomizations/24.3.14) | +| **`customizations/24.8.11`** | [![customizations/24.8.11](https://github.com/Altinity/ClickHouse/actions/workflows/release_branches.yml/badge.svg?branch=customizations/24.8.11&event=workflow_dispatch)](https://github.com/Altinity/ClickHouse/actions/workflows/release_branches.yml?query=branch%3Acustomizations/24.8.11) | diff --git a/.github/workflows/backport_branches.yml b/.github/workflows/backport_branches.yml index 078c0ea4d488..2aea4e6705fc 100644 --- a/.github/workflows/backport_branches.yml +++ b/.github/workflows/backport_branches.yml @@ -3,6 +3,13 @@ name: BackportPR on: + workflow_dispatch: + inputs: + no_cache: + description: Run without cache + required: false + type: boolean + default: false pull_request: branches: ['2[1-9].[1-9][0-9]', '2[1-9].[1-9]'] @@ -10,8 +17,22 
@@ env: # Force the stdout and stderr streams to be unbuffered PYTHONUNBUFFERED: 1 DISABLE_CI_MERGE_COMMIT: ${{ vars.DISABLE_CI_MERGE_COMMIT || '0' }} - DISABLE_CI_CACHE: ${{ vars.DISABLE_CI_CACHE || '0' }} + DISABLE_CI_CACHE: ${{ github.event.inputs.no_cache || '0' }} CHECKOUT_REF: ${{ vars.DISABLE_CI_MERGE_COMMIT == '1' && github.event.pull_request.head.sha || '' }} + DOCKER_PASSWORD: ${{ secrets.DOCKER_PASSWORD }} + CLICKHOUSE_TEST_STAT_URL: ${{ secrets.CLICKHOUSE_TEST_STAT_URL }} + CLICKHOUSE_TEST_STAT_LOGIN: ${{ secrets.CLICKHOUSE_TEST_STAT_LOGIN }} + CLICKHOUSE_TEST_STAT_PASSWORD: ${{ secrets.CLICKHOUSE_TEST_STAT_PASSWORD }} + AWS_ACCESS_KEY_ID: ${{ secrets.AWS_ACCESS_KEY_ID }} + AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_SECRET_ACCESS_KEY }} + AWS_DEFAULT_REGION: ${{ secrets.AWS_DEFAULT_REGION }} + CHECKS_DATABASE_HOST: ${{ secrets.CHECKS_DATABASE_HOST }} + AZURE_STORAGE_KEY: ${{ secrets.AZURE_STORAGE_KEY }} + AZURE_ACCOUNT_NAME: ${{ secrets.AZURE_ACCOUNT_NAME }} + AZURE_CONTAINER_NAME: ${{ secrets.AZURE_CONTAINER_NAME }} + AZURE_STORAGE_ACCOUNT_URL: "https://${{ secrets.AZURE_ACCOUNT_NAME }}.blob.core.windows.net/" + ROBOT_TOKEN: ${{ secrets.ROBOT_TOKEN }} + GH_TOKEN: ${{ github.token }} # Allow updating GH commit statuses and PR comments to post an actual job reports link permissions: write-all @@ -19,7 +40,7 @@ permissions: write-all jobs: config_workflow: - runs-on: [self-hosted, style-checker-aarch64] + runs-on: [self-hosted, altinity-on-demand, altinity-style-checker-aarch64] needs: [] name: "Config Workflow" outputs: @@ -31,6 +52,26 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Config Workflow" + + - name: Note report location to summary + env: + PR_NUMBER: ${{ github.event.pull_request.number || 0 }} + COMMIT_SHA: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }} + run: | + if [ "$PR_NUMBER" -eq 0 ]; then + PREFIX="REFs/$GITHUB_REF_NAME/$COMMIT_SHA" + else + PREFIX="PRs/$PR_NUMBER/$COMMIT_SHA" + fi + REPORT_LINK=https://s3.amazonaws.com/altinity-build-artifacts/$PREFIX/$GITHUB_RUN_ID/ci_run_report.html + echo "Workflow Run Report: [View Report]($REPORT_LINK)" >> $GITHUB_STEP_SUMMARY + - name: Prepare env script run: | rm -rf ./ci/tmp @@ -58,7 +99,7 @@ jobs: fi dockers_build_amd: - runs-on: [self-hosted, style-checker] + runs-on: [self-hosted, altinity-on-demand, altinity-style-checker] needs: [config_workflow] if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'RG9ja2VycyBCdWlsZCAoYW1kKQ==') }} name: "Dockers Build (amd)" @@ -71,6 +112,13 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Dockers Build (amd)" + - name: Prepare env script run: | rm -rf ./ci/tmp @@ -98,7 +146,7 @@ jobs: fi dockers_build_arm: - runs-on: [self-hosted, style-checker-aarch64] + runs-on: [self-hosted, altinity-on-demand, altinity-style-checker-aarch64] needs: [config_workflow] if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 
'RG9ja2VycyBCdWlsZCAoYXJtKQ==') }} name: "Dockers Build (arm)" @@ -111,6 +159,13 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Dockers Build (arm)" + - name: Prepare env script run: | rm -rf ./ci/tmp @@ -138,7 +193,7 @@ jobs: fi build_amd_debug: - runs-on: [self-hosted, arm-large] + runs-on: [self-hosted, altinity-on-demand, altinity-func-tester-aarch64] needs: [config_workflow, dockers_build_amd, dockers_build_arm] if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'QnVpbGQgKGFtZF9kZWJ1Zyk=') }} name: "Build (amd_debug)" @@ -151,6 +206,13 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Build (amd_debug)" + - name: Prepare env script run: | rm -rf ./ci/tmp @@ -178,7 +240,7 @@ jobs: fi build_amd_asan: - runs-on: [self-hosted, arm-large] + runs-on: [self-hosted, altinity-on-demand, altinity-func-tester-aarch64] needs: [config_workflow, dockers_build_amd, dockers_build_arm] if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'QnVpbGQgKGFtZF9hc2FuKQ==') }} name: "Build (amd_asan)" @@ -191,6 +253,13 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Build (amd_asan)" + - name: Prepare env script run: | rm -rf ./ci/tmp @@ -218,7 +287,7 @@ jobs: fi build_amd_tsan: - runs-on: [self-hosted, arm-large] + runs-on: [self-hosted, altinity-on-demand, altinity-func-tester-aarch64] needs: [config_workflow, dockers_build_amd, dockers_build_arm] if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'QnVpbGQgKGFtZF90c2FuKQ==') }} name: "Build (amd_tsan)" @@ -231,6 +300,13 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Build (amd_tsan)" + - name: Prepare env script run: | rm -rf ./ci/tmp @@ -258,7 +334,7 @@ jobs: fi build_amd_release: - runs-on: [self-hosted, arm-large] + runs-on: [self-hosted, altinity-on-demand, altinity-func-tester-aarch64] needs: [config_workflow, dockers_build_amd, dockers_build_arm] if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'QnVpbGQgKGFtZF9yZWxlYXNlKQ==') }} name: "Build (amd_release)" @@ -271,6 +347,13 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Build (amd_release)" + - name: Prepare env script run: | rm -rf ./ci/tmp @@ -298,7 +381,7 @@ jobs: fi build_arm_release: - runs-on: [self-hosted, arm-large] + 
runs-on: [self-hosted, altinity-on-demand, altinity-builder] needs: [config_workflow, dockers_build_amd, dockers_build_arm] if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'QnVpbGQgKGFybV9yZWxlYXNlKQ==') }} name: "Build (arm_release)" @@ -311,6 +394,13 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Build (arm_release)" + - name: Prepare env script run: | rm -rf ./ci/tmp @@ -338,7 +428,7 @@ jobs: fi build_amd_darwin: - runs-on: [self-hosted, amd-large] + runs-on: [self-hosted, altinity-on-demand, altinity-func-tester] needs: [config_workflow, dockers_build_amd, dockers_build_arm] if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'QnVpbGQgKGFtZF9kYXJ3aW4p') }} name: "Build (amd_darwin)" @@ -351,6 +441,13 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Build (amd_darwin)" + - name: Prepare env script run: | rm -rf ./ci/tmp @@ -378,7 +475,7 @@ jobs: fi build_arm_darwin: - runs-on: [self-hosted, arm-large] + runs-on: [self-hosted, altinity-on-demand, altinity-func-tester-aarch64] needs: [config_workflow, dockers_build_amd, dockers_build_arm] if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'QnVpbGQgKGFybV9kYXJ3aW4p') }} name: "Build (arm_darwin)" @@ -391,6 +488,13 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Build (arm_darwin)" + - name: Prepare env script run: | rm -rf ./ci/tmp @@ -418,7 +522,7 @@ jobs: fi docker_server_image: - runs-on: [self-hosted, style-checker] + runs-on: [self-hosted, altinity-on-demand, altinity-style-checker] needs: [config_workflow, dockers_build_amd, dockers_build_arm, build_amd_release, build_arm_release] if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'RG9ja2VyIHNlcnZlciBpbWFnZQ==') }} name: "Docker server image" @@ -431,6 +535,13 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Docker server image" + - name: Prepare env script run: | rm -rf ./ci/tmp @@ -458,7 +569,7 @@ jobs: fi docker_keeper_image: - runs-on: [self-hosted, style-checker] + runs-on: [self-hosted, altinity-on-demand, altinity-style-checker] needs: [config_workflow, dockers_build_amd, dockers_build_arm, build_amd_release, build_arm_release] if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && 
!contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'RG9ja2VyIGtlZXBlciBpbWFnZQ==') }} name: "Docker keeper image" @@ -471,6 +582,13 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Docker keeper image" + - name: Prepare env script run: | rm -rf ./ci/tmp @@ -498,7 +616,7 @@ jobs: fi install_packages_amd_release: - runs-on: [self-hosted, style-checker] + runs-on: [self-hosted, altinity-on-demand, altinity-func-tester] needs: [config_workflow, dockers_build_amd, dockers_build_arm, build_amd_release] if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'SW5zdGFsbCBwYWNrYWdlcyAoYW1kX3JlbGVhc2Up') }} name: "Install packages (amd_release)" @@ -511,6 +629,13 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Install packages (amd_release)" + - name: Prepare env script run: | rm -rf ./ci/tmp @@ -538,7 +663,7 @@ jobs: fi install_packages_arm_release: - runs-on: [self-hosted, style-checker-aarch64] + runs-on: [self-hosted, altinity-on-demand, altinity-func-tester-aarch64] needs: [config_workflow, dockers_build_amd, dockers_build_arm, build_arm_release] if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'SW5zdGFsbCBwYWNrYWdlcyAoYXJtX3JlbGVhc2Up') }} name: "Install packages (arm_release)" @@ -551,6 +676,13 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Install packages (arm_release)" + - name: Prepare env script run: | rm -rf ./ci/tmp @@ -578,7 +710,7 @@ jobs: fi compatibility_check_amd_release: - runs-on: [self-hosted, style-checker] + runs-on: [self-hosted, altinity-on-demand, altinity-style-checker] needs: [config_workflow, dockers_build_amd, dockers_build_arm, build_amd_release] if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'Q29tcGF0aWJpbGl0eSBjaGVjayAoYW1kX3JlbGVhc2Up') }} name: "Compatibility check (amd_release)" @@ -591,6 +723,13 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Compatibility check (amd_release)" + - name: Prepare env script run: | rm -rf ./ci/tmp @@ -618,7 +757,7 @@ jobs: fi compatibility_check_arm_release: - runs-on: [self-hosted, style-checker-aarch64] + runs-on: [self-hosted, altinity-on-demand, altinity-style-checker-aarch64] needs: [config_workflow, dockers_build_amd, dockers_build_arm, build_arm_release] if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 
'Q29tcGF0aWJpbGl0eSBjaGVjayAoYXJtX3JlbGVhc2Up') }} name: "Compatibility check (arm_release)" @@ -631,6 +770,13 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Compatibility check (arm_release)" + - name: Prepare env script run: | rm -rf ./ci/tmp @@ -658,7 +804,7 @@ jobs: fi stateless_tests_amd_asan_distributed_plan_parallel_1_2: - runs-on: [self-hosted, amd-medium-cpu] + runs-on: [self-hosted, altinity-on-demand, altinity-func-tester] needs: [config_workflow, dockers_build_amd, dockers_build_arm, build_amd_asan] if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhbWRfYXNhbiwgZGlzdHJpYnV0ZWQgcGxhbiwgcGFyYWxsZWwsIDEvMik=') }} name: "Stateless tests (amd_asan, distributed plan, parallel, 1/2)" @@ -671,6 +817,13 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Stateless tests (amd_asan, distributed plan, parallel, 1/2)" + - name: Prepare env script run: | rm -rf ./ci/tmp @@ -698,7 +851,7 @@ jobs: fi stateless_tests_amd_asan_distributed_plan_parallel_2_2: - runs-on: [self-hosted, amd-medium-cpu] + runs-on: [self-hosted, altinity-on-demand, altinity-func-tester] needs: [config_workflow, dockers_build_amd, dockers_build_arm, build_amd_asan] if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhbWRfYXNhbiwgZGlzdHJpYnV0ZWQgcGxhbiwgcGFyYWxsZWwsIDIvMik=') }} name: "Stateless tests (amd_asan, distributed plan, parallel, 2/2)" @@ -711,6 +864,13 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Stateless tests (amd_asan, distributed plan, parallel, 2/2)" + - name: Prepare env script run: | rm -rf ./ci/tmp @@ -738,7 +898,7 @@ jobs: fi stateless_tests_amd_asan_db_disk_distributed_plan_sequential: - runs-on: [self-hosted, amd-small-mem] + runs-on: [self-hosted, altinity-on-demand, altinity-func-tester] needs: [config_workflow, dockers_build_amd, dockers_build_arm, build_amd_asan] if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhbWRfYXNhbiwgZGIgZGlzaywgZGlzdHJpYnV0ZWQgcGxhbiwgc2VxdWVudGlhbCk=') }} name: "Stateless tests (amd_asan, db disk, distributed plan, sequential)" @@ -751,6 +911,13 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Stateless tests (amd_asan, db disk, distributed plan, sequential)" + - name: Prepare env script run: | rm -rf ./ci/tmp @@ -778,7 +945,7 @@ jobs: fi stress_test_amd_tsan: - runs-on: [self-hosted, amd-medium] + runs-on: [self-hosted, altinity-on-demand, altinity-func-tester] needs: [config_workflow, dockers_build_amd, dockers_build_arm, 
build_amd_tsan] if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'U3RyZXNzIHRlc3QgKGFtZF90c2FuKQ==') }} name: "Stress test (amd_tsan)" @@ -791,6 +958,13 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Stress test (amd_tsan)" + - name: Prepare env script run: | rm -rf ./ci/tmp @@ -818,7 +992,7 @@ jobs: fi integration_tests_amd_asan_db_disk_old_analyzer_1_6: - runs-on: [self-hosted, amd-medium] + runs-on: [self-hosted, altinity-on-demand, altinity-func-tester] needs: [config_workflow, dockers_build_amd, dockers_build_arm, build_amd_asan] if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKGFtZF9hc2FuLCBkYiBkaXNrLCBvbGQgYW5hbHl6ZXIsIDEvNik=') }} name: "Integration tests (amd_asan, db disk, old analyzer, 1/6)" @@ -831,6 +1005,13 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Integration tests (amd_asan, db disk, old analyzer, 1/6)" + - name: Prepare env script run: | rm -rf ./ci/tmp @@ -858,7 +1039,7 @@ jobs: fi integration_tests_amd_asan_db_disk_old_analyzer_2_6: - runs-on: [self-hosted, amd-medium] + runs-on: [self-hosted, altinity-on-demand, altinity-func-tester] needs: [config_workflow, dockers_build_amd, dockers_build_arm, build_amd_asan] if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKGFtZF9hc2FuLCBkYiBkaXNrLCBvbGQgYW5hbHl6ZXIsIDIvNik=') }} name: "Integration tests (amd_asan, db disk, old analyzer, 2/6)" @@ -871,6 +1052,13 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Integration tests (amd_asan, db disk, old analyzer, 2/6)" + - name: Prepare env script run: | rm -rf ./ci/tmp @@ -898,7 +1086,7 @@ jobs: fi integration_tests_amd_asan_db_disk_old_analyzer_3_6: - runs-on: [self-hosted, amd-medium] + runs-on: [self-hosted, altinity-on-demand, altinity-func-tester] needs: [config_workflow, dockers_build_amd, dockers_build_arm, build_amd_asan] if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKGFtZF9hc2FuLCBkYiBkaXNrLCBvbGQgYW5hbHl6ZXIsIDMvNik=') }} name: "Integration tests (amd_asan, db disk, old analyzer, 3/6)" @@ -911,6 +1099,13 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Integration tests (amd_asan, db disk, old analyzer, 3/6)" + - name: Prepare env script run: | rm -rf ./ci/tmp @@ -938,7 +1133,7 @@ jobs: fi integration_tests_amd_asan_db_disk_old_analyzer_4_6: 
- runs-on: [self-hosted, amd-medium] + runs-on: [self-hosted, altinity-on-demand, altinity-func-tester] needs: [config_workflow, dockers_build_amd, dockers_build_arm, build_amd_asan] if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKGFtZF9hc2FuLCBkYiBkaXNrLCBvbGQgYW5hbHl6ZXIsIDQvNik=') }} name: "Integration tests (amd_asan, db disk, old analyzer, 4/6)" @@ -951,6 +1146,13 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Integration tests (amd_asan, db disk, old analyzer, 4/6)" + - name: Prepare env script run: | rm -rf ./ci/tmp @@ -978,7 +1180,7 @@ jobs: fi integration_tests_amd_asan_db_disk_old_analyzer_5_6: - runs-on: [self-hosted, amd-medium] + runs-on: [self-hosted, altinity-on-demand, altinity-func-tester] needs: [config_workflow, dockers_build_amd, dockers_build_arm, build_amd_asan] if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKGFtZF9hc2FuLCBkYiBkaXNrLCBvbGQgYW5hbHl6ZXIsIDUvNik=') }} name: "Integration tests (amd_asan, db disk, old analyzer, 5/6)" @@ -991,6 +1193,13 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Integration tests (amd_asan, db disk, old analyzer, 5/6)" + - name: Prepare env script run: | rm -rf ./ci/tmp @@ -1018,7 +1227,7 @@ jobs: fi integration_tests_amd_asan_db_disk_old_analyzer_6_6: - runs-on: [self-hosted, amd-medium] + runs-on: [self-hosted, altinity-on-demand, altinity-func-tester] needs: [config_workflow, dockers_build_amd, dockers_build_arm, build_amd_asan] if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKGFtZF9hc2FuLCBkYiBkaXNrLCBvbGQgYW5hbHl6ZXIsIDYvNik=') }} name: "Integration tests (amd_asan, db disk, old analyzer, 6/6)" @@ -1031,6 +1240,13 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Integration tests (amd_asan, db disk, old analyzer, 6/6)" + - name: Prepare env script run: | rm -rf ./ci/tmp @@ -1058,7 +1274,7 @@ jobs: fi integration_tests_amd_tsan_1_6: - runs-on: [self-hosted, amd-medium] + runs-on: [self-hosted, altinity-on-demand, altinity-func-tester] needs: [config_workflow, dockers_build_amd, dockers_build_arm, build_amd_tsan] if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKGFtZF90c2FuLCAxLzYp') }} name: "Integration tests (amd_tsan, 1/6)" @@ -1071,6 +1287,13 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + 
with: + test_name: "Integration tests (amd_tsan, 1/6)" + - name: Prepare env script run: | rm -rf ./ci/tmp @@ -1098,7 +1321,7 @@ jobs: fi integration_tests_amd_tsan_2_6: - runs-on: [self-hosted, amd-medium] + runs-on: [self-hosted, altinity-on-demand, altinity-func-tester] needs: [config_workflow, dockers_build_amd, dockers_build_arm, build_amd_tsan] if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKGFtZF90c2FuLCAyLzYp') }} name: "Integration tests (amd_tsan, 2/6)" @@ -1111,6 +1334,13 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Integration tests (amd_tsan, 2/6)" + - name: Prepare env script run: | rm -rf ./ci/tmp @@ -1138,7 +1368,7 @@ jobs: fi integration_tests_amd_tsan_3_6: - runs-on: [self-hosted, amd-medium] + runs-on: [self-hosted, altinity-on-demand, altinity-func-tester] needs: [config_workflow, dockers_build_amd, dockers_build_arm, build_amd_tsan] if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKGFtZF90c2FuLCAzLzYp') }} name: "Integration tests (amd_tsan, 3/6)" @@ -1151,6 +1381,13 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Integration tests (amd_tsan, 3/6)" + - name: Prepare env script run: | rm -rf ./ci/tmp @@ -1178,7 +1415,7 @@ jobs: fi integration_tests_amd_tsan_4_6: - runs-on: [self-hosted, amd-medium] + runs-on: [self-hosted, altinity-on-demand, altinity-func-tester] needs: [config_workflow, dockers_build_amd, dockers_build_arm, build_amd_tsan] if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKGFtZF90c2FuLCA0LzYp') }} name: "Integration tests (amd_tsan, 4/6)" @@ -1191,6 +1428,13 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Integration tests (amd_tsan, 4/6)" + - name: Prepare env script run: | rm -rf ./ci/tmp @@ -1218,7 +1462,7 @@ jobs: fi integration_tests_amd_tsan_5_6: - runs-on: [self-hosted, amd-medium] + runs-on: [self-hosted, altinity-on-demand, altinity-func-tester] needs: [config_workflow, dockers_build_amd, dockers_build_arm, build_amd_tsan] if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKGFtZF90c2FuLCA1LzYp') }} name: "Integration tests (amd_tsan, 5/6)" @@ -1231,6 +1475,13 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Integration tests (amd_tsan, 5/6)" + - name: Prepare env script run: | rm -rf ./ci/tmp 
@@ -1258,7 +1509,7 @@ jobs: fi integration_tests_amd_tsan_6_6: - runs-on: [self-hosted, amd-medium] + runs-on: [self-hosted, altinity-on-demand, altinity-func-tester] needs: [config_workflow, dockers_build_amd, dockers_build_arm, build_amd_tsan] if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKGFtZF90c2FuLCA2LzYp') }} name: "Integration tests (amd_tsan, 6/6)" @@ -1271,6 +1522,13 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Integration tests (amd_tsan, 6/6)" + - name: Prepare env script run: | rm -rf ./ci/tmp @@ -1298,7 +1556,7 @@ jobs: fi finish_workflow: - runs-on: [self-hosted, style-checker-aarch64] + runs-on: [self-hosted, altinity-on-demand, altinity-style-checker-aarch64] needs: [config_workflow, dockers_build_amd, dockers_build_arm, build_amd_debug, build_amd_asan, build_amd_tsan, build_amd_release, build_arm_release, build_amd_darwin, build_arm_darwin, docker_server_image, docker_keeper_image, install_packages_amd_release, install_packages_arm_release, compatibility_check_amd_release, compatibility_check_arm_release, stateless_tests_amd_asan_distributed_plan_parallel_1_2, stateless_tests_amd_asan_distributed_plan_parallel_2_2, stateless_tests_amd_asan_db_disk_distributed_plan_sequential, stress_test_amd_tsan, integration_tests_amd_asan_db_disk_old_analyzer_1_6, integration_tests_amd_asan_db_disk_old_analyzer_2_6, integration_tests_amd_asan_db_disk_old_analyzer_3_6, integration_tests_amd_asan_db_disk_old_analyzer_4_6, integration_tests_amd_asan_db_disk_old_analyzer_5_6, integration_tests_amd_asan_db_disk_old_analyzer_6_6, integration_tests_amd_tsan_1_6, integration_tests_amd_tsan_2_6, integration_tests_amd_tsan_3_6, integration_tests_amd_tsan_4_6, integration_tests_amd_tsan_5_6, integration_tests_amd_tsan_6_6] if: ${{ always() }} name: "Finish Workflow" @@ -1311,6 +1569,13 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Finish Workflow" + - name: Prepare env script run: | rm -rf ./ci/tmp diff --git a/.github/workflows/cancel.yml b/.github/workflows/cancel.yml new file mode 100644 index 000000000000..c1e11ef212cd --- /dev/null +++ b/.github/workflows/cancel.yml @@ -0,0 +1,19 @@ +name: Cancel + +env: + # Force the stdout and stderr streams to be unbuffered + PYTHONUNBUFFERED: 1 + +on: # yamllint disable-line rule:truthy + workflow_run: + workflows: ["PR","PullRequestCI", "ReleaseBranchCI", "DocsCheck", "BackportPR"] + types: + - requested +jobs: + cancel: + runs-on: ubuntu-latest + steps: + - uses: styfle/cancel-workflow-action@0.9.1 + with: + all_but_latest: true + workflow_id: ${{ github.event.workflow.id }} diff --git a/.github/workflows/cherry_pick.yml b/.github/workflows/cherry_pick.yml index 315673d4abcc..8e5191eb33cc 100644 --- a/.github/workflows/cherry_pick.yml +++ b/.github/workflows/cherry_pick.yml @@ -28,7 +28,7 @@ jobs: REPO_TEAM=core EOF - name: Check out repository code - uses: ClickHouse/checkout@v1 + uses: Altinity/checkout@19599efdf36c4f3f30eb55d5bb388896faea69f with: clear-repository: true token: ${{secrets.ROBOT_CLICKHOUSE_COMMIT_TOKEN}} diff --git a/.github/workflows/compare_fails.yml 
b/.github/workflows/compare_fails.yml new file mode 100644 index 000000000000..1f734845ac1a --- /dev/null +++ b/.github/workflows/compare_fails.yml @@ -0,0 +1,110 @@ +name: Compare CI Failures + +on: + workflow_dispatch: + inputs: + current_ref: + description: 'Current reference (commit hash or git tag) (default: current commit on selected branch)' + required: false + type: string + previous_ref: + description: 'Previous reference to compare with (commit hash, git tag or workflow url) (default: previous stable tag for current reference)' + required: false + type: string + upstream_ref: + description: 'Upstream reference to compare with (commit hash, git tag or MAJOR.MINOR version) (default: previous lts tag for current reference)' + required: false + type: string + include_broken: + description: 'Include BROKEN tests in comparison' + required: false + type: boolean + default: false + push: + tags: + - 'v*.altinity*' + +env: + CHECKS_DATABASE_HOST: ${{ secrets.CHECKS_DATABASE_HOST }} + CHECKS_DATABASE_USER: ${{ secrets.CLICKHOUSE_TEST_STAT_LOGIN }} + CHECKS_DATABASE_PASSWORD: ${{ secrets.CLICKHOUSE_TEST_STAT_PASSWORD }} + +jobs: + Compare: + runs-on: [self-hosted, altinity-on-demand, altinity-style-checker-aarch64] + steps: + - name: Check commit status + run: | + if [[ "${{ github.event_name }}" == "workflow_dispatch" && -n "${{ inputs.current_ref }}" ]]; then + # For workflow_dispatch with custom ref, skip the check + exit 0 + fi + + # Query GitHub API for commit status + STATUSES=$(curl -s -H "Authorization: token ${{ secrets.GITHUB_TOKEN }}" \ + "https://api.github.com/repos/${{ github.repository }}/commits/${{ github.sha }}/status") + + # Check if there are any statuses + if [ "$(echo $STATUSES | jq '.total_count')" -eq 0 ]; then + echo "No commit statuses found for ${{ github.sha }}. Assuming tests have not run yet. Aborting workflow." + exit 1 + fi + + echo "Found commit statuses, proceeding with comparison." 
+ + - name: Check out repository code + uses: actions/checkout@v3 + with: + fetch-depth: 0 + ref: ${{ inputs.current_ref || github.ref }} + + - name: Set up Python + uses: actions/setup-python@v4 + with: + python-version: '3.x' + cache: 'pip' + + - name: Install dependencies + run: | + python -m pip install --upgrade pip + pip install clickhouse-driver requests pandas tabulate + + - name: Set default refs + id: default_refs + run: | + VERSION=$(git describe --tags --abbrev=0 | sed 's/v\([0-9]\+\.[0-9]\+\).*/\1/') + echo "Detected version: $VERSION" + + CURRENT_TAG=$(git tag --contains ${{ inputs.current_ref || github.sha }} | sort -r | grep -m 1 'altinity' || echo '') + echo "CURRENT_TAG: '$CURRENT_TAG' ${{ inputs.current_ref || github.sha }}" + PREVIOUS_TAG_COMMIT=$(git log -1 --until=yesterday --tags=v${VERSION}*.altinity* | grep -Po "(?<=commit ).*") + PREVIOUS_TAG=$(git tag --contains $PREVIOUS_TAG_COMMIT | sort -r | grep -m 1 'altinity') + echo "PREVIOUS_TAG: '$PREVIOUS_TAG' $PREVIOUS_TAG_COMMIT" + UPSTREAM_TAG_COMMIT=$(git log -1 --tags=v${VERSION}*-lts | grep -Po "(?<=commit ).*") + UPSTREAM_TAG=$(git tag --contains $UPSTREAM_TAG_COMMIT | sort -r | grep -m 1 'lts') + echo "UPSTREAM_TAG: '$UPSTREAM_TAG' $UPSTREAM_TAG_COMMIT" + + echo "PREVIOUS_TAG=$PREVIOUS_TAG" >> $GITHUB_OUTPUT + echo "PREVIOUS_TAG_COMMIT=$PREVIOUS_TAG_COMMIT" >> $GITHUB_OUTPUT + echo "UPSTREAM_TAG=$UPSTREAM_TAG" >> $GITHUB_OUTPUT + echo "UPSTREAM_TAG_COMMIT=$UPSTREAM_TAG_COMMIT" >> $GITHUB_OUTPUT + echo "CURRENT_TAG=$CURRENT_TAG" >> $GITHUB_OUTPUT + - name: Comparison report + if: ${{ !cancelled() }} + run: | + git clone https://github.com/Altinity/actions.git + cd actions + git checkout 4623f919ee2738bea69aad405879562476736932 + python3 scripts/compare_ci_fails.py \ + --current-ref ${{ steps.default_refs.outputs.CURRENT_TAG || inputs.current_ref || github.sha }} \ + --previous-ref ${{ steps.default_refs.outputs.PREVIOUS_TAG || inputs.previous_ref || steps.default_refs.outputs.PREVIOUS_TAG_COMMIT }} \ + --upstream-ref ${{ steps.default_refs.outputs.UPSTREAM_TAG || inputs.upstream_ref || steps.default_refs.outputs.UPSTREAM_TAG_COMMIT }} \ + ${{ inputs.include_broken && '--broken' || '' }} + cat comparison_results.md >> $GITHUB_STEP_SUMMARY + + - name: Upload comparison results + uses: actions/upload-artifact@v4 + with: + name: comparison-results + path: | + actions/comparison_results.md diff --git a/.github/workflows/create_release.yml b/.github/workflows/create_release.yml index dc708514dfd5..421261fb436f 100644 --- a/.github/workflows/create_release.yml +++ b/.github/workflows/create_release.yml @@ -70,7 +70,7 @@ jobs: runs-on: [self-hosted, release-maker] steps: - name: Check out repository code - uses: ClickHouse/checkout@v1 + uses: Altinity/checkout@19599efdf36c4f3f30eb55d5bb388896faea69f with: token: ${{secrets.ROBOT_CLICKHOUSE_COMMIT_TOKEN}} fetch-depth: 0 diff --git a/.github/workflows/docker_publish.yml b/.github/workflows/docker_publish.yml new file mode 100644 index 000000000000..1e59aa8b5b8d --- /dev/null +++ b/.github/workflows/docker_publish.yml @@ -0,0 +1,150 @@ +name: Republish Multiarch Docker Image + +on: + workflow_dispatch: + inputs: + docker_image: + description: 'Multiarch Docker image with tag' + required: true + release_environment: + description: 'Select release type: "staging" or "production"' + type: choice + default: 'staging' + options: + - staging + - production + upload_artifacts: + description: 'Upload artifacts directly in this workflow' + type: boolean + default: true + 
s3_upload_path: + description: 'Upload artifacts to s3 path' + type: string + required: false + workflow_call: + inputs: + docker_image: + type: string + required: true + release_environment: + type: string + required: false + default: 'staging' + upload_artifacts: + type: boolean + required: false + default: false + s3_upload_path: + type: string + required: false + outputs: + image_archives_path: + description: 'Path to the image archives directory' + value: ${{ jobs.republish.outputs.image_archives_path }} + +env: + IMAGE: ${{ github.event.inputs.docker_image || inputs.docker_image }} + +jobs: + republish: + runs-on: [self-hosted, altinity-on-demand, altinity-style-checker-aarch64] + outputs: + image_archives_path: ${{ steps.set_path.outputs.image_archives_path }} + steps: + - name: Docker Hub Login + uses: docker/login-action@v2 + with: + username: ${{ secrets.DOCKER_USERNAME }} + password: ${{ secrets.DOCKER_TOKEN }} + + - name: Set clickhouse-server version as new tag + run: | + # Determine "clickhouse-server" or "clickhouse-keeper" + echo "Input IMAGE: $IMAGE" + COMPONENT=$(echo "$IMAGE" | sed -E 's|.*/(clickhouse-[^:]+):.*|\1|') + echo "Component determined: $COMPONENT" + echo "COMPONENT=$COMPONENT" >> $GITHUB_ENV + + # Pull the image + echo "Pulling the image" + docker pull $IMAGE + + # Get version and clean it up + echo "Getting version from image..." + VERSION_OUTPUT=$(docker run --rm $IMAGE $COMPONENT --version) + echo "Raw version output: $VERSION_OUTPUT" + + # Extract just the version number + NEW_TAG=$(echo "$VERSION_OUTPUT" | sed -E 's/.*version ([0-9.]+[^ ]*).*/\1/') + echo "Cleaned version: $NEW_TAG" + + # Append "-prerelease" if necessary + if [ "${{ github.event.inputs.release_environment || inputs.release_environment }}" = "staging" ]; then + NEW_TAG="${NEW_TAG}-prerelease" + fi + + if [[ "$IMAGE" == *-alpine* ]]; then + NEW_TAG="${NEW_TAG}-alpine" + fi + echo "New tag: $NEW_TAG" + + # Export the new tag + echo "NEW_TAG=$NEW_TAG" >> $GITHUB_ENV + + - name: Process multiarch manifest + run: | + echo "Re-tag multiarch image $IMAGE to altinity/$COMPONENT:$NEW_TAG" + docker buildx imagetools create --tag "altinity/$COMPONENT:$NEW_TAG" "$IMAGE" + + # Create directory for image archives + mkdir -p image_archives + + # Pull and save platform-specific images + for PLATFORM in "linux/amd64" "linux/arm64"; do + echo "Pulling and saving image for $PLATFORM..." + # Pull the specific platform image + docker pull --platform $PLATFORM "altinity/$COMPONENT:$NEW_TAG" + + # Save the image to a tar file + ARCH=$(echo $PLATFORM | cut -d'/' -f2) + docker save "altinity/$COMPONENT:$NEW_TAG" -o "image_archives/${COMPONENT}-${NEW_TAG}-${ARCH}.tar" + done + + # Save manifest inspection + docker buildx imagetools inspect "altinity/$COMPONENT:$NEW_TAG" > image_archives/manifest.txt + + # Compress the archives + cd image_archives + for file in *.tar; do + gzip "$file" + done + cd .. 
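Condensed, the step above derives the republished tag from the version the binary inside the image reports, then re-points a multiarch tag without rebuilding anything. A sketch under stated assumptions: IMAGE and RELEASE_ENVIRONMENT stand in for the workflow's docker_image and release_environment inputs, and the sample image name is hypothetical:

```bash
#!/usr/bin/env bash
# Sketch of the re-tag flow; requires docker with buildx. The image name
# below is hypothetical and stands in for the docker_image input.
set -euo pipefail
IMAGE="example/clickhouse-server:some-tag"
RELEASE_ENVIRONMENT="staging"   # stand-in for the release_environment input

COMPONENT=$(echo "$IMAGE" | sed -E 's|.*/(clickhouse-[^:]+):.*|\1|')
docker pull "$IMAGE"

# Ask the packaged binary for its version and strip the surrounding prose.
NEW_TAG=$(docker run --rm "$IMAGE" "$COMPONENT" --version \
  | sed -E 's/.*version ([0-9.]+[^ ]*).*/\1/')

# Staging republishes are marked -prerelease; alpine variants keep a marker.
if [ "$RELEASE_ENVIRONMENT" = "staging" ]; then NEW_TAG="${NEW_TAG}-prerelease"; fi
case "$IMAGE" in *-alpine*) NEW_TAG="${NEW_TAG}-alpine" ;; esac

# Re-point the multiarch manifest at the new tag without rebuilding layers.
docker buildx imagetools create --tag "altinity/$COMPONENT:$NEW_TAG" "$IMAGE"
```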
+ + - name: Set image archives path + id: set_path + run: | + echo "image_archives_path=${{ github.workspace }}/image_archives" >> $GITHUB_OUTPUT + + - name: Upload image archives + if: ${{ github.event.inputs.upload_artifacts || inputs.upload_artifacts }} + uses: actions/upload-artifact@v4 + with: + name: docker-images-backup + path: image_archives/ + retention-days: 90 + + - name: Install aws cli + if: ${{ inputs.s3_upload_path != '' }} + uses: unfor19/install-aws-cli-action@v1 + with: + version: 2 + arch: arm64 + + - name: Upload to S3 + if: ${{ inputs.s3_upload_path != '' }} + env: + AWS_ACCESS_KEY_ID: ${{ secrets.AWS_ACCESS_KEY_ID }} + AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_SECRET_ACCESS_KEY }} + run: | + aws s3 sync image_archives/ "${{ inputs.s3_upload_path }}" + diff --git a/.github/workflows/grype_scan.yml b/.github/workflows/grype_scan.yml new file mode 100644 index 000000000000..a92fec5f9879 --- /dev/null +++ b/.github/workflows/grype_scan.yml @@ -0,0 +1,154 @@ +name: Grype Scan +run-name: Grype Scan ${{ inputs.docker_image }} + +on: + workflow_dispatch: + # Inputs for manual run + inputs: + docker_image: + description: 'Docker image. If no tag, it will be determined by version_helper.py' + required: true + workflow_call: + # Inputs for workflow call + inputs: + docker_image: + description: 'Docker image. If no tag, it will be determined by version_helper.py' + required: true + type: string + version: + description: 'Version tag. If no version, it will be determined by version_helper.py' + required: false + type: string + default: "" + tag-suffix: + description: 'Tag suffix. To be appended to the version from version_helper.py' + required: false + type: string + default: "" +env: + PYTHONUNBUFFERED: 1 + AWS_ACCESS_KEY_ID: ${{ secrets.AWS_ACCESS_KEY_ID }} + AWS_DEFAULT_REGION: ${{ secrets.AWS_DEFAULT_REGION }} + AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_SECRET_ACCESS_KEY }} + GRYPE_VERSION: "v0.92.2-arm64v8" + +jobs: + grype_scan: + name: Grype Scan + runs-on: [self-hosted, altinity-on-demand, altinity-style-checker-aarch64] + steps: + - name: Checkout repository + uses: actions/checkout@v4 + + - name: Set up Docker + uses: docker/setup-buildx-action@v3 + + - name: Set up Python + run: | + export TESTFLOWS_VERSION="2.4.19" + sudo apt-get update + sudo apt-get install -y python3-pip python3-venv + python3 -m venv venv + source venv/bin/activate + pip install --upgrade requests chardet urllib3 unidiff boto3 PyGithub + pip install testflows==$TESTFLOWS_VERSION awscli==1.33.28 + echo PATH=$PATH >>$GITHUB_ENV + + - name: Set image tag if not given + if: ${{ !contains(inputs.docker_image, ':') }} + id: set_version + env: + TAG_SUFFIX: ${{ inputs.tag-suffix }} + SPECIFIED_VERSION: ${{ inputs.version }} + run: | + python3 ./tests/ci/version_helper.py | grep = | tee /tmp/version_info + source /tmp/version_info + if [ -z "$SPECIFIED_VERSION" ]; then + VERSION=$CLICKHOUSE_VERSION_STRING + else + VERSION=$SPECIFIED_VERSION + fi + echo "docker_image=${{ inputs.docker_image }}:$PR_NUMBER-$VERSION$TAG_SUFFIX" >> $GITHUB_OUTPUT + + - name: Run Grype Scan + run: | + DOCKER_IMAGE=${{ steps.set_version.outputs.docker_image || inputs.docker_image }} + ./.github/grype/run_grype_scan.sh $DOCKER_IMAGE + + - name: Parse grype results + run: | + python3 -u ./.github/grype/parse_vulnerabilities_grype.py -o nice --no-colors --log raw.log --test-to-end + + - name: Transform and Upload Grype Results + if: always() + id: upload_results + env: + S3_BUCKET: "altinity-build-artifacts" + COMMIT_SHA: ${{
github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }} + PR_NUMBER: ${{ env.PR_NUMBER || github.event.pull_request.number || 0 }} + DOCKER_IMAGE: ${{ steps.set_version.outputs.docker_image || inputs.docker_image }} + run: | + echo "PR_NUMBER=$PR_NUMBER" + ./.github/grype/transform_and_upload_results_s3.sh + + - name: Create step summary + if: always() + id: create_summary + run: | + jq -r '"**Image**: \(.source.target.userInput)"' result.json >> $GITHUB_STEP_SUMMARY + jq -r '.distro | "**Distro**: \(.name):\(.version)"' result.json >> $GITHUB_STEP_SUMMARY + if jq -e '.matches | length == 0' result.json > /dev/null; then + echo "No CVEs" >> $GITHUB_STEP_SUMMARY + else + echo "| Severity | Count |" >> $GITHUB_STEP_SUMMARY + echo "|------------|-------|" >> $GITHUB_STEP_SUMMARY + jq -r ' + .matches | + map(.vulnerability.severity) | + group_by(.) | + map({severity: .[0], count: length}) | + sort_by(.severity) | + map("| \(.severity) | \(.count) |") | + .[] + ' result.json >> $GITHUB_STEP_SUMMARY + fi + + HIGH_COUNT=$(jq -r '.matches | map(.vulnerability) | unique_by(.id) | map(.severity) | map(select(. == "High")) | length' result.json) + CRITICAL_COUNT=$(jq -r '.matches | map(.vulnerability) | unique_by(.id) | map(.severity) | map(select(. == "Critical")) | length' result.json) + TOTAL_HIGH_CRITICAL=$((HIGH_COUNT + CRITICAL_COUNT)) + echo "total_high_critical=$TOTAL_HIGH_CRITICAL" >> $GITHUB_OUTPUT + + if [ $TOTAL_HIGH_CRITICAL -gt 0 ]; then + echo '## High and Critical vulnerabilities found' >> $GITHUB_STEP_SUMMARY + echo '```' >> $GITHUB_STEP_SUMMARY + cat raw.log | tfs --no-colors show tests | grep -Pi 'High|Critical' >> $GITHUB_STEP_SUMMARY + echo '```' >> $GITHUB_STEP_SUMMARY + fi + + - name: Set commit status + if: always() + uses: actions/github-script@v7 + with: + github-token: ${{ secrets.GITHUB_TOKEN }} + script: | + const totalHighCritical = '${{ steps.create_summary.outputs.total_high_critical }}'; + const hasError = totalHighCritical === ''; + const hasVulnerabilities = parseInt(totalHighCritical) > 0; + github.rest.repos.createCommitStatus({ + owner: context.repo.owner, + repo: context.repo.repo, + sha: '${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}', + state: hasError ? 'error' : hasVulnerabilities ? 'failure' : 'success', + target_url: '${{ steps.upload_results.outputs.https_report_path }}', + description: hasError ? 
'An error occurred' : `Grype Scan Completed with ${totalHighCritical} high/critical vulnerabilities`, + context: 'Grype Scan ${{ steps.set_version.outputs.docker_image || inputs.docker_image }}' + }); + + - name: Upload artifacts + if: always() + uses: actions/upload-artifact@v4 + with: + name: grype-results-${{ hashFiles('raw.log') }} + path: | + result.json + nice.log.txt diff --git a/.github/workflows/hourly.yml b/.github/workflows/hourly.yml index 69fafc3abda8..25e32f4ba0f6 100644 --- a/.github/workflows/hourly.yml +++ b/.github/workflows/hourly.yml @@ -16,7 +16,7 @@ env: jobs: config_workflow: - runs-on: [self-hosted, style-checker-aarch64] + runs-on: [self-hosted, altinity-on-demand, altinity-style-checker-aarch64] needs: [] name: "Config Workflow" outputs: @@ -28,6 +28,26 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Config Workflow" + + - name: Note report location to summary + env: + PR_NUMBER: ${{ github.event.pull_request.number || 0 }} + COMMIT_SHA: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }} + run: | + if [ "$PR_NUMBER" -eq 0 ]; then + PREFIX="REFs/$GITHUB_REF_NAME/$COMMIT_SHA" + else + PREFIX="PRs/$PR_NUMBER/$COMMIT_SHA" + fi + REPORT_LINK=https://s3.amazonaws.com/altinity-build-artifacts/$PREFIX/$GITHUB_RUN_ID/ci_run_report.html + echo "Workflow Run Report: [View Report]($REPORT_LINK)" >> $GITHUB_STEP_SUMMARY + - name: Prepare env script run: | rm -rf ./ci/tmp @@ -55,7 +75,7 @@ jobs: fi collect_flaky_tests: - runs-on: [self-hosted, style-checker-aarch64] + runs-on: [self-hosted, altinity-on-demand, altinity-style-checker-aarch64] needs: [config_workflow] name: "Collect flaky tests" outputs: @@ -67,6 +87,13 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Collect flaky tests" + - name: Prepare env script run: | rm -rf ./ci/tmp @@ -94,7 +121,7 @@ jobs: fi autoassign_approvers: - runs-on: [self-hosted, style-checker-aarch64] + runs-on: [self-hosted, altinity-on-demand, altinity-style-checker-aarch64] needs: [config_workflow] name: "Autoassign approvers" outputs: @@ -106,6 +133,13 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Autoassign approvers" + - name: Prepare env script run: | rm -rf ./ci/tmp diff --git a/.github/workflows/init_praktika.yml b/.github/workflows/init_praktika.yml new file mode 100644 index 000000000000..e9f56e0d2396 --- /dev/null +++ b/.github/workflows/init_praktika.yml @@ -0,0 +1,27 @@ +name: InitPraktikaReport + +# This workflow is used to initialize/update the praktika report in S3. +# It does not need to run often; running it once per new release should be plenty.
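The praktika-generated workflows that follow (master.yml and pull_request.yml) guard every job with a contains() check against workflow_config.cache_success_base64, a list of already-succeeded job names stored base64-encoded. A quick sketch of how those opaque literals map back to job names, assuming coreutils base64; both sample values are taken from guards in this patch:

```bash
# Encode a job name the way the generator embeds it in an if: guard
# (no trailing newline, hence echo -n):
echo -n 'Dockers Build (amd)' | base64
# -> RG9ja2VycyBCdWlsZCAoYW1kKQ==

# Decode a literal seen in one of the guards back to its job name:
echo 'SW50ZWdyYXRpb24gdGVzdHMgKGFtZF90c2FuLCA2LzYp' | base64 --decode; echo
# -> Integration tests (amd_tsan, 6/6)
```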
+ +on: + workflow_dispatch: + +env: + # Force the stdout and stderr streams to be unbuffered + PYTHONUNBUFFERED: 1 + AWS_ACCESS_KEY_ID: ${{ secrets.AWS_ACCESS_KEY_ID }} + AWS_DEFAULT_REGION: ${{ secrets.AWS_DEFAULT_REGION }} + AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_SECRET_ACCESS_KEY }} + +jobs: + + init_praktika: + runs-on: [self-hosted, altinity-on-demand, altinity-style-checker-aarch64] + name: "Init praktika" + steps: + - name: Init praktika report + run: | + export PYTHONPATH=./ci:.: + pip install htmlmin + python3 -m praktika html + \ No newline at end of file diff --git a/.github/workflows/master.yml b/.github/workflows/master.yml index f518b9ec0979..de81027d0d48 100644 --- a/.github/workflows/master.yml +++ b/.github/workflows/master.yml @@ -3,13 +3,34 @@ name: MasterCI on: + workflow_dispatch: + inputs: + no_cache: + description: Run without cache + required: false + type: boolean + default: false push: - branches: ['master'] + branches: ['antalya', 'releases/*', 'antalya-*'] env: # Force the stdout and stderr streams to be unbuffered PYTHONUNBUFFERED: 1 CHECKOUT_REF: "" + DOCKER_PASSWORD: ${{ secrets.DOCKER_PASSWORD }} + CLICKHOUSE_TEST_STAT_URL: ${{ secrets.CLICKHOUSE_TEST_STAT_URL }} + CLICKHOUSE_TEST_STAT_LOGIN: ${{ secrets.CLICKHOUSE_TEST_STAT_LOGIN }} + CLICKHOUSE_TEST_STAT_PASSWORD: ${{ secrets.CLICKHOUSE_TEST_STAT_PASSWORD }} + AWS_ACCESS_KEY_ID: ${{ secrets.AWS_ACCESS_KEY_ID }} + AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_SECRET_ACCESS_KEY }} + AWS_DEFAULT_REGION: ${{ secrets.AWS_DEFAULT_REGION }} + CHECKS_DATABASE_HOST: ${{ secrets.CHECKS_DATABASE_HOST }} + AZURE_STORAGE_KEY: ${{ secrets.AZURE_STORAGE_KEY }} + AZURE_ACCOUNT_NAME: ${{ secrets.AZURE_ACCOUNT_NAME }} + AZURE_CONTAINER_NAME: ${{ secrets.AZURE_CONTAINER_NAME }} + AZURE_STORAGE_ACCOUNT_URL: "https://${{ secrets.AZURE_ACCOUNT_NAME }}.blob.core.windows.net/" + ROBOT_TOKEN: ${{ secrets.ROBOT_TOKEN }} + GH_TOKEN: ${{ github.token }} # Allow updating GH commit statuses and PR comments to post an actual job reports link permissions: write-all @@ -17,7 +38,7 @@ permissions: write-all jobs: config_workflow: - runs-on: [self-hosted, style-checker-aarch64] + runs-on: [self-hosted, altinity-on-demand, altinity-style-checker-aarch64] needs: [] name: "Config Workflow" outputs: @@ -29,6 +50,26 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Config Workflow" + + - name: Note report location to summary + env: + PR_NUMBER: ${{ github.event.pull_request.number || 0 }} + COMMIT_SHA: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }} + run: | + if [ "$PR_NUMBER" -eq 0 ]; then + PREFIX="REFs/$GITHUB_REF_NAME/$COMMIT_SHA" + else + PREFIX="PRs/$PR_NUMBER/$COMMIT_SHA" + fi + REPORT_LINK=https://s3.amazonaws.com/altinity-build-artifacts/$PREFIX/$GITHUB_RUN_ID/ci_run_report.html + echo "Workflow Run Report: [View Report]($REPORT_LINK)" >> $GITHUB_STEP_SUMMARY + - name: Prepare env script run: | rm -rf ./ci/tmp @@ -56,7 +97,7 @@ jobs: fi dockers_build_amd: - runs-on: [self-hosted, style-checker] + runs-on: [self-hosted, altinity-on-demand, altinity-style-checker] needs: [config_workflow] if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'RG9ja2VycyBCdWlsZCAoYW1kKQ==') }} name: "Dockers 
Build (amd)" @@ -69,6 +110,13 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Dockers Build (amd)" + - name: Prepare env script run: | rm -rf ./ci/tmp @@ -96,7 +144,7 @@ jobs: fi dockers_build_arm: - runs-on: [self-hosted, style-checker-aarch64] + runs-on: [self-hosted, altinity-on-demand, altinity-style-checker-aarch64] needs: [config_workflow] if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'RG9ja2VycyBCdWlsZCAoYXJtKQ==') }} name: "Dockers Build (arm)" @@ -109,6 +157,13 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Dockers Build (arm)" + - name: Prepare env script run: | rm -rf ./ci/tmp @@ -136,7 +191,7 @@ jobs: fi dockers_build_multiplatform_manifest: - runs-on: [self-hosted, style-checker] + runs-on: [self-hosted, altinity-on-demand, altinity-style-checker] needs: [config_workflow, dockers_build_amd, dockers_build_arm] if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'RG9ja2VycyBCdWlsZCAobXVsdGlwbGF0Zm9ybSBtYW5pZmVzdCk=') }} name: "Dockers Build (multiplatform manifest)" @@ -149,45 +204,12 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} - - name: Prepare env script - run: | - rm -rf ./ci/tmp - mkdir -p ./ci/tmp - cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF' - export PYTHONPATH=./ci:.: - - cat > ./ci/tmp/workflow_job.json << 'EOF' - ${{ toJson(job) }} - EOF - cat > ./ci/tmp/workflow_status.json << 'EOF' - ${{ toJson(needs) }} - EOF - ENV_SETUP_SCRIPT_EOF - - - name: Run - id: run - run: | - . ./ci/tmp/praktika_setup_env.sh - set -o pipefail - if command -v ts &> /dev/null; then - python3 -m praktika run 'Dockers Build (multiplatform manifest)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log - else - python3 -m praktika run 'Dockers Build (multiplatform manifest)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log - fi - - build_arm_tidy: - runs-on: [self-hosted, arm-large] - needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest] - if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'QnVpbGQgKGFybV90aWR5KQ==') }} - name: "Build (arm_tidy)" - outputs: - data: ${{ steps.run.outputs.DATA }} - pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} - steps: - - name: Checkout code - uses: actions/checkout@v4 + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup with: - ref: ${{ env.CHECKOUT_REF }} + test_name: "Dockers Build (multiplatform manifest)" - name: Prepare env script run: | @@ -210,13 +232,13 @@ jobs: . 
./ci/tmp/praktika_setup_env.sh set -o pipefail if command -v ts &> /dev/null; then - python3 -m praktika run 'Build (arm_tidy)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log + python3 -m praktika run 'Dockers Build (multiplatform manifest)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log else - python3 -m praktika run 'Build (arm_tidy)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log + python3 -m praktika run 'Dockers Build (multiplatform manifest)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log fi build_amd_debug: - runs-on: [self-hosted, arm-large] + runs-on: [self-hosted, altinity-on-demand, altinity-func-tester-aarch64] needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest] if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'QnVpbGQgKGFtZF9kZWJ1Zyk=') }} name: "Build (amd_debug)" @@ -229,6 +251,13 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Build (amd_debug)" + - name: Prepare env script run: | rm -rf ./ci/tmp @@ -256,7 +285,7 @@ jobs: fi build_amd_asan: - runs-on: [self-hosted, arm-large] + runs-on: [self-hosted, altinity-on-demand, altinity-func-tester-aarch64] needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest] if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'QnVpbGQgKGFtZF9hc2FuKQ==') }} name: "Build (amd_asan)" @@ -269,6 +298,13 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Build (amd_asan)" + - name: Prepare env script run: | rm -rf ./ci/tmp @@ -296,7 +332,7 @@ jobs: fi build_amd_tsan: - runs-on: [self-hosted, arm-large] + runs-on: [self-hosted, altinity-on-demand, altinity-func-tester-aarch64] needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest] if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'QnVpbGQgKGFtZF90c2FuKQ==') }} name: "Build (amd_tsan)" @@ -309,6 +345,13 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Build (amd_tsan)" + - name: Prepare env script run: | rm -rf ./ci/tmp @@ -336,7 +379,7 @@ jobs: fi build_amd_msan: - runs-on: [self-hosted, arm-large] + runs-on: [self-hosted, altinity-on-demand, altinity-func-tester-aarch64] needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest] if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'QnVpbGQgKGFtZF9tc2FuKQ==') }} name: "Build (amd_msan)" @@ -349,6 
+392,13 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Build (amd_msan)" + - name: Prepare env script run: | rm -rf ./ci/tmp @@ -376,7 +426,7 @@ jobs: fi build_amd_ubsan: - runs-on: [self-hosted, arm-large] + runs-on: [self-hosted, altinity-on-demand, altinity-func-tester-aarch64] needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest] if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'QnVpbGQgKGFtZF91YnNhbik=') }} name: "Build (amd_ubsan)" @@ -389,6 +439,13 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Build (amd_ubsan)" + - name: Prepare env script run: | rm -rf ./ci/tmp @@ -416,7 +473,7 @@ jobs: fi build_amd_binary: - runs-on: [self-hosted, arm-large] + runs-on: [self-hosted, altinity-on-demand, altinity-func-tester-aarch64] needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest] if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'QnVpbGQgKGFtZF9iaW5hcnkp') }} name: "Build (amd_binary)" @@ -429,6 +486,13 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Build (amd_binary)" + - name: Prepare env script run: | rm -rf ./ci/tmp @@ -456,7 +520,7 @@ jobs: fi build_arm_asan: - runs-on: [self-hosted, arm-large] + runs-on: [self-hosted, altinity-on-demand, altinity-func-tester-aarch64] needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest] if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'QnVpbGQgKGFybV9hc2FuKQ==') }} name: "Build (arm_asan)" @@ -469,6 +533,13 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Build (arm_asan)" + - name: Prepare env script run: | rm -rf ./ci/tmp @@ -496,7 +567,7 @@ jobs: fi build_arm_binary: - runs-on: [self-hosted, arm-large] + runs-on: [self-hosted, altinity-on-demand, altinity-func-tester-aarch64] needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest] if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'QnVpbGQgKGFybV9iaW5hcnkp') }} name: "Build (arm_binary)" @@ -509,6 +580,13 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Build (arm_binary)" + - name: Prepare env script run: | rm -rf ./ci/tmp @@ -536,7 
+614,7 @@ jobs: fi build_amd_release: - runs-on: [self-hosted, arm-large] + runs-on: [self-hosted, altinity-on-demand, altinity-func-tester-aarch64] needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest] if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'QnVpbGQgKGFtZF9yZWxlYXNlKQ==') }} name: "Build (amd_release)" @@ -549,6 +627,13 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Build (amd_release)" + - name: Prepare env script run: | rm -rf ./ci/tmp @@ -576,7 +661,7 @@ jobs: fi build_arm_release: - runs-on: [self-hosted, arm-large] + runs-on: [self-hosted, altinity-on-demand, altinity-builder] needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest] if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'QnVpbGQgKGFybV9yZWxlYXNlKQ==') }} name: "Build (arm_release)" @@ -589,6 +674,13 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Build (arm_release)" + - name: Prepare env script run: | rm -rf ./ci/tmp @@ -615,11 +707,11 @@ jobs: python3 -m praktika run 'Build (arm_release)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log fi - build_amd_darwin: - runs-on: [self-hosted, amd-large] - needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_arm_tidy, build_amd_debug, build_amd_asan, build_amd_tsan, build_amd_msan, build_amd_ubsan, build_amd_binary, build_arm_asan, build_arm_binary] - if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'QnVpbGQgKGFtZF9kYXJ3aW4p') }} - name: "Build (amd_darwin)" + unit_tests_asan: + runs-on: [self-hosted, altinity-on-demand, altinity-func-tester] + needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_amd_asan] + if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'VW5pdCB0ZXN0cyAoYXNhbik=') }} + name: "Unit tests (asan)" outputs: data: ${{ steps.run.outputs.DATA }} pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} @@ -629,6 +721,13 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Unit tests (asan)" + - name: Prepare env script run: | rm -rf ./ci/tmp @@ -650,16 +749,16 @@ jobs: . 
./ci/tmp/praktika_setup_env.sh set -o pipefail if command -v ts &> /dev/null; then - python3 -m praktika run 'Build (amd_darwin)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log + python3 -m praktika run 'Unit tests (asan)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log else - python3 -m praktika run 'Build (amd_darwin)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log + python3 -m praktika run 'Unit tests (asan)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log fi - build_arm_darwin: - runs-on: [self-hosted, arm-large] - needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_arm_tidy, build_amd_debug, build_amd_asan, build_amd_tsan, build_amd_msan, build_amd_ubsan, build_amd_binary, build_arm_asan, build_arm_binary] - if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'QnVpbGQgKGFybV9kYXJ3aW4p') }} - name: "Build (arm_darwin)" + unit_tests_tsan: + runs-on: [self-hosted, altinity-on-demand, altinity-func-tester] + needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_amd_tsan] + if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'VW5pdCB0ZXN0cyAodHNhbik=') }} + name: "Unit tests (tsan)" outputs: data: ${{ steps.run.outputs.DATA }} pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} @@ -669,6 +768,13 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Unit tests (tsan)" + - name: Prepare env script run: | rm -rf ./ci/tmp @@ -690,16 +796,16 @@ jobs: . 
./ci/tmp/praktika_setup_env.sh set -o pipefail if command -v ts &> /dev/null; then - python3 -m praktika run 'Build (arm_darwin)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log + python3 -m praktika run 'Unit tests (tsan)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log else - python3 -m praktika run 'Build (arm_darwin)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log + python3 -m praktika run 'Unit tests (tsan)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log fi - build_arm_v80compat: - runs-on: [self-hosted, arm-large] - needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_arm_tidy, build_amd_debug, build_amd_asan, build_amd_tsan, build_amd_msan, build_amd_ubsan, build_amd_binary, build_arm_asan, build_arm_binary] - if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'QnVpbGQgKGFybV92ODBjb21wYXQp') }} - name: "Build (arm_v80compat)" + unit_tests_msan: + runs-on: [self-hosted, altinity-on-demand, altinity-func-tester] + needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_amd_msan] + if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'VW5pdCB0ZXN0cyAobXNhbik=') }} + name: "Unit tests (msan)" outputs: data: ${{ steps.run.outputs.DATA }} pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} @@ -709,6 +815,13 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Unit tests (msan)" + - name: Prepare env script run: | rm -rf ./ci/tmp @@ -730,16 +843,16 @@ jobs: . 
./ci/tmp/praktika_setup_env.sh set -o pipefail if command -v ts &> /dev/null; then - python3 -m praktika run 'Build (arm_v80compat)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log + python3 -m praktika run 'Unit tests (msan)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log else - python3 -m praktika run 'Build (arm_v80compat)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log + python3 -m praktika run 'Unit tests (msan)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log fi - build_amd_freebsd: - runs-on: [self-hosted, arm-large] - needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_arm_tidy, build_amd_debug, build_amd_asan, build_amd_tsan, build_amd_msan, build_amd_ubsan, build_amd_binary, build_arm_asan, build_arm_binary] - if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'QnVpbGQgKGFtZF9mcmVlYnNkKQ==') }} - name: "Build (amd_freebsd)" + unit_tests_ubsan: + runs-on: [self-hosted, altinity-on-demand, altinity-func-tester] + needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_amd_ubsan] + if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'VW5pdCB0ZXN0cyAodWJzYW4p') }} + name: "Unit tests (ubsan)" outputs: data: ${{ steps.run.outputs.DATA }} pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} @@ -749,6 +862,13 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Unit tests (ubsan)" + - name: Prepare env script run: | rm -rf ./ci/tmp @@ -770,16 +890,16 @@ jobs: . 
./ci/tmp/praktika_setup_env.sh set -o pipefail if command -v ts &> /dev/null; then - python3 -m praktika run 'Build (amd_freebsd)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log + python3 -m praktika run 'Unit tests (ubsan)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log else - python3 -m praktika run 'Build (amd_freebsd)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log + python3 -m praktika run 'Unit tests (ubsan)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log fi - build_ppc64le: - runs-on: [self-hosted, arm-large] - needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_arm_tidy, build_amd_debug, build_amd_asan, build_amd_tsan, build_amd_msan, build_amd_ubsan, build_amd_binary, build_arm_asan, build_arm_binary] - if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'QnVpbGQgKHBwYzY0bGUp') }} - name: "Build (ppc64le)" + docker_server_image: + runs-on: [self-hosted, altinity-on-demand, altinity-style-checker] + needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_amd_release, build_arm_release] + if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'RG9ja2VyIHNlcnZlciBpbWFnZQ==') }} + name: "Docker server image" outputs: data: ${{ steps.run.outputs.DATA }} pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} @@ -789,6 +909,13 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Docker server image" + - name: Prepare env script run: | rm -rf ./ci/tmp @@ -810,16 +937,16 @@ jobs: . 
./ci/tmp/praktika_setup_env.sh set -o pipefail if command -v ts &> /dev/null; then - python3 -m praktika run 'Build (ppc64le)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log + python3 -m praktika run 'Docker server image' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log else - python3 -m praktika run 'Build (ppc64le)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log + python3 -m praktika run 'Docker server image' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log fi - build_amd_compat: - runs-on: [self-hosted, arm-large] - needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_arm_tidy, build_amd_debug, build_amd_asan, build_amd_tsan, build_amd_msan, build_amd_ubsan, build_amd_binary, build_arm_asan, build_arm_binary] - if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'QnVpbGQgKGFtZF9jb21wYXQp') }} - name: "Build (amd_compat)" + docker_keeper_image: + runs-on: [self-hosted, altinity-on-demand, altinity-style-checker] + needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_amd_release, build_arm_release] + if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'RG9ja2VyIGtlZXBlciBpbWFnZQ==') }} + name: "Docker keeper image" outputs: data: ${{ steps.run.outputs.DATA }} pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} @@ -829,6 +956,13 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Docker keeper image" + - name: Prepare env script run: | rm -rf ./ci/tmp @@ -850,16 +984,16 @@ jobs: . 
./ci/tmp/praktika_setup_env.sh set -o pipefail if command -v ts &> /dev/null; then - python3 -m praktika run 'Build (amd_compat)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log + python3 -m praktika run 'Docker keeper image' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log else - python3 -m praktika run 'Build (amd_compat)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log + python3 -m praktika run 'Docker keeper image' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log fi - build_amd_musl: - runs-on: [self-hosted, arm-large] - needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_arm_tidy, build_amd_debug, build_amd_asan, build_amd_tsan, build_amd_msan, build_amd_ubsan, build_amd_binary, build_arm_asan, build_arm_binary] - if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'QnVpbGQgKGFtZF9tdXNsKQ==') }} - name: "Build (amd_musl)" + install_packages_amd_release: + runs-on: [self-hosted, altinity-on-demand, altinity-func-tester] + needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_amd_release] + if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'SW5zdGFsbCBwYWNrYWdlcyAoYW1kX3JlbGVhc2Up') }} + name: "Install packages (amd_release)" outputs: data: ${{ steps.run.outputs.DATA }} pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} @@ -869,6 +1003,13 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Install packages (amd_release)" + - name: Prepare env script run: | rm -rf ./ci/tmp @@ -890,16 +1031,16 @@ jobs: . 
./ci/tmp/praktika_setup_env.sh set -o pipefail if command -v ts &> /dev/null; then - python3 -m praktika run 'Build (amd_musl)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log + python3 -m praktika run 'Install packages (amd_release)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log else - python3 -m praktika run 'Build (amd_musl)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log + python3 -m praktika run 'Install packages (amd_release)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log fi - build_riscv64: - runs-on: [self-hosted, arm-large] - needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_arm_tidy, build_amd_debug, build_amd_asan, build_amd_tsan, build_amd_msan, build_amd_ubsan, build_amd_binary, build_arm_asan, build_arm_binary] - if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'QnVpbGQgKHJpc2N2NjQp') }} - name: "Build (riscv64)" + install_packages_arm_release: + runs-on: [self-hosted, altinity-on-demand, altinity-func-tester-aarch64] + needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_arm_release] + if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'SW5zdGFsbCBwYWNrYWdlcyAoYXJtX3JlbGVhc2Up') }} + name: "Install packages (arm_release)" outputs: data: ${{ steps.run.outputs.DATA }} pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} @@ -909,6 +1050,13 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Install packages (arm_release)" + - name: Prepare env script run: | rm -rf ./ci/tmp @@ -930,16 +1078,16 @@ jobs: . 
./ci/tmp/praktika_setup_env.sh set -o pipefail if command -v ts &> /dev/null; then - python3 -m praktika run 'Build (riscv64)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log + python3 -m praktika run 'Install packages (arm_release)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log else - python3 -m praktika run 'Build (riscv64)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log + python3 -m praktika run 'Install packages (arm_release)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log fi - build_s390x: - runs-on: [self-hosted, arm-large] - needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_arm_tidy, build_amd_debug, build_amd_asan, build_amd_tsan, build_amd_msan, build_amd_ubsan, build_amd_binary, build_arm_asan, build_arm_binary] - if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'QnVpbGQgKHMzOTB4KQ==') }} - name: "Build (s390x)" + compatibility_check_amd_release: + runs-on: [self-hosted, altinity-on-demand, altinity-style-checker] + needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_amd_release] + if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'Q29tcGF0aWJpbGl0eSBjaGVjayAoYW1kX3JlbGVhc2Up') }} + name: "Compatibility check (amd_release)" outputs: data: ${{ steps.run.outputs.DATA }} pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} @@ -949,6 +1097,13 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Compatibility check (amd_release)" + - name: Prepare env script run: | rm -rf ./ci/tmp @@ -970,16 +1125,16 @@ jobs: . 
./ci/tmp/praktika_setup_env.sh set -o pipefail if command -v ts &> /dev/null; then - python3 -m praktika run 'Build (s390x)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log + python3 -m praktika run 'Compatibility check (amd_release)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log else - python3 -m praktika run 'Build (s390x)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log + python3 -m praktika run 'Compatibility check (amd_release)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log fi - build_loongarch64: - runs-on: [self-hosted, arm-large] - needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_arm_tidy, build_amd_debug, build_amd_asan, build_amd_tsan, build_amd_msan, build_amd_ubsan, build_amd_binary, build_arm_asan, build_arm_binary] - if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'QnVpbGQgKGxvb25nYXJjaDY0KQ==') }} - name: "Build (loongarch64)" + compatibility_check_arm_release: + runs-on: [self-hosted, altinity-on-demand, altinity-style-checker-aarch64] + needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_arm_release] + if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'Q29tcGF0aWJpbGl0eSBjaGVjayAoYXJtX3JlbGVhc2Up') }} + name: "Compatibility check (arm_release)" outputs: data: ${{ steps.run.outputs.DATA }} pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} @@ -989,6 +1144,13 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Compatibility check (arm_release)" + - name: Prepare env script run: | rm -rf ./ci/tmp @@ -1010,16 +1172,16 @@ jobs: . 
./ci/tmp/praktika_setup_env.sh set -o pipefail if command -v ts &> /dev/null; then - python3 -m praktika run 'Build (loongarch64)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log + python3 -m praktika run 'Compatibility check (arm_release)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log else - python3 -m praktika run 'Build (loongarch64)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log + python3 -m praktika run 'Compatibility check (arm_release)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log fi - build_arm_fuzzers: - runs-on: [self-hosted, arm-large] - needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_arm_tidy, build_amd_debug, build_amd_asan, build_amd_tsan, build_amd_msan, build_amd_ubsan, build_amd_binary, build_arm_asan, build_arm_binary] - if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'QnVpbGQgKGFybV9mdXp6ZXJzKQ==') }} - name: "Build (arm_fuzzers)" + stateless_tests_amd_asan_distributed_plan_parallel_1_2: + runs-on: [self-hosted, altinity-on-demand, altinity-func-tester] + needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_amd_asan] + if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhbWRfYXNhbiwgZGlzdHJpYnV0ZWQgcGxhbiwgcGFyYWxsZWwsIDEvMik=') }} + name: "Stateless tests (amd_asan, distributed plan, parallel, 1/2)" outputs: data: ${{ steps.run.outputs.DATA }} pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} @@ -1029,6 +1191,13 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Stateless tests (amd_asan, distributed plan, parallel, 1/2)" + - name: Prepare env script run: | rm -rf ./ci/tmp @@ -1050,16 +1219,16 @@ jobs: . 
./ci/tmp/praktika_setup_env.sh set -o pipefail if command -v ts &> /dev/null; then - python3 -m praktika run 'Build (arm_fuzzers)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log + python3 -m praktika run 'Stateless tests (amd_asan, distributed plan, parallel, 1/2)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log else - python3 -m praktika run 'Build (arm_fuzzers)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log + python3 -m praktika run 'Stateless tests (amd_asan, distributed plan, parallel, 1/2)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log fi - unit_tests_asan: - runs-on: [self-hosted, amd-large] + stateless_tests_amd_asan_distributed_plan_parallel_2_2: + runs-on: [self-hosted, altinity-on-demand, altinity-func-tester] needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_amd_asan] - if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'VW5pdCB0ZXN0cyAoYXNhbik=') }} - name: "Unit tests (asan)" + if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhbWRfYXNhbiwgZGlzdHJpYnV0ZWQgcGxhbiwgcGFyYWxsZWwsIDIvMik=') }} + name: "Stateless tests (amd_asan, distributed plan, parallel, 2/2)" outputs: data: ${{ steps.run.outputs.DATA }} pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} @@ -1069,6 +1238,13 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Stateless tests (amd_asan, distributed plan, parallel, 2/2)" + - name: Prepare env script run: | rm -rf ./ci/tmp @@ -1090,16 +1266,16 @@ jobs: . 
./ci/tmp/praktika_setup_env.sh set -o pipefail if command -v ts &> /dev/null; then - python3 -m praktika run 'Unit tests (asan)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log + python3 -m praktika run 'Stateless tests (amd_asan, distributed plan, parallel, 2/2)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log else - python3 -m praktika run 'Unit tests (asan)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log + python3 -m praktika run 'Stateless tests (amd_asan, distributed plan, parallel, 2/2)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log fi - unit_tests_tsan: - runs-on: [self-hosted, amd-large] - needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_amd_tsan] - if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'VW5pdCB0ZXN0cyAodHNhbik=') }} - name: "Unit tests (tsan)" + stateless_tests_amd_asan_db_disk_distributed_plan_sequential: + runs-on: [self-hosted, altinity-on-demand, altinity-func-tester] + needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_amd_asan] + if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhbWRfYXNhbiwgZGIgZGlzaywgZGlzdHJpYnV0ZWQgcGxhbiwgc2VxdWVudGlhbCk=') }} + name: "Stateless tests (amd_asan, db disk, distributed plan, sequential)" outputs: data: ${{ steps.run.outputs.DATA }} pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} @@ -1109,6 +1285,13 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Stateless tests (amd_asan, db disk, distributed plan, sequential)" + - name: Prepare env script run: | rm -rf ./ci/tmp @@ -1130,16 +1313,16 @@ jobs: . 
./ci/tmp/praktika_setup_env.sh set -o pipefail if command -v ts &> /dev/null; then - python3 -m praktika run 'Unit tests (tsan)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log + python3 -m praktika run 'Stateless tests (amd_asan, db disk, distributed plan, sequential)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log else - python3 -m praktika run 'Unit tests (tsan)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log + python3 -m praktika run 'Stateless tests (amd_asan, db disk, distributed plan, sequential)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log fi - unit_tests_msan: - runs-on: [self-hosted, amd-large] - needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_amd_msan] - if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'VW5pdCB0ZXN0cyAobXNhbik=') }} - name: "Unit tests (msan)" + stateless_tests_amd_binary_old_analyzer_s3_storage_databasereplicated_parallel: + runs-on: [self-hosted, altinity-on-demand, altinity-func-tester] + needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_amd_binary] + if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhbWRfYmluYXJ5LCBvbGQgYW5hbHl6ZXIsIHMzIHN0b3JhZ2UsIERhdGFiYXNlUmVwbGljYXRlZCwgcGFyYWxsZWwp') }} + name: "Stateless tests (amd_binary, old analyzer, s3 storage, DatabaseReplicated, parallel)" outputs: data: ${{ steps.run.outputs.DATA }} pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} @@ -1149,6 +1332,13 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Stateless tests (amd_binary, old analyzer, s3 storage, DatabaseReplicated, parallel)" + - name: Prepare env script run: | rm -rf ./ci/tmp @@ -1170,16 +1360,16 @@ jobs: . 
./ci/tmp/praktika_setup_env.sh set -o pipefail if command -v ts &> /dev/null; then - python3 -m praktika run 'Unit tests (msan)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log + python3 -m praktika run 'Stateless tests (amd_binary, old analyzer, s3 storage, DatabaseReplicated, parallel)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log else - python3 -m praktika run 'Unit tests (msan)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log + python3 -m praktika run 'Stateless tests (amd_binary, old analyzer, s3 storage, DatabaseReplicated, parallel)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log fi - unit_tests_ubsan: - runs-on: [self-hosted, amd-large] - needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_amd_ubsan] - if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'VW5pdCB0ZXN0cyAodWJzYW4p') }} - name: "Unit tests (ubsan)" + stateless_tests_amd_binary_old_analyzer_s3_storage_databasereplicated_sequential: + runs-on: [self-hosted, altinity-on-demand, altinity-func-tester] + needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_amd_binary] + if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhbWRfYmluYXJ5LCBvbGQgYW5hbHl6ZXIsIHMzIHN0b3JhZ2UsIERhdGFiYXNlUmVwbGljYXRlZCwgc2VxdWVudGlhbCk=') }} + name: "Stateless tests (amd_binary, old analyzer, s3 storage, DatabaseReplicated, sequential)" outputs: data: ${{ steps.run.outputs.DATA }} pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} @@ -1189,6 +1379,13 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Stateless tests (amd_binary, old analyzer, s3 storage, DatabaseReplicated, sequential)" + - name: Prepare env script run: | rm -rf ./ci/tmp @@ -1210,16 +1407,16 @@ jobs: . 
./ci/tmp/praktika_setup_env.sh set -o pipefail if command -v ts &> /dev/null; then - python3 -m praktika run 'Unit tests (ubsan)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log + python3 -m praktika run 'Stateless tests (amd_binary, old analyzer, s3 storage, DatabaseReplicated, sequential)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log else - python3 -m praktika run 'Unit tests (ubsan)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log + python3 -m praktika run 'Stateless tests (amd_binary, old analyzer, s3 storage, DatabaseReplicated, sequential)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log fi - docker_server_image: - runs-on: [self-hosted, style-checker] - needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_amd_release, build_arm_release] - if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'RG9ja2VyIHNlcnZlciBpbWFnZQ==') }} - name: "Docker server image" + stateless_tests_amd_binary_parallelreplicas_s3_storage_parallel: + runs-on: [self-hosted, altinity-on-demand, altinity-func-tester] + needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_amd_binary] + if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhbWRfYmluYXJ5LCBQYXJhbGxlbFJlcGxpY2FzLCBzMyBzdG9yYWdlLCBwYXJhbGxlbCk=') }} + name: "Stateless tests (amd_binary, ParallelReplicas, s3 storage, parallel)" outputs: data: ${{ steps.run.outputs.DATA }} pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} @@ -1229,6 +1426,13 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Stateless tests (amd_binary, ParallelReplicas, s3 storage, parallel)" + - name: Prepare env script run: | rm -rf ./ci/tmp @@ -1250,16 +1454,16 @@ jobs: . 
./ci/tmp/praktika_setup_env.sh set -o pipefail if command -v ts &> /dev/null; then - python3 -m praktika run 'Docker server image' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log + python3 -m praktika run 'Stateless tests (amd_binary, ParallelReplicas, s3 storage, parallel)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log else - python3 -m praktika run 'Docker server image' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log + python3 -m praktika run 'Stateless tests (amd_binary, ParallelReplicas, s3 storage, parallel)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log fi - docker_keeper_image: - runs-on: [self-hosted, style-checker] - needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_amd_release, build_arm_release] - if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'RG9ja2VyIGtlZXBlciBpbWFnZQ==') }} - name: "Docker keeper image" + stateless_tests_amd_binary_parallelreplicas_s3_storage_sequential: + runs-on: [self-hosted, altinity-on-demand, altinity-func-tester] + needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_amd_binary] + if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhbWRfYmluYXJ5LCBQYXJhbGxlbFJlcGxpY2FzLCBzMyBzdG9yYWdlLCBzZXF1ZW50aWFsKQ==') }} + name: "Stateless tests (amd_binary, ParallelReplicas, s3 storage, sequential)" outputs: data: ${{ steps.run.outputs.DATA }} pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} @@ -1269,6 +1473,13 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Stateless tests (amd_binary, ParallelReplicas, s3 storage, sequential)" + - name: Prepare env script run: | rm -rf ./ci/tmp @@ -1290,16 +1501,16 @@ jobs: . 
./ci/tmp/praktika_setup_env.sh set -o pipefail if command -v ts &> /dev/null; then - python3 -m praktika run 'Docker keeper image' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log + python3 -m praktika run 'Stateless tests (amd_binary, ParallelReplicas, s3 storage, sequential)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log else - python3 -m praktika run 'Docker keeper image' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log + python3 -m praktika run 'Stateless tests (amd_binary, ParallelReplicas, s3 storage, sequential)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log fi - install_packages_amd_release: - runs-on: [self-hosted, style-checker] - needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_amd_release] - if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'SW5zdGFsbCBwYWNrYWdlcyAoYW1kX3JlbGVhc2Up') }} - name: "Install packages (amd_release)" + stateless_tests_amd_debug_asyncinsert_s3_storage_parallel: + runs-on: [self-hosted, altinity-on-demand, altinity-func-tester] + needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_amd_debug] + if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhbWRfZGVidWcsIEFzeW5jSW5zZXJ0LCBzMyBzdG9yYWdlLCBwYXJhbGxlbCk=') }} + name: "Stateless tests (amd_debug, AsyncInsert, s3 storage, parallel)" outputs: data: ${{ steps.run.outputs.DATA }} pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} @@ -1309,6 +1520,13 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Stateless tests (amd_debug, AsyncInsert, s3 storage, parallel)" + - name: Prepare env script run: | rm -rf ./ci/tmp @@ -1330,1256 +1548,16 @@ jobs: . 
./ci/tmp/praktika_setup_env.sh set -o pipefail if command -v ts &> /dev/null; then - python3 -m praktika run 'Install packages (amd_release)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log + python3 -m praktika run 'Stateless tests (amd_debug, AsyncInsert, s3 storage, parallel)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log else - python3 -m praktika run 'Install packages (amd_release)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log + python3 -m praktika run 'Stateless tests (amd_debug, AsyncInsert, s3 storage, parallel)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log fi - install_packages_arm_release: - runs-on: [self-hosted, style-checker-aarch64] - needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_arm_release] - if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'SW5zdGFsbCBwYWNrYWdlcyAoYXJtX3JlbGVhc2Up') }} - name: "Install packages (arm_release)" - outputs: - data: ${{ steps.run.outputs.DATA }} - pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} - steps: - - name: Checkout code - uses: actions/checkout@v4 - with: - ref: ${{ env.CHECKOUT_REF }} - - - name: Prepare env script - run: | - rm -rf ./ci/tmp - mkdir -p ./ci/tmp - cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF' - export PYTHONPATH=./ci:.: - - cat > ./ci/tmp/workflow_job.json << 'EOF' - ${{ toJson(job) }} - EOF - cat > ./ci/tmp/workflow_status.json << 'EOF' - ${{ toJson(needs) }} - EOF - ENV_SETUP_SCRIPT_EOF - - - name: Run - id: run - run: | - . ./ci/tmp/praktika_setup_env.sh - set -o pipefail - if command -v ts &> /dev/null; then - python3 -m praktika run 'Install packages (arm_release)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log - else - python3 -m praktika run 'Install packages (arm_release)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log - fi - - compatibility_check_amd_release: - runs-on: [self-hosted, style-checker] - needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_amd_release] - if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'Q29tcGF0aWJpbGl0eSBjaGVjayAoYW1kX3JlbGVhc2Up') }} - name: "Compatibility check (amd_release)" - outputs: - data: ${{ steps.run.outputs.DATA }} - pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} - steps: - - name: Checkout code - uses: actions/checkout@v4 - with: - ref: ${{ env.CHECKOUT_REF }} - - - name: Prepare env script - run: | - rm -rf ./ci/tmp - mkdir -p ./ci/tmp - cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF' - export PYTHONPATH=./ci:.: - - cat > ./ci/tmp/workflow_job.json << 'EOF' - ${{ toJson(job) }} - EOF - cat > ./ci/tmp/workflow_status.json << 'EOF' - ${{ toJson(needs) }} - EOF - ENV_SETUP_SCRIPT_EOF - - - name: Run - id: run - run: | - . 
./ci/tmp/praktika_setup_env.sh - set -o pipefail - if command -v ts &> /dev/null; then - python3 -m praktika run 'Compatibility check (amd_release)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log - else - python3 -m praktika run 'Compatibility check (amd_release)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log - fi - - compatibility_check_arm_release: - runs-on: [self-hosted, style-checker-aarch64] - needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_arm_release] - if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'Q29tcGF0aWJpbGl0eSBjaGVjayAoYXJtX3JlbGVhc2Up') }} - name: "Compatibility check (arm_release)" - outputs: - data: ${{ steps.run.outputs.DATA }} - pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} - steps: - - name: Checkout code - uses: actions/checkout@v4 - with: - ref: ${{ env.CHECKOUT_REF }} - - - name: Prepare env script - run: | - rm -rf ./ci/tmp - mkdir -p ./ci/tmp - cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF' - export PYTHONPATH=./ci:.: - - cat > ./ci/tmp/workflow_job.json << 'EOF' - ${{ toJson(job) }} - EOF - cat > ./ci/tmp/workflow_status.json << 'EOF' - ${{ toJson(needs) }} - EOF - ENV_SETUP_SCRIPT_EOF - - - name: Run - id: run - run: | - . ./ci/tmp/praktika_setup_env.sh - set -o pipefail - if command -v ts &> /dev/null; then - python3 -m praktika run 'Compatibility check (arm_release)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log - else - python3 -m praktika run 'Compatibility check (arm_release)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log - fi - - stateless_tests_amd_asan_distributed_plan_parallel_1_2: - runs-on: [self-hosted, amd-medium-cpu] - needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_amd_asan] - if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhbWRfYXNhbiwgZGlzdHJpYnV0ZWQgcGxhbiwgcGFyYWxsZWwsIDEvMik=') }} - name: "Stateless tests (amd_asan, distributed plan, parallel, 1/2)" - outputs: - data: ${{ steps.run.outputs.DATA }} - pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} - steps: - - name: Checkout code - uses: actions/checkout@v4 - with: - ref: ${{ env.CHECKOUT_REF }} - - - name: Prepare env script - run: | - rm -rf ./ci/tmp - mkdir -p ./ci/tmp - cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF' - export PYTHONPATH=./ci:.: - - cat > ./ci/tmp/workflow_job.json << 'EOF' - ${{ toJson(job) }} - EOF - cat > ./ci/tmp/workflow_status.json << 'EOF' - ${{ toJson(needs) }} - EOF - ENV_SETUP_SCRIPT_EOF - - - name: Run - id: run - run: | - . 
./ci/tmp/praktika_setup_env.sh - set -o pipefail - if command -v ts &> /dev/null; then - python3 -m praktika run 'Stateless tests (amd_asan, distributed plan, parallel, 1/2)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log - else - python3 -m praktika run 'Stateless tests (amd_asan, distributed plan, parallel, 1/2)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log - fi - - stateless_tests_amd_asan_distributed_plan_parallel_2_2: - runs-on: [self-hosted, amd-medium-cpu] - needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_amd_asan] - if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhbWRfYXNhbiwgZGlzdHJpYnV0ZWQgcGxhbiwgcGFyYWxsZWwsIDIvMik=') }} - name: "Stateless tests (amd_asan, distributed plan, parallel, 2/2)" - outputs: - data: ${{ steps.run.outputs.DATA }} - pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} - steps: - - name: Checkout code - uses: actions/checkout@v4 - with: - ref: ${{ env.CHECKOUT_REF }} - - - name: Prepare env script - run: | - rm -rf ./ci/tmp - mkdir -p ./ci/tmp - cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF' - export PYTHONPATH=./ci:.: - - cat > ./ci/tmp/workflow_job.json << 'EOF' - ${{ toJson(job) }} - EOF - cat > ./ci/tmp/workflow_status.json << 'EOF' - ${{ toJson(needs) }} - EOF - ENV_SETUP_SCRIPT_EOF - - - name: Run - id: run - run: | - . ./ci/tmp/praktika_setup_env.sh - set -o pipefail - if command -v ts &> /dev/null; then - python3 -m praktika run 'Stateless tests (amd_asan, distributed plan, parallel, 2/2)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log - else - python3 -m praktika run 'Stateless tests (amd_asan, distributed plan, parallel, 2/2)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log - fi - - stateless_tests_amd_asan_db_disk_distributed_plan_sequential: - runs-on: [self-hosted, amd-small-mem] - needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_amd_asan] - if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhbWRfYXNhbiwgZGIgZGlzaywgZGlzdHJpYnV0ZWQgcGxhbiwgc2VxdWVudGlhbCk=') }} - name: "Stateless tests (amd_asan, db disk, distributed plan, sequential)" - outputs: - data: ${{ steps.run.outputs.DATA }} - pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} - steps: - - name: Checkout code - uses: actions/checkout@v4 - with: - ref: ${{ env.CHECKOUT_REF }} - - - name: Prepare env script - run: | - rm -rf ./ci/tmp - mkdir -p ./ci/tmp - cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF' - export PYTHONPATH=./ci:.: - - cat > ./ci/tmp/workflow_job.json << 'EOF' - ${{ toJson(job) }} - EOF - cat > ./ci/tmp/workflow_status.json << 'EOF' - ${{ toJson(needs) }} - EOF - ENV_SETUP_SCRIPT_EOF - - - name: Run - id: run - run: | - . 
./ci/tmp/praktika_setup_env.sh - set -o pipefail - if command -v ts &> /dev/null; then - python3 -m praktika run 'Stateless tests (amd_asan, db disk, distributed plan, sequential)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log - else - python3 -m praktika run 'Stateless tests (amd_asan, db disk, distributed plan, sequential)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log - fi - - stateless_tests_amd_binary_old_analyzer_s3_storage_databasereplicated_parallel: - runs-on: [self-hosted, amd-medium] - needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_amd_binary] - if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhbWRfYmluYXJ5LCBvbGQgYW5hbHl6ZXIsIHMzIHN0b3JhZ2UsIERhdGFiYXNlUmVwbGljYXRlZCwgcGFyYWxsZWwp') }} - name: "Stateless tests (amd_binary, old analyzer, s3 storage, DatabaseReplicated, parallel)" - outputs: - data: ${{ steps.run.outputs.DATA }} - pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} - steps: - - name: Checkout code - uses: actions/checkout@v4 - with: - ref: ${{ env.CHECKOUT_REF }} - - - name: Prepare env script - run: | - rm -rf ./ci/tmp - mkdir -p ./ci/tmp - cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF' - export PYTHONPATH=./ci:.: - - cat > ./ci/tmp/workflow_job.json << 'EOF' - ${{ toJson(job) }} - EOF - cat > ./ci/tmp/workflow_status.json << 'EOF' - ${{ toJson(needs) }} - EOF - ENV_SETUP_SCRIPT_EOF - - - name: Run - id: run - run: | - . ./ci/tmp/praktika_setup_env.sh - set -o pipefail - if command -v ts &> /dev/null; then - python3 -m praktika run 'Stateless tests (amd_binary, old analyzer, s3 storage, DatabaseReplicated, parallel)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log - else - python3 -m praktika run 'Stateless tests (amd_binary, old analyzer, s3 storage, DatabaseReplicated, parallel)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log - fi - - stateless_tests_amd_binary_old_analyzer_s3_storage_databasereplicated_sequential: - runs-on: [self-hosted, amd-small] - needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_amd_binary] - if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhbWRfYmluYXJ5LCBvbGQgYW5hbHl6ZXIsIHMzIHN0b3JhZ2UsIERhdGFiYXNlUmVwbGljYXRlZCwgc2VxdWVudGlhbCk=') }} - name: "Stateless tests (amd_binary, old analyzer, s3 storage, DatabaseReplicated, sequential)" - outputs: - data: ${{ steps.run.outputs.DATA }} - pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} - steps: - - name: Checkout code - uses: actions/checkout@v4 - with: - ref: ${{ env.CHECKOUT_REF }} - - - name: Prepare env script - run: | - rm -rf ./ci/tmp - mkdir -p ./ci/tmp - cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF' - export PYTHONPATH=./ci:.: - - cat > ./ci/tmp/workflow_job.json << 'EOF' - ${{ toJson(job) }} - EOF - cat > ./ci/tmp/workflow_status.json << 'EOF' - ${{ toJson(needs) }} - EOF - ENV_SETUP_SCRIPT_EOF - - - name: Run - id: run - run: | - . 
./ci/tmp/praktika_setup_env.sh - set -o pipefail - if command -v ts &> /dev/null; then - python3 -m praktika run 'Stateless tests (amd_binary, old analyzer, s3 storage, DatabaseReplicated, sequential)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log - else - python3 -m praktika run 'Stateless tests (amd_binary, old analyzer, s3 storage, DatabaseReplicated, sequential)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log - fi - - stateless_tests_amd_binary_parallelreplicas_s3_storage_parallel: - runs-on: [self-hosted, amd-medium] - needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_amd_binary] - if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhbWRfYmluYXJ5LCBQYXJhbGxlbFJlcGxpY2FzLCBzMyBzdG9yYWdlLCBwYXJhbGxlbCk=') }} - name: "Stateless tests (amd_binary, ParallelReplicas, s3 storage, parallel)" - outputs: - data: ${{ steps.run.outputs.DATA }} - pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} - steps: - - name: Checkout code - uses: actions/checkout@v4 - with: - ref: ${{ env.CHECKOUT_REF }} - - - name: Prepare env script - run: | - rm -rf ./ci/tmp - mkdir -p ./ci/tmp - cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF' - export PYTHONPATH=./ci:.: - - cat > ./ci/tmp/workflow_job.json << 'EOF' - ${{ toJson(job) }} - EOF - cat > ./ci/tmp/workflow_status.json << 'EOF' - ${{ toJson(needs) }} - EOF - ENV_SETUP_SCRIPT_EOF - - - name: Run - id: run - run: | - . ./ci/tmp/praktika_setup_env.sh - set -o pipefail - if command -v ts &> /dev/null; then - python3 -m praktika run 'Stateless tests (amd_binary, ParallelReplicas, s3 storage, parallel)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log - else - python3 -m praktika run 'Stateless tests (amd_binary, ParallelReplicas, s3 storage, parallel)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log - fi - - stateless_tests_amd_binary_parallelreplicas_s3_storage_sequential: - runs-on: [self-hosted, amd-small] - needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_amd_binary] - if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhbWRfYmluYXJ5LCBQYXJhbGxlbFJlcGxpY2FzLCBzMyBzdG9yYWdlLCBzZXF1ZW50aWFsKQ==') }} - name: "Stateless tests (amd_binary, ParallelReplicas, s3 storage, sequential)" - outputs: - data: ${{ steps.run.outputs.DATA }} - pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} - steps: - - name: Checkout code - uses: actions/checkout@v4 - with: - ref: ${{ env.CHECKOUT_REF }} - - - name: Prepare env script - run: | - rm -rf ./ci/tmp - mkdir -p ./ci/tmp - cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF' - export PYTHONPATH=./ci:.: - - cat > ./ci/tmp/workflow_job.json << 'EOF' - ${{ toJson(job) }} - EOF - cat > ./ci/tmp/workflow_status.json << 'EOF' - ${{ toJson(needs) }} - EOF - ENV_SETUP_SCRIPT_EOF - - - name: Run - id: run - run: | - . 
./ci/tmp/praktika_setup_env.sh - set -o pipefail - if command -v ts &> /dev/null; then - python3 -m praktika run 'Stateless tests (amd_binary, ParallelReplicas, s3 storage, sequential)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log - else - python3 -m praktika run 'Stateless tests (amd_binary, ParallelReplicas, s3 storage, sequential)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log - fi - - stateless_tests_amd_debug_asyncinsert_s3_storage_parallel: - runs-on: [self-hosted, amd-medium] - needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_amd_debug] - if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhbWRfZGVidWcsIEFzeW5jSW5zZXJ0LCBzMyBzdG9yYWdlLCBwYXJhbGxlbCk=') }} - name: "Stateless tests (amd_debug, AsyncInsert, s3 storage, parallel)" - outputs: - data: ${{ steps.run.outputs.DATA }} - pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} - steps: - - name: Checkout code - uses: actions/checkout@v4 - with: - ref: ${{ env.CHECKOUT_REF }} - - - name: Prepare env script - run: | - rm -rf ./ci/tmp - mkdir -p ./ci/tmp - cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF' - export PYTHONPATH=./ci:.: - - cat > ./ci/tmp/workflow_job.json << 'EOF' - ${{ toJson(job) }} - EOF - cat > ./ci/tmp/workflow_status.json << 'EOF' - ${{ toJson(needs) }} - EOF - ENV_SETUP_SCRIPT_EOF - - - name: Run - id: run - run: | - . ./ci/tmp/praktika_setup_env.sh - set -o pipefail - if command -v ts &> /dev/null; then - python3 -m praktika run 'Stateless tests (amd_debug, AsyncInsert, s3 storage, parallel)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log - else - python3 -m praktika run 'Stateless tests (amd_debug, AsyncInsert, s3 storage, parallel)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log - fi - - stateless_tests_amd_debug_asyncinsert_s3_storage_sequential: - runs-on: [self-hosted, amd-small] - needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_amd_debug] - if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhbWRfZGVidWcsIEFzeW5jSW5zZXJ0LCBzMyBzdG9yYWdlLCBzZXF1ZW50aWFsKQ==') }} - name: "Stateless tests (amd_debug, AsyncInsert, s3 storage, sequential)" - outputs: - data: ${{ steps.run.outputs.DATA }} - pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} - steps: - - name: Checkout code - uses: actions/checkout@v4 - with: - ref: ${{ env.CHECKOUT_REF }} - - - name: Prepare env script - run: | - rm -rf ./ci/tmp - mkdir -p ./ci/tmp - cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF' - export PYTHONPATH=./ci:.: - - cat > ./ci/tmp/workflow_job.json << 'EOF' - ${{ toJson(job) }} - EOF - cat > ./ci/tmp/workflow_status.json << 'EOF' - ${{ toJson(needs) }} - EOF - ENV_SETUP_SCRIPT_EOF - - - name: Run - id: run - run: | - . 
./ci/tmp/praktika_setup_env.sh - set -o pipefail - if command -v ts &> /dev/null; then - python3 -m praktika run 'Stateless tests (amd_debug, AsyncInsert, s3 storage, sequential)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log - else - python3 -m praktika run 'Stateless tests (amd_debug, AsyncInsert, s3 storage, sequential)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log - fi - - stateless_tests_amd_debug_parallel: - runs-on: [self-hosted, amd-medium-cpu] - needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_amd_debug] - if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhbWRfZGVidWcsIHBhcmFsbGVsKQ==') }} - name: "Stateless tests (amd_debug, parallel)" - outputs: - data: ${{ steps.run.outputs.DATA }} - pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} - steps: - - name: Checkout code - uses: actions/checkout@v4 - with: - ref: ${{ env.CHECKOUT_REF }} - - - name: Prepare env script - run: | - rm -rf ./ci/tmp - mkdir -p ./ci/tmp - cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF' - export PYTHONPATH=./ci:.: - - cat > ./ci/tmp/workflow_job.json << 'EOF' - ${{ toJson(job) }} - EOF - cat > ./ci/tmp/workflow_status.json << 'EOF' - ${{ toJson(needs) }} - EOF - ENV_SETUP_SCRIPT_EOF - - - name: Run - id: run - run: | - . ./ci/tmp/praktika_setup_env.sh - set -o pipefail - if command -v ts &> /dev/null; then - python3 -m praktika run 'Stateless tests (amd_debug, parallel)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log - else - python3 -m praktika run 'Stateless tests (amd_debug, parallel)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log - fi - - stateless_tests_amd_debug_sequential: - runs-on: [self-hosted, amd-small] - needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_amd_debug] - if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhbWRfZGVidWcsIHNlcXVlbnRpYWwp') }} - name: "Stateless tests (amd_debug, sequential)" - outputs: - data: ${{ steps.run.outputs.DATA }} - pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} - steps: - - name: Checkout code - uses: actions/checkout@v4 - with: - ref: ${{ env.CHECKOUT_REF }} - - - name: Prepare env script - run: | - rm -rf ./ci/tmp - mkdir -p ./ci/tmp - cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF' - export PYTHONPATH=./ci:.: - - cat > ./ci/tmp/workflow_job.json << 'EOF' - ${{ toJson(job) }} - EOF - cat > ./ci/tmp/workflow_status.json << 'EOF' - ${{ toJson(needs) }} - EOF - ENV_SETUP_SCRIPT_EOF - - - name: Run - id: run - run: | - . 
./ci/tmp/praktika_setup_env.sh - set -o pipefail - if command -v ts &> /dev/null; then - python3 -m praktika run 'Stateless tests (amd_debug, sequential)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log - else - python3 -m praktika run 'Stateless tests (amd_debug, sequential)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log - fi - - stateless_tests_amd_tsan_parallel_1_2: - runs-on: [self-hosted, amd-large] - needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_amd_tsan] - if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhbWRfdHNhbiwgcGFyYWxsZWwsIDEvMik=') }} - name: "Stateless tests (amd_tsan, parallel, 1/2)" - outputs: - data: ${{ steps.run.outputs.DATA }} - pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} - steps: - - name: Checkout code - uses: actions/checkout@v4 - with: - ref: ${{ env.CHECKOUT_REF }} - - - name: Prepare env script - run: | - rm -rf ./ci/tmp - mkdir -p ./ci/tmp - cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF' - export PYTHONPATH=./ci:.: - - cat > ./ci/tmp/workflow_job.json << 'EOF' - ${{ toJson(job) }} - EOF - cat > ./ci/tmp/workflow_status.json << 'EOF' - ${{ toJson(needs) }} - EOF - ENV_SETUP_SCRIPT_EOF - - - name: Run - id: run - run: | - . ./ci/tmp/praktika_setup_env.sh - set -o pipefail - if command -v ts &> /dev/null; then - python3 -m praktika run 'Stateless tests (amd_tsan, parallel, 1/2)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log - else - python3 -m praktika run 'Stateless tests (amd_tsan, parallel, 1/2)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log - fi - - stateless_tests_amd_tsan_parallel_2_2: - runs-on: [self-hosted, amd-large] - needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_amd_tsan] - if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhbWRfdHNhbiwgcGFyYWxsZWwsIDIvMik=') }} - name: "Stateless tests (amd_tsan, parallel, 2/2)" - outputs: - data: ${{ steps.run.outputs.DATA }} - pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} - steps: - - name: Checkout code - uses: actions/checkout@v4 - with: - ref: ${{ env.CHECKOUT_REF }} - - - name: Prepare env script - run: | - rm -rf ./ci/tmp - mkdir -p ./ci/tmp - cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF' - export PYTHONPATH=./ci:.: - - cat > ./ci/tmp/workflow_job.json << 'EOF' - ${{ toJson(job) }} - EOF - cat > ./ci/tmp/workflow_status.json << 'EOF' - ${{ toJson(needs) }} - EOF - ENV_SETUP_SCRIPT_EOF - - - name: Run - id: run - run: | - . 
./ci/tmp/praktika_setup_env.sh - set -o pipefail - if command -v ts &> /dev/null; then - python3 -m praktika run 'Stateless tests (amd_tsan, parallel, 2/2)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log - else - python3 -m praktika run 'Stateless tests (amd_tsan, parallel, 2/2)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log - fi - - stateless_tests_amd_tsan_sequential_1_2: - runs-on: [self-hosted, amd-small] - needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_amd_tsan] - if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhbWRfdHNhbiwgc2VxdWVudGlhbCwgMS8yKQ==') }} - name: "Stateless tests (amd_tsan, sequential, 1/2)" - outputs: - data: ${{ steps.run.outputs.DATA }} - pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} - steps: - - name: Checkout code - uses: actions/checkout@v4 - with: - ref: ${{ env.CHECKOUT_REF }} - - - name: Prepare env script - run: | - rm -rf ./ci/tmp - mkdir -p ./ci/tmp - cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF' - export PYTHONPATH=./ci:.: - - cat > ./ci/tmp/workflow_job.json << 'EOF' - ${{ toJson(job) }} - EOF - cat > ./ci/tmp/workflow_status.json << 'EOF' - ${{ toJson(needs) }} - EOF - ENV_SETUP_SCRIPT_EOF - - - name: Run - id: run - run: | - . ./ci/tmp/praktika_setup_env.sh - set -o pipefail - if command -v ts &> /dev/null; then - python3 -m praktika run 'Stateless tests (amd_tsan, sequential, 1/2)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log - else - python3 -m praktika run 'Stateless tests (amd_tsan, sequential, 1/2)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log - fi - - stateless_tests_amd_tsan_sequential_2_2: - runs-on: [self-hosted, amd-small] - needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_amd_tsan] - if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhbWRfdHNhbiwgc2VxdWVudGlhbCwgMi8yKQ==') }} - name: "Stateless tests (amd_tsan, sequential, 2/2)" - outputs: - data: ${{ steps.run.outputs.DATA }} - pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} - steps: - - name: Checkout code - uses: actions/checkout@v4 - with: - ref: ${{ env.CHECKOUT_REF }} - - - name: Prepare env script - run: | - rm -rf ./ci/tmp - mkdir -p ./ci/tmp - cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF' - export PYTHONPATH=./ci:.: - - cat > ./ci/tmp/workflow_job.json << 'EOF' - ${{ toJson(job) }} - EOF - cat > ./ci/tmp/workflow_status.json << 'EOF' - ${{ toJson(needs) }} - EOF - ENV_SETUP_SCRIPT_EOF - - - name: Run - id: run - run: | - . 
./ci/tmp/praktika_setup_env.sh - set -o pipefail - if command -v ts &> /dev/null; then - python3 -m praktika run 'Stateless tests (amd_tsan, sequential, 2/2)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log - else - python3 -m praktika run 'Stateless tests (amd_tsan, sequential, 2/2)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log - fi - - stateless_tests_amd_msan_parallel_1_2: - runs-on: [self-hosted, amd-large] - needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_amd_msan] - if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhbWRfbXNhbiwgcGFyYWxsZWwsIDEvMik=') }} - name: "Stateless tests (amd_msan, parallel, 1/2)" - outputs: - data: ${{ steps.run.outputs.DATA }} - pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} - steps: - - name: Checkout code - uses: actions/checkout@v4 - with: - ref: ${{ env.CHECKOUT_REF }} - - - name: Prepare env script - run: | - rm -rf ./ci/tmp - mkdir -p ./ci/tmp - cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF' - export PYTHONPATH=./ci:.: - - cat > ./ci/tmp/workflow_job.json << 'EOF' - ${{ toJson(job) }} - EOF - cat > ./ci/tmp/workflow_status.json << 'EOF' - ${{ toJson(needs) }} - EOF - ENV_SETUP_SCRIPT_EOF - - - name: Run - id: run - run: | - . ./ci/tmp/praktika_setup_env.sh - set -o pipefail - if command -v ts &> /dev/null; then - python3 -m praktika run 'Stateless tests (amd_msan, parallel, 1/2)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log - else - python3 -m praktika run 'Stateless tests (amd_msan, parallel, 1/2)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log - fi - - stateless_tests_amd_msan_parallel_2_2: - runs-on: [self-hosted, amd-large] - needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_amd_msan] - if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhbWRfbXNhbiwgcGFyYWxsZWwsIDIvMik=') }} - name: "Stateless tests (amd_msan, parallel, 2/2)" - outputs: - data: ${{ steps.run.outputs.DATA }} - pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} - steps: - - name: Checkout code - uses: actions/checkout@v4 - with: - ref: ${{ env.CHECKOUT_REF }} - - - name: Prepare env script - run: | - rm -rf ./ci/tmp - mkdir -p ./ci/tmp - cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF' - export PYTHONPATH=./ci:.: - - cat > ./ci/tmp/workflow_job.json << 'EOF' - ${{ toJson(job) }} - EOF - cat > ./ci/tmp/workflow_status.json << 'EOF' - ${{ toJson(needs) }} - EOF - ENV_SETUP_SCRIPT_EOF - - - name: Run - id: run - run: | - . 
./ci/tmp/praktika_setup_env.sh - set -o pipefail - if command -v ts &> /dev/null; then - python3 -m praktika run 'Stateless tests (amd_msan, parallel, 2/2)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log - else - python3 -m praktika run 'Stateless tests (amd_msan, parallel, 2/2)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log - fi - - stateless_tests_amd_msan_sequential_1_2: - runs-on: [self-hosted, amd-small-mem] - needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_amd_msan] - if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhbWRfbXNhbiwgc2VxdWVudGlhbCwgMS8yKQ==') }} - name: "Stateless tests (amd_msan, sequential, 1/2)" - outputs: - data: ${{ steps.run.outputs.DATA }} - pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} - steps: - - name: Checkout code - uses: actions/checkout@v4 - with: - ref: ${{ env.CHECKOUT_REF }} - - - name: Prepare env script - run: | - rm -rf ./ci/tmp - mkdir -p ./ci/tmp - cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF' - export PYTHONPATH=./ci:.: - - cat > ./ci/tmp/workflow_job.json << 'EOF' - ${{ toJson(job) }} - EOF - cat > ./ci/tmp/workflow_status.json << 'EOF' - ${{ toJson(needs) }} - EOF - ENV_SETUP_SCRIPT_EOF - - - name: Run - id: run - run: | - . ./ci/tmp/praktika_setup_env.sh - set -o pipefail - if command -v ts &> /dev/null; then - python3 -m praktika run 'Stateless tests (amd_msan, sequential, 1/2)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log - else - python3 -m praktika run 'Stateless tests (amd_msan, sequential, 1/2)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log - fi - - stateless_tests_amd_msan_sequential_2_2: - runs-on: [self-hosted, amd-small-mem] - needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_amd_msan] - if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhbWRfbXNhbiwgc2VxdWVudGlhbCwgMi8yKQ==') }} - name: "Stateless tests (amd_msan, sequential, 2/2)" - outputs: - data: ${{ steps.run.outputs.DATA }} - pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} - steps: - - name: Checkout code - uses: actions/checkout@v4 - with: - ref: ${{ env.CHECKOUT_REF }} - - - name: Prepare env script - run: | - rm -rf ./ci/tmp - mkdir -p ./ci/tmp - cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF' - export PYTHONPATH=./ci:.: - - cat > ./ci/tmp/workflow_job.json << 'EOF' - ${{ toJson(job) }} - EOF - cat > ./ci/tmp/workflow_status.json << 'EOF' - ${{ toJson(needs) }} - EOF - ENV_SETUP_SCRIPT_EOF - - - name: Run - id: run - run: | - . 
./ci/tmp/praktika_setup_env.sh - set -o pipefail - if command -v ts &> /dev/null; then - python3 -m praktika run 'Stateless tests (amd_msan, sequential, 2/2)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log - else - python3 -m praktika run 'Stateless tests (amd_msan, sequential, 2/2)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log - fi - - stateless_tests_amd_ubsan_parallel: - runs-on: [self-hosted, amd-small-mem] - needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_amd_ubsan] - if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhbWRfdWJzYW4sIHBhcmFsbGVsKQ==') }} - name: "Stateless tests (amd_ubsan, parallel)" - outputs: - data: ${{ steps.run.outputs.DATA }} - pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} - steps: - - name: Checkout code - uses: actions/checkout@v4 - with: - ref: ${{ env.CHECKOUT_REF }} - - - name: Prepare env script - run: | - rm -rf ./ci/tmp - mkdir -p ./ci/tmp - cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF' - export PYTHONPATH=./ci:.: - - cat > ./ci/tmp/workflow_job.json << 'EOF' - ${{ toJson(job) }} - EOF - cat > ./ci/tmp/workflow_status.json << 'EOF' - ${{ toJson(needs) }} - EOF - ENV_SETUP_SCRIPT_EOF - - - name: Run - id: run - run: | - . ./ci/tmp/praktika_setup_env.sh - set -o pipefail - if command -v ts &> /dev/null; then - python3 -m praktika run 'Stateless tests (amd_ubsan, parallel)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log - else - python3 -m praktika run 'Stateless tests (amd_ubsan, parallel)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log - fi - - stateless_tests_amd_ubsan_sequential: - runs-on: [self-hosted, amd-small-mem] - needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_amd_ubsan] - if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhbWRfdWJzYW4sIHNlcXVlbnRpYWwp') }} - name: "Stateless tests (amd_ubsan, sequential)" - outputs: - data: ${{ steps.run.outputs.DATA }} - pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} - steps: - - name: Checkout code - uses: actions/checkout@v4 - with: - ref: ${{ env.CHECKOUT_REF }} - - - name: Prepare env script - run: | - rm -rf ./ci/tmp - mkdir -p ./ci/tmp - cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF' - export PYTHONPATH=./ci:.: - - cat > ./ci/tmp/workflow_job.json << 'EOF' - ${{ toJson(job) }} - EOF - cat > ./ci/tmp/workflow_status.json << 'EOF' - ${{ toJson(needs) }} - EOF - ENV_SETUP_SCRIPT_EOF - - - name: Run - id: run - run: | - . 
./ci/tmp/praktika_setup_env.sh - set -o pipefail - if command -v ts &> /dev/null; then - python3 -m praktika run 'Stateless tests (amd_ubsan, sequential)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log - else - python3 -m praktika run 'Stateless tests (amd_ubsan, sequential)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log - fi - - stateless_tests_amd_debug_distributed_plan_s3_storage_parallel: - runs-on: [self-hosted, amd-medium] - needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_amd_debug] - if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhbWRfZGVidWcsIGRpc3RyaWJ1dGVkIHBsYW4sIHMzIHN0b3JhZ2UsIHBhcmFsbGVsKQ==') }} - name: "Stateless tests (amd_debug, distributed plan, s3 storage, parallel)" - outputs: - data: ${{ steps.run.outputs.DATA }} - pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} - steps: - - name: Checkout code - uses: actions/checkout@v4 - with: - ref: ${{ env.CHECKOUT_REF }} - - - name: Prepare env script - run: | - rm -rf ./ci/tmp - mkdir -p ./ci/tmp - cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF' - export PYTHONPATH=./ci:.: - - cat > ./ci/tmp/workflow_job.json << 'EOF' - ${{ toJson(job) }} - EOF - cat > ./ci/tmp/workflow_status.json << 'EOF' - ${{ toJson(needs) }} - EOF - ENV_SETUP_SCRIPT_EOF - - - name: Run - id: run - run: | - . ./ci/tmp/praktika_setup_env.sh - set -o pipefail - if command -v ts &> /dev/null; then - python3 -m praktika run 'Stateless tests (amd_debug, distributed plan, s3 storage, parallel)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log - else - python3 -m praktika run 'Stateless tests (amd_debug, distributed plan, s3 storage, parallel)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log - fi - - stateless_tests_amd_debug_distributed_plan_s3_storage_sequential: - runs-on: [self-hosted, amd-small] - needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_amd_debug] - if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhbWRfZGVidWcsIGRpc3RyaWJ1dGVkIHBsYW4sIHMzIHN0b3JhZ2UsIHNlcXVlbnRpYWwp') }} - name: "Stateless tests (amd_debug, distributed plan, s3 storage, sequential)" - outputs: - data: ${{ steps.run.outputs.DATA }} - pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} - steps: - - name: Checkout code - uses: actions/checkout@v4 - with: - ref: ${{ env.CHECKOUT_REF }} - - - name: Prepare env script - run: | - rm -rf ./ci/tmp - mkdir -p ./ci/tmp - cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF' - export PYTHONPATH=./ci:.: - - cat > ./ci/tmp/workflow_job.json << 'EOF' - ${{ toJson(job) }} - EOF - cat > ./ci/tmp/workflow_status.json << 'EOF' - ${{ toJson(needs) }} - EOF - ENV_SETUP_SCRIPT_EOF - - - name: Run - id: run - run: | - . 
./ci/tmp/praktika_setup_env.sh - set -o pipefail - if command -v ts &> /dev/null; then - python3 -m praktika run 'Stateless tests (amd_debug, distributed plan, s3 storage, sequential)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log - else - python3 -m praktika run 'Stateless tests (amd_debug, distributed plan, s3 storage, sequential)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log - fi - - stateless_tests_amd_tsan_s3_storage_parallel_1_2: - runs-on: [self-hosted, amd-medium] - needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_amd_tsan] - if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhbWRfdHNhbiwgczMgc3RvcmFnZSwgcGFyYWxsZWwsIDEvMik=') }} - name: "Stateless tests (amd_tsan, s3 storage, parallel, 1/2)" - outputs: - data: ${{ steps.run.outputs.DATA }} - pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} - steps: - - name: Checkout code - uses: actions/checkout@v4 - with: - ref: ${{ env.CHECKOUT_REF }} - - - name: Prepare env script - run: | - rm -rf ./ci/tmp - mkdir -p ./ci/tmp - cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF' - export PYTHONPATH=./ci:.: - - cat > ./ci/tmp/workflow_job.json << 'EOF' - ${{ toJson(job) }} - EOF - cat > ./ci/tmp/workflow_status.json << 'EOF' - ${{ toJson(needs) }} - EOF - ENV_SETUP_SCRIPT_EOF - - - name: Run - id: run - run: | - . ./ci/tmp/praktika_setup_env.sh - set -o pipefail - if command -v ts &> /dev/null; then - python3 -m praktika run 'Stateless tests (amd_tsan, s3 storage, parallel, 1/2)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log - else - python3 -m praktika run 'Stateless tests (amd_tsan, s3 storage, parallel, 1/2)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log - fi - - stateless_tests_amd_tsan_s3_storage_parallel_2_2: - runs-on: [self-hosted, amd-medium] - needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_amd_tsan] - if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhbWRfdHNhbiwgczMgc3RvcmFnZSwgcGFyYWxsZWwsIDIvMik=') }} - name: "Stateless tests (amd_tsan, s3 storage, parallel, 2/2)" - outputs: - data: ${{ steps.run.outputs.DATA }} - pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} - steps: - - name: Checkout code - uses: actions/checkout@v4 - with: - ref: ${{ env.CHECKOUT_REF }} - - - name: Prepare env script - run: | - rm -rf ./ci/tmp - mkdir -p ./ci/tmp - cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF' - export PYTHONPATH=./ci:.: - - cat > ./ci/tmp/workflow_job.json << 'EOF' - ${{ toJson(job) }} - EOF - cat > ./ci/tmp/workflow_status.json << 'EOF' - ${{ toJson(needs) }} - EOF - ENV_SETUP_SCRIPT_EOF - - - name: Run - id: run - run: | - . 
./ci/tmp/praktika_setup_env.sh - set -o pipefail - if command -v ts &> /dev/null; then - python3 -m praktika run 'Stateless tests (amd_tsan, s3 storage, parallel, 2/2)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log - else - python3 -m praktika run 'Stateless tests (amd_tsan, s3 storage, parallel, 2/2)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log - fi - - stateless_tests_amd_tsan_s3_storage_sequential_1_2: - runs-on: [self-hosted, amd-small-mem] - needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_amd_tsan] - if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhbWRfdHNhbiwgczMgc3RvcmFnZSwgc2VxdWVudGlhbCwgMS8yKQ==') }} - name: "Stateless tests (amd_tsan, s3 storage, sequential, 1/2)" - outputs: - data: ${{ steps.run.outputs.DATA }} - pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} - steps: - - name: Checkout code - uses: actions/checkout@v4 - with: - ref: ${{ env.CHECKOUT_REF }} - - - name: Prepare env script - run: | - rm -rf ./ci/tmp - mkdir -p ./ci/tmp - cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF' - export PYTHONPATH=./ci:.: - - cat > ./ci/tmp/workflow_job.json << 'EOF' - ${{ toJson(job) }} - EOF - cat > ./ci/tmp/workflow_status.json << 'EOF' - ${{ toJson(needs) }} - EOF - ENV_SETUP_SCRIPT_EOF - - - name: Run - id: run - run: | - . ./ci/tmp/praktika_setup_env.sh - set -o pipefail - if command -v ts &> /dev/null; then - python3 -m praktika run 'Stateless tests (amd_tsan, s3 storage, sequential, 1/2)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log - else - python3 -m praktika run 'Stateless tests (amd_tsan, s3 storage, sequential, 1/2)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log - fi - - stateless_tests_amd_tsan_s3_storage_sequential_2_2: - runs-on: [self-hosted, amd-small-mem] - needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_amd_tsan] - if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhbWRfdHNhbiwgczMgc3RvcmFnZSwgc2VxdWVudGlhbCwgMi8yKQ==') }} - name: "Stateless tests (amd_tsan, s3 storage, sequential, 2/2)" - outputs: - data: ${{ steps.run.outputs.DATA }} - pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} - steps: - - name: Checkout code - uses: actions/checkout@v4 - with: - ref: ${{ env.CHECKOUT_REF }} - - - name: Prepare env script - run: | - rm -rf ./ci/tmp - mkdir -p ./ci/tmp - cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF' - export PYTHONPATH=./ci:.: - - cat > ./ci/tmp/workflow_job.json << 'EOF' - ${{ toJson(job) }} - EOF - cat > ./ci/tmp/workflow_status.json << 'EOF' - ${{ toJson(needs) }} - EOF - ENV_SETUP_SCRIPT_EOF - - - name: Run - id: run - run: | - . 
./ci/tmp/praktika_setup_env.sh - set -o pipefail - if command -v ts &> /dev/null; then - python3 -m praktika run 'Stateless tests (amd_tsan, s3 storage, sequential, 2/2)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log - else - python3 -m praktika run 'Stateless tests (amd_tsan, s3 storage, sequential, 2/2)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log - fi - - stateless_tests_arm_binary_parallel: - runs-on: [self-hosted, arm-medium-cpu] - needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_arm_binary] - if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhcm1fYmluYXJ5LCBwYXJhbGxlbCk=') }} - name: "Stateless tests (arm_binary, parallel)" - outputs: - data: ${{ steps.run.outputs.DATA }} - pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} - steps: - - name: Checkout code - uses: actions/checkout@v4 - with: - ref: ${{ env.CHECKOUT_REF }} - - - name: Prepare env script - run: | - rm -rf ./ci/tmp - mkdir -p ./ci/tmp - cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF' - export PYTHONPATH=./ci:.: - - cat > ./ci/tmp/workflow_job.json << 'EOF' - ${{ toJson(job) }} - EOF - cat > ./ci/tmp/workflow_status.json << 'EOF' - ${{ toJson(needs) }} - EOF - ENV_SETUP_SCRIPT_EOF - - - name: Run - id: run - run: | - . ./ci/tmp/praktika_setup_env.sh - set -o pipefail - if command -v ts &> /dev/null; then - python3 -m praktika run 'Stateless tests (arm_binary, parallel)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log - else - python3 -m praktika run 'Stateless tests (arm_binary, parallel)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log - fi - - stateless_tests_arm_binary_sequential: - runs-on: [self-hosted, arm-small] - needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_arm_binary] - if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhcm1fYmluYXJ5LCBzZXF1ZW50aWFsKQ==') }} - name: "Stateless tests (arm_binary, sequential)" + stateless_tests_amd_debug_asyncinsert_s3_storage_sequential: + runs-on: [self-hosted, altinity-on-demand, altinity-func-tester] + needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_amd_debug] + if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhbWRfZGVidWcsIEFzeW5jSW5zZXJ0LCBzMyBzdG9yYWdlLCBzZXF1ZW50aWFsKQ==') }} + name: "Stateless tests (amd_debug, AsyncInsert, s3 storage, sequential)" outputs: data: ${{ steps.run.outputs.DATA }} pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} @@ -2589,45 +1567,12 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} - - name: Prepare env script - run: | - rm -rf ./ci/tmp - mkdir -p ./ci/tmp - cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF' - export PYTHONPATH=./ci:.: - - cat > ./ci/tmp/workflow_job.json << 'EOF' - ${{ toJson(job) }} - EOF - cat > 
./ci/tmp/workflow_status.json << 'EOF' - ${{ toJson(needs) }} - EOF - ENV_SETUP_SCRIPT_EOF - - - name: Run - id: run - run: | - . ./ci/tmp/praktika_setup_env.sh - set -o pipefail - if command -v ts &> /dev/null; then - python3 -m praktika run 'Stateless tests (arm_binary, sequential)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log - else - python3 -m praktika run 'Stateless tests (arm_binary, sequential)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log - fi - - stateless_tests_arm_asan_azure_parallel: - runs-on: [self-hosted, arm-medium] - needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_arm_asan] - if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhcm1fYXNhbiwgYXp1cmUsIHBhcmFsbGVsKQ==') }} - name: "Stateless tests (arm_asan, azure, parallel)" - outputs: - data: ${{ steps.run.outputs.DATA }} - pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} - steps: - - name: Checkout code - uses: actions/checkout@v4 + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup with: - ref: ${{ env.CHECKOUT_REF }} + test_name: "Stateless tests (amd_debug, AsyncInsert, s3 storage, sequential)" - name: Prepare env script run: | @@ -2650,16 +1595,16 @@ jobs: . ./ci/tmp/praktika_setup_env.sh set -o pipefail if command -v ts &> /dev/null; then - python3 -m praktika run 'Stateless tests (arm_asan, azure, parallel)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log + python3 -m praktika run 'Stateless tests (amd_debug, AsyncInsert, s3 storage, sequential)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log else - python3 -m praktika run 'Stateless tests (arm_asan, azure, parallel)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log + python3 -m praktika run 'Stateless tests (amd_debug, AsyncInsert, s3 storage, sequential)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log fi - stateless_tests_arm_asan_azure_sequential: - runs-on: [self-hosted, arm-small-mem] - needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_arm_asan] - if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhcm1fYXNhbiwgYXp1cmUsIHNlcXVlbnRpYWwp') }} - name: "Stateless tests (arm_asan, azure, sequential)" + stateless_tests_amd_debug_parallel: + runs-on: [self-hosted, altinity-on-demand, altinity-func-tester] + needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_amd_debug] + if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhbWRfZGVidWcsIHBhcmFsbGVsKQ==') }} + name: "Stateless tests (amd_debug, parallel)" outputs: data: ${{ steps.run.outputs.DATA }} pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} @@ -2669,45 +1614,12 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} - - name: Prepare env 
script - run: | - rm -rf ./ci/tmp - mkdir -p ./ci/tmp - cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF' - export PYTHONPATH=./ci:.: - - cat > ./ci/tmp/workflow_job.json << 'EOF' - ${{ toJson(job) }} - EOF - cat > ./ci/tmp/workflow_status.json << 'EOF' - ${{ toJson(needs) }} - EOF - ENV_SETUP_SCRIPT_EOF - - - name: Run - id: run - run: | - . ./ci/tmp/praktika_setup_env.sh - set -o pipefail - if command -v ts &> /dev/null; then - python3 -m praktika run 'Stateless tests (arm_asan, azure, sequential)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log - else - python3 -m praktika run 'Stateless tests (arm_asan, azure, sequential)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log - fi - - integration_tests_amd_asan_db_disk_old_analyzer_1_6: - runs-on: [self-hosted, amd-medium] - needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_amd_asan] - if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKGFtZF9hc2FuLCBkYiBkaXNrLCBvbGQgYW5hbHl6ZXIsIDEvNik=') }} - name: "Integration tests (amd_asan, db disk, old analyzer, 1/6)" - outputs: - data: ${{ steps.run.outputs.DATA }} - pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} - steps: - - name: Checkout code - uses: actions/checkout@v4 + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup with: - ref: ${{ env.CHECKOUT_REF }} + test_name: "Stateless tests (amd_debug, parallel)" - name: Prepare env script run: | @@ -2730,16 +1642,16 @@ jobs: . 
./ci/tmp/praktika_setup_env.sh set -o pipefail if command -v ts &> /dev/null; then - python3 -m praktika run 'Integration tests (amd_asan, db disk, old analyzer, 1/6)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log + python3 -m praktika run 'Stateless tests (amd_debug, parallel)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log else - python3 -m praktika run 'Integration tests (amd_asan, db disk, old analyzer, 1/6)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log + python3 -m praktika run 'Stateless tests (amd_debug, parallel)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log fi - integration_tests_amd_asan_db_disk_old_analyzer_2_6: - runs-on: [self-hosted, amd-medium] - needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_amd_asan] - if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKGFtZF9hc2FuLCBkYiBkaXNrLCBvbGQgYW5hbHl6ZXIsIDIvNik=') }} - name: "Integration tests (amd_asan, db disk, old analyzer, 2/6)" + stateless_tests_amd_debug_sequential: + runs-on: [self-hosted, altinity-on-demand, altinity-func-tester] + needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_amd_debug] + if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhbWRfZGVidWcsIHNlcXVlbnRpYWwp') }} + name: "Stateless tests (amd_debug, sequential)" outputs: data: ${{ steps.run.outputs.DATA }} pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} @@ -2749,6 +1661,13 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Stateless tests (amd_debug, sequential)" + - name: Prepare env script run: | rm -rf ./ci/tmp @@ -2770,16 +1689,16 @@ jobs: . 
./ci/tmp/praktika_setup_env.sh set -o pipefail if command -v ts &> /dev/null; then - python3 -m praktika run 'Integration tests (amd_asan, db disk, old analyzer, 2/6)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log + python3 -m praktika run 'Stateless tests (amd_debug, sequential)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log else - python3 -m praktika run 'Integration tests (amd_asan, db disk, old analyzer, 2/6)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log + python3 -m praktika run 'Stateless tests (amd_debug, sequential)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log fi - integration_tests_amd_asan_db_disk_old_analyzer_3_6: - runs-on: [self-hosted, amd-medium] - needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_amd_asan] - if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKGFtZF9hc2FuLCBkYiBkaXNrLCBvbGQgYW5hbHl6ZXIsIDMvNik=') }} - name: "Integration tests (amd_asan, db disk, old analyzer, 3/6)" + stateless_tests_amd_tsan_parallel_1_2: + runs-on: [self-hosted, altinity-on-demand, altinity-func-tester] + needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_amd_tsan] + if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhbWRfdHNhbiwgcGFyYWxsZWwsIDEvMik=') }} + name: "Stateless tests (amd_tsan, parallel, 1/2)" outputs: data: ${{ steps.run.outputs.DATA }} pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} @@ -2789,6 +1708,13 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Stateless tests (amd_tsan, parallel, 1/2)" + - name: Prepare env script run: | rm -rf ./ci/tmp @@ -2810,16 +1736,16 @@ jobs: . 
./ci/tmp/praktika_setup_env.sh set -o pipefail if command -v ts &> /dev/null; then - python3 -m praktika run 'Integration tests (amd_asan, db disk, old analyzer, 3/6)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log + python3 -m praktika run 'Stateless tests (amd_tsan, parallel, 1/2)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log else - python3 -m praktika run 'Integration tests (amd_asan, db disk, old analyzer, 3/6)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log + python3 -m praktika run 'Stateless tests (amd_tsan, parallel, 1/2)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log fi - integration_tests_amd_asan_db_disk_old_analyzer_4_6: - runs-on: [self-hosted, amd-medium] - needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_amd_asan] - if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKGFtZF9hc2FuLCBkYiBkaXNrLCBvbGQgYW5hbHl6ZXIsIDQvNik=') }} - name: "Integration tests (amd_asan, db disk, old analyzer, 4/6)" + stateless_tests_amd_tsan_parallel_2_2: + runs-on: [self-hosted, altinity-on-demand, altinity-func-tester] + needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_amd_tsan] + if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhbWRfdHNhbiwgcGFyYWxsZWwsIDIvMik=') }} + name: "Stateless tests (amd_tsan, parallel, 2/2)" outputs: data: ${{ steps.run.outputs.DATA }} pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} @@ -2829,6 +1755,13 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Stateless tests (amd_tsan, parallel, 2/2)" + - name: Prepare env script run: | rm -rf ./ci/tmp @@ -2850,16 +1783,16 @@ jobs: . 
./ci/tmp/praktika_setup_env.sh set -o pipefail if command -v ts &> /dev/null; then - python3 -m praktika run 'Integration tests (amd_asan, db disk, old analyzer, 4/6)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log + python3 -m praktika run 'Stateless tests (amd_tsan, parallel, 2/2)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log else - python3 -m praktika run 'Integration tests (amd_asan, db disk, old analyzer, 4/6)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log + python3 -m praktika run 'Stateless tests (amd_tsan, parallel, 2/2)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log fi - integration_tests_amd_asan_db_disk_old_analyzer_5_6: - runs-on: [self-hosted, amd-medium] - needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_amd_asan] - if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKGFtZF9hc2FuLCBkYiBkaXNrLCBvbGQgYW5hbHl6ZXIsIDUvNik=') }} - name: "Integration tests (amd_asan, db disk, old analyzer, 5/6)" + stateless_tests_amd_tsan_sequential_1_2: + runs-on: [self-hosted, altinity-on-demand, altinity-func-tester] + needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_amd_tsan] + if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhbWRfdHNhbiwgc2VxdWVudGlhbCwgMS8yKQ==') }} + name: "Stateless tests (amd_tsan, sequential, 1/2)" outputs: data: ${{ steps.run.outputs.DATA }} pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} @@ -2869,6 +1802,13 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Stateless tests (amd_tsan, sequential, 1/2)" + - name: Prepare env script run: | rm -rf ./ci/tmp @@ -2890,16 +1830,16 @@ jobs: . 
./ci/tmp/praktika_setup_env.sh set -o pipefail if command -v ts &> /dev/null; then - python3 -m praktika run 'Integration tests (amd_asan, db disk, old analyzer, 5/6)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log + python3 -m praktika run 'Stateless tests (amd_tsan, sequential, 1/2)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log else - python3 -m praktika run 'Integration tests (amd_asan, db disk, old analyzer, 5/6)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log + python3 -m praktika run 'Stateless tests (amd_tsan, sequential, 1/2)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log fi - integration_tests_amd_asan_db_disk_old_analyzer_6_6: - runs-on: [self-hosted, amd-medium] - needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_amd_asan] - if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKGFtZF9hc2FuLCBkYiBkaXNrLCBvbGQgYW5hbHl6ZXIsIDYvNik=') }} - name: "Integration tests (amd_asan, db disk, old analyzer, 6/6)" + stateless_tests_amd_tsan_sequential_2_2: + runs-on: [self-hosted, altinity-on-demand, altinity-func-tester] + needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_amd_tsan] + if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhbWRfdHNhbiwgc2VxdWVudGlhbCwgMi8yKQ==') }} + name: "Stateless tests (amd_tsan, sequential, 2/2)" outputs: data: ${{ steps.run.outputs.DATA }} pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} @@ -2909,6 +1849,13 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Stateless tests (amd_tsan, sequential, 2/2)" + - name: Prepare env script run: | rm -rf ./ci/tmp @@ -2930,16 +1877,16 @@ jobs: . 
./ci/tmp/praktika_setup_env.sh set -o pipefail if command -v ts &> /dev/null; then - python3 -m praktika run 'Integration tests (amd_asan, db disk, old analyzer, 6/6)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log + python3 -m praktika run 'Stateless tests (amd_tsan, sequential, 2/2)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log else - python3 -m praktika run 'Integration tests (amd_asan, db disk, old analyzer, 6/6)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log + python3 -m praktika run 'Stateless tests (amd_tsan, sequential, 2/2)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log fi - integration_tests_amd_binary_1_5: - runs-on: [self-hosted, amd-medium] - needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_amd_binary] - if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKGFtZF9iaW5hcnksIDEvNSk=') }} - name: "Integration tests (amd_binary, 1/5)" + stateless_tests_amd_msan_parallel_1_2: + runs-on: [self-hosted, altinity-on-demand, altinity-func-tester] + needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_amd_msan] + if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhbWRfbXNhbiwgcGFyYWxsZWwsIDEvMik=') }} + name: "Stateless tests (amd_msan, parallel, 1/2)" outputs: data: ${{ steps.run.outputs.DATA }} pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} @@ -2949,6 +1896,13 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Stateless tests (amd_msan, parallel, 1/2)" + - name: Prepare env script run: | rm -rf ./ci/tmp @@ -2970,16 +1924,16 @@ jobs: . 
./ci/tmp/praktika_setup_env.sh set -o pipefail if command -v ts &> /dev/null; then - python3 -m praktika run 'Integration tests (amd_binary, 1/5)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log + python3 -m praktika run 'Stateless tests (amd_msan, parallel, 1/2)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log else - python3 -m praktika run 'Integration tests (amd_binary, 1/5)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log + python3 -m praktika run 'Stateless tests (amd_msan, parallel, 1/2)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log fi - integration_tests_amd_binary_2_5: - runs-on: [self-hosted, amd-medium] - needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_amd_binary] - if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKGFtZF9iaW5hcnksIDIvNSk=') }} - name: "Integration tests (amd_binary, 2/5)" + stateless_tests_amd_msan_parallel_2_2: + runs-on: [self-hosted, altinity-on-demand, altinity-func-tester] + needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_amd_msan] + if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhbWRfbXNhbiwgcGFyYWxsZWwsIDIvMik=') }} + name: "Stateless tests (amd_msan, parallel, 2/2)" outputs: data: ${{ steps.run.outputs.DATA }} pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} @@ -2989,6 +1943,13 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Stateless tests (amd_msan, parallel, 2/2)" + - name: Prepare env script run: | rm -rf ./ci/tmp @@ -3010,16 +1971,16 @@ jobs: . 
./ci/tmp/praktika_setup_env.sh set -o pipefail if command -v ts &> /dev/null; then - python3 -m praktika run 'Integration tests (amd_binary, 2/5)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log + python3 -m praktika run 'Stateless tests (amd_msan, parallel, 2/2)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log else - python3 -m praktika run 'Integration tests (amd_binary, 2/5)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log + python3 -m praktika run 'Stateless tests (amd_msan, parallel, 2/2)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log fi - integration_tests_amd_binary_3_5: - runs-on: [self-hosted, amd-medium] - needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_amd_binary] - if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKGFtZF9iaW5hcnksIDMvNSk=') }} - name: "Integration tests (amd_binary, 3/5)" + stateless_tests_amd_msan_sequential_1_2: + runs-on: [self-hosted, altinity-on-demand, altinity-func-tester] + needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_amd_msan] + if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhbWRfbXNhbiwgc2VxdWVudGlhbCwgMS8yKQ==') }} + name: "Stateless tests (amd_msan, sequential, 1/2)" outputs: data: ${{ steps.run.outputs.DATA }} pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} @@ -3029,6 +1990,13 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Stateless tests (amd_msan, sequential, 1/2)" + - name: Prepare env script run: | rm -rf ./ci/tmp @@ -3050,16 +2018,16 @@ jobs: . 
./ci/tmp/praktika_setup_env.sh set -o pipefail if command -v ts &> /dev/null; then - python3 -m praktika run 'Integration tests (amd_binary, 3/5)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log + python3 -m praktika run 'Stateless tests (amd_msan, sequential, 1/2)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log else - python3 -m praktika run 'Integration tests (amd_binary, 3/5)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log + python3 -m praktika run 'Stateless tests (amd_msan, sequential, 1/2)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log fi - integration_tests_amd_binary_4_5: - runs-on: [self-hosted, amd-medium] - needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_amd_binary] - if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKGFtZF9iaW5hcnksIDQvNSk=') }} - name: "Integration tests (amd_binary, 4/5)" + stateless_tests_amd_msan_sequential_2_2: + runs-on: [self-hosted, altinity-on-demand, altinity-func-tester] + needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_amd_msan] + if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhbWRfbXNhbiwgc2VxdWVudGlhbCwgMi8yKQ==') }} + name: "Stateless tests (amd_msan, sequential, 2/2)" outputs: data: ${{ steps.run.outputs.DATA }} pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} @@ -3069,6 +2037,13 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Stateless tests (amd_msan, sequential, 2/2)" + - name: Prepare env script run: | rm -rf ./ci/tmp @@ -3090,16 +2065,16 @@ jobs: . 
./ci/tmp/praktika_setup_env.sh set -o pipefail if command -v ts &> /dev/null; then - python3 -m praktika run 'Integration tests (amd_binary, 4/5)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log + python3 -m praktika run 'Stateless tests (amd_msan, sequential, 2/2)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log else - python3 -m praktika run 'Integration tests (amd_binary, 4/5)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log + python3 -m praktika run 'Stateless tests (amd_msan, sequential, 2/2)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log fi - integration_tests_amd_binary_5_5: - runs-on: [self-hosted, amd-medium] - needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_amd_binary] - if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKGFtZF9iaW5hcnksIDUvNSk=') }} - name: "Integration tests (amd_binary, 5/5)" + stateless_tests_amd_ubsan_parallel: + runs-on: [self-hosted, altinity-on-demand, altinity-func-tester] + needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_amd_ubsan] + if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhbWRfdWJzYW4sIHBhcmFsbGVsKQ==') }} + name: "Stateless tests (amd_ubsan, parallel)" outputs: data: ${{ steps.run.outputs.DATA }} pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} @@ -3109,6 +2084,13 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Stateless tests (amd_ubsan, parallel)" + - name: Prepare env script run: | rm -rf ./ci/tmp @@ -3130,16 +2112,16 @@ jobs: . 
./ci/tmp/praktika_setup_env.sh set -o pipefail if command -v ts &> /dev/null; then - python3 -m praktika run 'Integration tests (amd_binary, 5/5)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log + python3 -m praktika run 'Stateless tests (amd_ubsan, parallel)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log else - python3 -m praktika run 'Integration tests (amd_binary, 5/5)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log + python3 -m praktika run 'Stateless tests (amd_ubsan, parallel)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log fi - integration_tests_arm_binary_distributed_plan_1_4: - runs-on: [self-hosted, arm-medium] - needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_arm_binary] - if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKGFybV9iaW5hcnksIGRpc3RyaWJ1dGVkIHBsYW4sIDEvNCk=') }} - name: "Integration tests (arm_binary, distributed plan, 1/4)" + stateless_tests_amd_ubsan_sequential: + runs-on: [self-hosted, altinity-on-demand, altinity-func-tester] + needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_amd_ubsan] + if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhbWRfdWJzYW4sIHNlcXVlbnRpYWwp') }} + name: "Stateless tests (amd_ubsan, sequential)" outputs: data: ${{ steps.run.outputs.DATA }} pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} @@ -3149,6 +2131,13 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Stateless tests (amd_ubsan, sequential)" + - name: Prepare env script run: | rm -rf ./ci/tmp @@ -3170,16 +2159,16 @@ jobs: . 
./ci/tmp/praktika_setup_env.sh set -o pipefail if command -v ts &> /dev/null; then - python3 -m praktika run 'Integration tests (arm_binary, distributed plan, 1/4)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log + python3 -m praktika run 'Stateless tests (amd_ubsan, sequential)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log else - python3 -m praktika run 'Integration tests (arm_binary, distributed plan, 1/4)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log + python3 -m praktika run 'Stateless tests (amd_ubsan, sequential)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log fi - integration_tests_arm_binary_distributed_plan_2_4: - runs-on: [self-hosted, arm-medium] - needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_arm_binary] - if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKGFybV9iaW5hcnksIGRpc3RyaWJ1dGVkIHBsYW4sIDIvNCk=') }} - name: "Integration tests (arm_binary, distributed plan, 2/4)" + stateless_tests_amd_debug_distributed_plan_s3_storage_parallel: + runs-on: [self-hosted, altinity-on-demand, altinity-func-tester] + needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_amd_debug] + if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhbWRfZGVidWcsIGRpc3RyaWJ1dGVkIHBsYW4sIHMzIHN0b3JhZ2UsIHBhcmFsbGVsKQ==') }} + name: "Stateless tests (amd_debug, distributed plan, s3 storage, parallel)" outputs: data: ${{ steps.run.outputs.DATA }} pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} @@ -3189,6 +2178,13 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Stateless tests (amd_debug, distributed plan, s3 storage, parallel)" + - name: Prepare env script run: | rm -rf ./ci/tmp @@ -3210,16 +2206,16 @@ jobs: . 
./ci/tmp/praktika_setup_env.sh set -o pipefail if command -v ts &> /dev/null; then - python3 -m praktika run 'Integration tests (arm_binary, distributed plan, 2/4)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log + python3 -m praktika run 'Stateless tests (amd_debug, distributed plan, s3 storage, parallel)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log else - python3 -m praktika run 'Integration tests (arm_binary, distributed plan, 2/4)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log + python3 -m praktika run 'Stateless tests (amd_debug, distributed plan, s3 storage, parallel)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log fi - integration_tests_arm_binary_distributed_plan_3_4: - runs-on: [self-hosted, arm-medium] - needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_arm_binary] - if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKGFybV9iaW5hcnksIGRpc3RyaWJ1dGVkIHBsYW4sIDMvNCk=') }} - name: "Integration tests (arm_binary, distributed plan, 3/4)" + stateless_tests_amd_debug_distributed_plan_s3_storage_sequential: + runs-on: [self-hosted, altinity-on-demand, altinity-func-tester] + needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_amd_debug] + if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhbWRfZGVidWcsIGRpc3RyaWJ1dGVkIHBsYW4sIHMzIHN0b3JhZ2UsIHNlcXVlbnRpYWwp') }} + name: "Stateless tests (amd_debug, distributed plan, s3 storage, sequential)" outputs: data: ${{ steps.run.outputs.DATA }} pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} @@ -3229,6 +2225,13 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Stateless tests (amd_debug, distributed plan, s3 storage, sequential)" + - name: Prepare env script run: | rm -rf ./ci/tmp @@ -3250,16 +2253,16 @@ jobs: . 
./ci/tmp/praktika_setup_env.sh set -o pipefail if command -v ts &> /dev/null; then - python3 -m praktika run 'Integration tests (arm_binary, distributed plan, 3/4)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log + python3 -m praktika run 'Stateless tests (amd_debug, distributed plan, s3 storage, sequential)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log else - python3 -m praktika run 'Integration tests (arm_binary, distributed plan, 3/4)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log + python3 -m praktika run 'Stateless tests (amd_debug, distributed plan, s3 storage, sequential)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log fi - integration_tests_arm_binary_distributed_plan_4_4: - runs-on: [self-hosted, arm-medium] - needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_arm_binary] - if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKGFybV9iaW5hcnksIGRpc3RyaWJ1dGVkIHBsYW4sIDQvNCk=') }} - name: "Integration tests (arm_binary, distributed plan, 4/4)" + stateless_tests_amd_tsan_s3_storage_parallel_1_2: + runs-on: [self-hosted, altinity-on-demand, altinity-func-tester] + needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_amd_tsan] + if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhbWRfdHNhbiwgczMgc3RvcmFnZSwgcGFyYWxsZWwsIDEvMik=') }} + name: "Stateless tests (amd_tsan, s3 storage, parallel, 1/2)" outputs: data: ${{ steps.run.outputs.DATA }} pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} @@ -3269,6 +2272,13 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Stateless tests (amd_tsan, s3 storage, parallel, 1/2)" + - name: Prepare env script run: | rm -rf ./ci/tmp @@ -3290,16 +2300,16 @@ jobs: . 
./ci/tmp/praktika_setup_env.sh set -o pipefail if command -v ts &> /dev/null; then - python3 -m praktika run 'Integration tests (arm_binary, distributed plan, 4/4)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log + python3 -m praktika run 'Stateless tests (amd_tsan, s3 storage, parallel, 1/2)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log else - python3 -m praktika run 'Integration tests (arm_binary, distributed plan, 4/4)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log + python3 -m praktika run 'Stateless tests (amd_tsan, s3 storage, parallel, 1/2)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log fi - integration_tests_amd_tsan_1_6: - runs-on: [self-hosted, amd-medium] + stateless_tests_amd_tsan_s3_storage_parallel_2_2: + runs-on: [self-hosted, altinity-on-demand, altinity-func-tester] needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_amd_tsan] - if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKGFtZF90c2FuLCAxLzYp') }} - name: "Integration tests (amd_tsan, 1/6)" + if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhbWRfdHNhbiwgczMgc3RvcmFnZSwgcGFyYWxsZWwsIDIvMik=') }} + name: "Stateless tests (amd_tsan, s3 storage, parallel, 2/2)" outputs: data: ${{ steps.run.outputs.DATA }} pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} @@ -3309,6 +2319,13 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Stateless tests (amd_tsan, s3 storage, parallel, 2/2)" + - name: Prepare env script run: | rm -rf ./ci/tmp @@ -3330,16 +2347,16 @@ jobs: . 
./ci/tmp/praktika_setup_env.sh set -o pipefail if command -v ts &> /dev/null; then - python3 -m praktika run 'Integration tests (amd_tsan, 1/6)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log + python3 -m praktika run 'Stateless tests (amd_tsan, s3 storage, parallel, 2/2)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log else - python3 -m praktika run 'Integration tests (amd_tsan, 1/6)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log + python3 -m praktika run 'Stateless tests (amd_tsan, s3 storage, parallel, 2/2)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log fi - integration_tests_amd_tsan_2_6: - runs-on: [self-hosted, amd-medium] + stateless_tests_amd_tsan_s3_storage_sequential_1_2: + runs-on: [self-hosted, altinity-on-demand, altinity-func-tester] needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_amd_tsan] - if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKGFtZF90c2FuLCAyLzYp') }} - name: "Integration tests (amd_tsan, 2/6)" + if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhbWRfdHNhbiwgczMgc3RvcmFnZSwgc2VxdWVudGlhbCwgMS8yKQ==') }} + name: "Stateless tests (amd_tsan, s3 storage, sequential, 1/2)" outputs: data: ${{ steps.run.outputs.DATA }} pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} @@ -3349,6 +2366,13 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Stateless tests (amd_tsan, s3 storage, sequential, 1/2)" + - name: Prepare env script run: | rm -rf ./ci/tmp @@ -3370,16 +2394,16 @@ jobs: . 
./ci/tmp/praktika_setup_env.sh set -o pipefail if command -v ts &> /dev/null; then - python3 -m praktika run 'Integration tests (amd_tsan, 2/6)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log + python3 -m praktika run 'Stateless tests (amd_tsan, s3 storage, sequential, 1/2)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log else - python3 -m praktika run 'Integration tests (amd_tsan, 2/6)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log + python3 -m praktika run 'Stateless tests (amd_tsan, s3 storage, sequential, 1/2)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log fi - integration_tests_amd_tsan_3_6: - runs-on: [self-hosted, amd-medium] + stateless_tests_amd_tsan_s3_storage_sequential_2_2: + runs-on: [self-hosted, altinity-on-demand, altinity-func-tester] needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_amd_tsan] - if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKGFtZF90c2FuLCAzLzYp') }} - name: "Integration tests (amd_tsan, 3/6)" + if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhbWRfdHNhbiwgczMgc3RvcmFnZSwgc2VxdWVudGlhbCwgMi8yKQ==') }} + name: "Stateless tests (amd_tsan, s3 storage, sequential, 2/2)" outputs: data: ${{ steps.run.outputs.DATA }} pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} @@ -3389,6 +2413,13 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Stateless tests (amd_tsan, s3 storage, sequential, 2/2)" + - name: Prepare env script run: | rm -rf ./ci/tmp @@ -3410,16 +2441,16 @@ jobs: . 
./ci/tmp/praktika_setup_env.sh set -o pipefail if command -v ts &> /dev/null; then - python3 -m praktika run 'Integration tests (amd_tsan, 3/6)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log + python3 -m praktika run 'Stateless tests (amd_tsan, s3 storage, sequential, 2/2)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log else - python3 -m praktika run 'Integration tests (amd_tsan, 3/6)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log + python3 -m praktika run 'Stateless tests (amd_tsan, s3 storage, sequential, 2/2)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log fi - integration_tests_amd_tsan_4_6: - runs-on: [self-hosted, amd-medium] - needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_amd_tsan] - if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKGFtZF90c2FuLCA0LzYp') }} - name: "Integration tests (amd_tsan, 4/6)" + stateless_tests_arm_binary_parallel: + runs-on: [self-hosted, altinity-on-demand, altinity-func-tester-aarch64] + needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_arm_binary] + if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhcm1fYmluYXJ5LCBwYXJhbGxlbCk=') }} + name: "Stateless tests (arm_binary, parallel)" outputs: data: ${{ steps.run.outputs.DATA }} pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} @@ -3429,6 +2460,13 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Stateless tests (arm_binary, parallel)" + - name: Prepare env script run: | rm -rf ./ci/tmp @@ -3450,16 +2488,16 @@ jobs: . 
./ci/tmp/praktika_setup_env.sh set -o pipefail if command -v ts &> /dev/null; then - python3 -m praktika run 'Integration tests (amd_tsan, 4/6)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log + python3 -m praktika run 'Stateless tests (arm_binary, parallel)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log else - python3 -m praktika run 'Integration tests (amd_tsan, 4/6)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log + python3 -m praktika run 'Stateless tests (arm_binary, parallel)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log fi - integration_tests_amd_tsan_5_6: - runs-on: [self-hosted, amd-medium] - needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_amd_tsan] - if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKGFtZF90c2FuLCA1LzYp') }} - name: "Integration tests (amd_tsan, 5/6)" + stateless_tests_arm_binary_sequential: + runs-on: [self-hosted, altinity-on-demand, altinity-func-tester-aarch64] + needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_arm_binary] + if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhcm1fYmluYXJ5LCBzZXF1ZW50aWFsKQ==') }} + name: "Stateless tests (arm_binary, sequential)" outputs: data: ${{ steps.run.outputs.DATA }} pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} @@ -3469,6 +2507,13 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Stateless tests (arm_binary, sequential)" + - name: Prepare env script run: | rm -rf ./ci/tmp @@ -3490,16 +2535,16 @@ jobs: . 
./ci/tmp/praktika_setup_env.sh set -o pipefail if command -v ts &> /dev/null; then - python3 -m praktika run 'Integration tests (amd_tsan, 5/6)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log + python3 -m praktika run 'Stateless tests (arm_binary, sequential)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log else - python3 -m praktika run 'Integration tests (amd_tsan, 5/6)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log + python3 -m praktika run 'Stateless tests (arm_binary, sequential)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log fi - integration_tests_amd_tsan_6_6: - runs-on: [self-hosted, amd-medium] - needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_amd_tsan] - if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKGFtZF90c2FuLCA2LzYp') }} - name: "Integration tests (amd_tsan, 6/6)" + integration_tests_amd_asan_db_disk_old_analyzer_1_6: + runs-on: [self-hosted, altinity-on-demand, altinity-func-tester] + needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_amd_asan] + if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKGFtZF9hc2FuLCBkYiBkaXNrLCBvbGQgYW5hbHl6ZXIsIDEvNik=') }} + name: "Integration tests (amd_asan, db disk, old analyzer, 1/6)" outputs: data: ${{ steps.run.outputs.DATA }} pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} @@ -3509,6 +2554,13 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Integration tests (amd_asan, db disk, old analyzer, 1/6)" + - name: Prepare env script run: | rm -rf ./ci/tmp @@ -3530,16 +2582,16 @@ jobs: . 
           . ./ci/tmp/praktika_setup_env.sh
           set -o pipefail
           if command -v ts &> /dev/null; then
-            python3 -m praktika run 'Integration tests (amd_tsan, 6/6)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
+            python3 -m praktika run 'Integration tests (amd_asan, db disk, old analyzer, 1/6)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
           else
-            python3 -m praktika run 'Integration tests (amd_tsan, 6/6)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log
+            python3 -m praktika run 'Integration tests (amd_asan, db disk, old analyzer, 1/6)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log
           fi

-  stress_test_amd_debug:
-    runs-on: [self-hosted, amd-medium]
-    needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_amd_debug]
-    if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'U3RyZXNzIHRlc3QgKGFtZF9kZWJ1Zyk=') }}
-    name: "Stress test (amd_debug)"
+  integration_tests_amd_asan_db_disk_old_analyzer_2_6:
+    runs-on: [self-hosted, altinity-on-demand, altinity-func-tester]
+    needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_amd_asan]
+    if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKGFtZF9hc2FuLCBkYiBkaXNrLCBvbGQgYW5hbHl6ZXIsIDIvNik=') }}
+    name: "Integration tests (amd_asan, db disk, old analyzer, 2/6)"
     outputs:
       data: ${{ steps.run.outputs.DATA }}
       pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }}
@@ -3549,6 +2601,13 @@ jobs:
         with:
           ref: ${{ env.CHECKOUT_REF }}

+      - name: Setup
+        uses: ./.github/actions/runner_setup
+      - name: Docker setup
+        uses: ./.github/actions/docker_setup
+        with:
+          test_name: "Integration tests (amd_asan, db disk, old analyzer, 2/6)"
+
       - name: Prepare env script
         run: |
           rm -rf ./ci/tmp
@@ -3570,16 +2629,16 @@ jobs:
           . ./ci/tmp/praktika_setup_env.sh
           set -o pipefail
           if command -v ts &> /dev/null; then
-            python3 -m praktika run 'Stress test (amd_debug)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
+            python3 -m praktika run 'Integration tests (amd_asan, db disk, old analyzer, 2/6)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
           else
-            python3 -m praktika run 'Stress test (amd_debug)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log
+            python3 -m praktika run 'Integration tests (amd_asan, db disk, old analyzer, 2/6)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log
           fi

-  stress_test_amd_tsan:
-    runs-on: [self-hosted, amd-medium]
-    needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_amd_tsan]
-    if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'U3RyZXNzIHRlc3QgKGFtZF90c2FuKQ==') }}
-    name: "Stress test (amd_tsan)"
+  integration_tests_amd_asan_db_disk_old_analyzer_3_6:
+    runs-on: [self-hosted, altinity-on-demand, altinity-func-tester]
+    needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_amd_asan]
+    if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKGFtZF9hc2FuLCBkYiBkaXNrLCBvbGQgYW5hbHl6ZXIsIDMvNik=') }}
+    name: "Integration tests (amd_asan, db disk, old analyzer, 3/6)"
     outputs:
       data: ${{ steps.run.outputs.DATA }}
       pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }}
@@ -3589,6 +2648,13 @@ jobs:
         with:
           ref: ${{ env.CHECKOUT_REF }}

+      - name: Setup
+        uses: ./.github/actions/runner_setup
+      - name: Docker setup
+        uses: ./.github/actions/docker_setup
+        with:
+          test_name: "Integration tests (amd_asan, db disk, old analyzer, 3/6)"
+
       - name: Prepare env script
         run: |
           rm -rf ./ci/tmp
@@ -3610,16 +2676,16 @@ jobs:
           . ./ci/tmp/praktika_setup_env.sh
           set -o pipefail
           if command -v ts &> /dev/null; then
-            python3 -m praktika run 'Stress test (amd_tsan)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
+            python3 -m praktika run 'Integration tests (amd_asan, db disk, old analyzer, 3/6)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
           else
-            python3 -m praktika run 'Stress test (amd_tsan)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log
+            python3 -m praktika run 'Integration tests (amd_asan, db disk, old analyzer, 3/6)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log
           fi

-  stress_test_arm_asan:
-    runs-on: [self-hosted, arm-medium]
-    needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_arm_asan]
-    if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'U3RyZXNzIHRlc3QgKGFybV9hc2FuKQ==') }}
-    name: "Stress test (arm_asan)"
+  integration_tests_amd_asan_db_disk_old_analyzer_4_6:
+    runs-on: [self-hosted, altinity-on-demand, altinity-func-tester]
+    needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_amd_asan]
+    if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKGFtZF9hc2FuLCBkYiBkaXNrLCBvbGQgYW5hbHl6ZXIsIDQvNik=') }}
+    name: "Integration tests (amd_asan, db disk, old analyzer, 4/6)"
     outputs:
       data: ${{ steps.run.outputs.DATA }}
       pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }}
@@ -3629,6 +2695,13 @@ jobs:
         with:
          ref: ${{ env.CHECKOUT_REF }}

+      - name: Setup
+        uses: ./.github/actions/runner_setup
+      - name: Docker setup
+        uses: ./.github/actions/docker_setup
+        with:
+          test_name: "Integration tests (amd_asan, db disk, old analyzer, 4/6)"
+
       - name: Prepare env script
         run: |
           rm -rf ./ci/tmp
@@ -3650,16 +2723,16 @@ jobs:
           . ./ci/tmp/praktika_setup_env.sh
           set -o pipefail
           if command -v ts &> /dev/null; then
-            python3 -m praktika run 'Stress test (arm_asan)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
+            python3 -m praktika run 'Integration tests (amd_asan, db disk, old analyzer, 4/6)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
           else
-            python3 -m praktika run 'Stress test (arm_asan)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log
+            python3 -m praktika run 'Integration tests (amd_asan, db disk, old analyzer, 4/6)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log
           fi

-  stress_test_arm_asan_s3:
-    runs-on: [self-hosted, arm-medium]
-    needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_arm_asan]
-    if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'U3RyZXNzIHRlc3QgKGFybV9hc2FuLCBzMyk=') }}
-    name: "Stress test (arm_asan, s3)"
+  integration_tests_amd_asan_db_disk_old_analyzer_5_6:
+    runs-on: [self-hosted, altinity-on-demand, altinity-func-tester]
+    needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_amd_asan]
+    if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKGFtZF9hc2FuLCBkYiBkaXNrLCBvbGQgYW5hbHl6ZXIsIDUvNik=') }}
+    name: "Integration tests (amd_asan, db disk, old analyzer, 5/6)"
     outputs:
       data: ${{ steps.run.outputs.DATA }}
       pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }}
@@ -3669,6 +2742,13 @@ jobs:
         with:
           ref: ${{ env.CHECKOUT_REF }}

+      - name: Setup
+        uses: ./.github/actions/runner_setup
+      - name: Docker setup
+        uses: ./.github/actions/docker_setup
+        with:
+          test_name: "Integration tests (amd_asan, db disk, old analyzer, 5/6)"
+
       - name: Prepare env script
         run: |
           rm -rf ./ci/tmp
@@ -3690,16 +2770,16 @@ jobs:
           . ./ci/tmp/praktika_setup_env.sh
           set -o pipefail
           if command -v ts &> /dev/null; then
-            python3 -m praktika run 'Stress test (arm_asan, s3)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
+            python3 -m praktika run 'Integration tests (amd_asan, db disk, old analyzer, 5/6)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
           else
-            python3 -m praktika run 'Stress test (arm_asan, s3)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log
+            python3 -m praktika run 'Integration tests (amd_asan, db disk, old analyzer, 5/6)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log
           fi

-  stress_test_amd_ubsan:
-    runs-on: [self-hosted, amd-medium]
-    needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_amd_ubsan]
-    if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'U3RyZXNzIHRlc3QgKGFtZF91YnNhbik=') }}
-    name: "Stress test (amd_ubsan)"
+  integration_tests_amd_asan_db_disk_old_analyzer_6_6:
+    runs-on: [self-hosted, altinity-on-demand, altinity-func-tester]
+    needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_amd_asan]
+    if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKGFtZF9hc2FuLCBkYiBkaXNrLCBvbGQgYW5hbHl6ZXIsIDYvNik=') }}
+    name: "Integration tests (amd_asan, db disk, old analyzer, 6/6)"
     outputs:
       data: ${{ steps.run.outputs.DATA }}
       pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }}
@@ -3709,6 +2789,13 @@ jobs:
         with:
           ref: ${{ env.CHECKOUT_REF }}

+      - name: Setup
+        uses: ./.github/actions/runner_setup
+      - name: Docker setup
+        uses: ./.github/actions/docker_setup
+        with:
+          test_name: "Integration tests (amd_asan, db disk, old analyzer, 6/6)"
+
       - name: Prepare env script
         run: |
           rm -rf ./ci/tmp
@@ -3730,16 +2817,16 @@ jobs:
           . ./ci/tmp/praktika_setup_env.sh
           set -o pipefail
           if command -v ts &> /dev/null; then
-            python3 -m praktika run 'Stress test (amd_ubsan)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
+            python3 -m praktika run 'Integration tests (amd_asan, db disk, old analyzer, 6/6)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
           else
-            python3 -m praktika run 'Stress test (amd_ubsan)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log
+            python3 -m praktika run 'Integration tests (amd_asan, db disk, old analyzer, 6/6)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log
           fi

-  stress_test_amd_msan:
-    runs-on: [self-hosted, amd-medium]
-    needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_amd_msan]
-    if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'U3RyZXNzIHRlc3QgKGFtZF9tc2FuKQ==') }}
-    name: "Stress test (amd_msan)"
+  integration_tests_amd_binary_1_5:
+    runs-on: [self-hosted, altinity-on-demand, altinity-func-tester]
+    needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_amd_binary]
+    if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKGFtZF9iaW5hcnksIDEvNSk=') }}
+    name: "Integration tests (amd_binary, 1/5)"
     outputs:
       data: ${{ steps.run.outputs.DATA }}
       pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }}
@@ -3749,6 +2836,13 @@ jobs:
         with:
           ref: ${{ env.CHECKOUT_REF }}

+      - name: Setup
+        uses: ./.github/actions/runner_setup
+      - name: Docker setup
+        uses: ./.github/actions/docker_setup
+        with:
+          test_name: "Integration tests (amd_binary, 1/5)"
+
       - name: Prepare env script
         run: |
           rm -rf ./ci/tmp
@@ -3770,16 +2864,16 @@ jobs:
           . ./ci/tmp/praktika_setup_env.sh
           set -o pipefail
           if command -v ts &> /dev/null; then
-            python3 -m praktika run 'Stress test (amd_msan)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
+            python3 -m praktika run 'Integration tests (amd_binary, 1/5)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
           else
-            python3 -m praktika run 'Stress test (amd_msan)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log
+            python3 -m praktika run 'Integration tests (amd_binary, 1/5)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log
           fi

-  stress_test_azure_amd_msan:
-    runs-on: [self-hosted, amd-medium]
-    needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_amd_msan]
-    if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'U3RyZXNzIHRlc3QgKGF6dXJlLCBhbWRfbXNhbik=') }}
-    name: "Stress test (azure, amd_msan)"
+  integration_tests_amd_binary_2_5:
+    runs-on: [self-hosted, altinity-on-demand, altinity-func-tester]
+    needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_amd_binary]
+    if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKGFtZF9iaW5hcnksIDIvNSk=') }}
+    name: "Integration tests (amd_binary, 2/5)"
     outputs:
       data: ${{ steps.run.outputs.DATA }}
       pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }}
@@ -3789,6 +2883,13 @@ jobs:
         with:
           ref: ${{ env.CHECKOUT_REF }}

+      - name: Setup
+        uses: ./.github/actions/runner_setup
+      - name: Docker setup
+        uses: ./.github/actions/docker_setup
+        with:
+          test_name: "Integration tests (amd_binary, 2/5)"
+
       - name: Prepare env script
         run: |
           rm -rf ./ci/tmp
@@ -3810,16 +2911,16 @@ jobs:
           . ./ci/tmp/praktika_setup_env.sh
           set -o pipefail
           if command -v ts &> /dev/null; then
-            python3 -m praktika run 'Stress test (azure, amd_msan)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
+            python3 -m praktika run 'Integration tests (amd_binary, 2/5)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
           else
-            python3 -m praktika run 'Stress test (azure, amd_msan)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log
+            python3 -m praktika run 'Integration tests (amd_binary, 2/5)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log
           fi

-  stress_test_azure_amd_tsan:
-    runs-on: [self-hosted, amd-medium]
-    needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_amd_tsan]
-    if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'U3RyZXNzIHRlc3QgKGF6dXJlLCBhbWRfdHNhbik=') }}
-    name: "Stress test (azure, amd_tsan)"
+  integration_tests_amd_binary_3_5:
+    runs-on: [self-hosted, altinity-on-demand, altinity-func-tester]
+    needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_amd_binary]
+    if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKGFtZF9iaW5hcnksIDMvNSk=') }}
+    name: "Integration tests (amd_binary, 3/5)"
     outputs:
       data: ${{ steps.run.outputs.DATA }}
       pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }}
@@ -3829,6 +2930,13 @@ jobs:
         with:
           ref: ${{ env.CHECKOUT_REF }}

+      - name: Setup
+        uses: ./.github/actions/runner_setup
+      - name: Docker setup
+        uses: ./.github/actions/docker_setup
+        with:
+          test_name: "Integration tests (amd_binary, 3/5)"
+
       - name: Prepare env script
         run: |
           rm -rf ./ci/tmp
@@ -3850,16 +2958,16 @@ jobs:
           . ./ci/tmp/praktika_setup_env.sh
           set -o pipefail
           if command -v ts &> /dev/null; then
-            python3 -m praktika run 'Stress test (azure, amd_tsan)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
+            python3 -m praktika run 'Integration tests (amd_binary, 3/5)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
           else
-            python3 -m praktika run 'Stress test (azure, amd_tsan)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log
+            python3 -m praktika run 'Integration tests (amd_binary, 3/5)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log
           fi

-  ast_fuzzer_amd_debug:
-    runs-on: [self-hosted, amd-medium]
-    needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_amd_debug]
-    if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'QVNUIGZ1enplciAoYW1kX2RlYnVnKQ==') }}
-    name: "AST fuzzer (amd_debug)"
+  integration_tests_amd_binary_4_5:
+    runs-on: [self-hosted, altinity-on-demand, altinity-func-tester]
+    needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_amd_binary]
+    if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKGFtZF9iaW5hcnksIDQvNSk=') }}
+    name: "Integration tests (amd_binary, 4/5)"
     outputs:
       data: ${{ steps.run.outputs.DATA }}
       pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }}
@@ -3869,6 +2977,13 @@ jobs:
         with:
           ref: ${{ env.CHECKOUT_REF }}

+      - name: Setup
+        uses: ./.github/actions/runner_setup
+      - name: Docker setup
+        uses: ./.github/actions/docker_setup
+        with:
+          test_name: "Integration tests (amd_binary, 4/5)"
+
       - name: Prepare env script
         run: |
           rm -rf ./ci/tmp
@@ -3890,16 +3005,16 @@ jobs:
           . ./ci/tmp/praktika_setup_env.sh
           set -o pipefail
           if command -v ts &> /dev/null; then
-            python3 -m praktika run 'AST fuzzer (amd_debug)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
+            python3 -m praktika run 'Integration tests (amd_binary, 4/5)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
           else
-            python3 -m praktika run 'AST fuzzer (amd_debug)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log
+            python3 -m praktika run 'Integration tests (amd_binary, 4/5)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log
           fi

-  ast_fuzzer_arm_asan:
-    runs-on: [self-hosted, arm-medium]
-    needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_arm_asan]
-    if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'QVNUIGZ1enplciAoYXJtX2FzYW4p') }}
-    name: "AST fuzzer (arm_asan)"
+  integration_tests_amd_binary_5_5:
+    runs-on: [self-hosted, altinity-on-demand, altinity-func-tester]
+    needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_amd_binary]
+    if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKGFtZF9iaW5hcnksIDUvNSk=') }}
+    name: "Integration tests (amd_binary, 5/5)"
     outputs:
       data: ${{ steps.run.outputs.DATA }}
       pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }}
@@ -3909,6 +3024,13 @@ jobs:
         with:
           ref: ${{ env.CHECKOUT_REF }}

+      - name: Setup
+        uses: ./.github/actions/runner_setup
+      - name: Docker setup
+        uses: ./.github/actions/docker_setup
+        with:
+          test_name: "Integration tests (amd_binary, 5/5)"
+
       - name: Prepare env script
         run: |
           rm -rf ./ci/tmp
@@ -3930,16 +3052,16 @@ jobs:
           . ./ci/tmp/praktika_setup_env.sh
           set -o pipefail
           if command -v ts &> /dev/null; then
-            python3 -m praktika run 'AST fuzzer (arm_asan)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
+            python3 -m praktika run 'Integration tests (amd_binary, 5/5)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
           else
-            python3 -m praktika run 'AST fuzzer (arm_asan)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log
+            python3 -m praktika run 'Integration tests (amd_binary, 5/5)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log
           fi

-  ast_fuzzer_amd_tsan:
-    runs-on: [self-hosted, amd-medium]
-    needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_amd_tsan]
-    if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'QVNUIGZ1enplciAoYW1kX3RzYW4p') }}
-    name: "AST fuzzer (amd_tsan)"
+  integration_tests_arm_binary_distributed_plan_1_4:
+    runs-on: [self-hosted, altinity-on-demand, altinity-func-tester-aarch64]
+    needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_arm_binary]
+    if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKGFybV9iaW5hcnksIGRpc3RyaWJ1dGVkIHBsYW4sIDEvNCk=') }}
+    name: "Integration tests (arm_binary, distributed plan, 1/4)"
     outputs:
       data: ${{ steps.run.outputs.DATA }}
       pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }}
@@ -3949,6 +3071,13 @@ jobs:
         with:
           ref: ${{ env.CHECKOUT_REF }}

+      - name: Setup
+        uses: ./.github/actions/runner_setup
+      - name: Docker setup
+        uses: ./.github/actions/docker_setup
+        with:
+          test_name: "Integration tests (arm_binary, distributed plan, 1/4)"
+
       - name: Prepare env script
         run: |
           rm -rf ./ci/tmp
@@ -3970,16 +3099,16 @@ jobs:
           . ./ci/tmp/praktika_setup_env.sh
           set -o pipefail
           if command -v ts &> /dev/null; then
-            python3 -m praktika run 'AST fuzzer (amd_tsan)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
+            python3 -m praktika run 'Integration tests (arm_binary, distributed plan, 1/4)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
           else
-            python3 -m praktika run 'AST fuzzer (amd_tsan)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log
+            python3 -m praktika run 'Integration tests (arm_binary, distributed plan, 1/4)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log
           fi

-  ast_fuzzer_amd_msan:
-    runs-on: [self-hosted, amd-medium]
-    needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_amd_msan]
-    if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'QVNUIGZ1enplciAoYW1kX21zYW4p') }}
-    name: "AST fuzzer (amd_msan)"
+  integration_tests_arm_binary_distributed_plan_2_4:
+    runs-on: [self-hosted, altinity-on-demand, altinity-func-tester-aarch64]
+    needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_arm_binary]
+    if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKGFybV9iaW5hcnksIGRpc3RyaWJ1dGVkIHBsYW4sIDIvNCk=') }}
+    name: "Integration tests (arm_binary, distributed plan, 2/4)"
     outputs:
       data: ${{ steps.run.outputs.DATA }}
       pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }}
@@ -3989,6 +3118,13 @@ jobs:
         with:
           ref: ${{ env.CHECKOUT_REF }}

+      - name: Setup
+        uses: ./.github/actions/runner_setup
+      - name: Docker setup
+        uses: ./.github/actions/docker_setup
+        with:
+          test_name: "Integration tests (arm_binary, distributed plan, 2/4)"
+
       - name: Prepare env script
         run: |
           rm -rf ./ci/tmp
@@ -4010,16 +3146,16 @@ jobs:
           . ./ci/tmp/praktika_setup_env.sh
           set -o pipefail
           if command -v ts &> /dev/null; then
-            python3 -m praktika run 'AST fuzzer (amd_msan)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
+            python3 -m praktika run 'Integration tests (arm_binary, distributed plan, 2/4)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
           else
-            python3 -m praktika run 'AST fuzzer (amd_msan)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log
+            python3 -m praktika run 'Integration tests (arm_binary, distributed plan, 2/4)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log
           fi

-  ast_fuzzer_amd_ubsan:
-    runs-on: [self-hosted, amd-medium]
-    needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_amd_ubsan]
-    if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'QVNUIGZ1enplciAoYW1kX3Vic2FuKQ==') }}
-    name: "AST fuzzer (amd_ubsan)"
+  integration_tests_arm_binary_distributed_plan_3_4:
+    runs-on: [self-hosted, altinity-on-demand, altinity-func-tester-aarch64]
+    needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_arm_binary]
+    if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKGFybV9iaW5hcnksIGRpc3RyaWJ1dGVkIHBsYW4sIDMvNCk=') }}
+    name: "Integration tests (arm_binary, distributed plan, 3/4)"
     outputs:
       data: ${{ steps.run.outputs.DATA }}
       pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }}
@@ -4029,6 +3165,13 @@ jobs:
         with:
           ref: ${{ env.CHECKOUT_REF }}

+      - name: Setup
+        uses: ./.github/actions/runner_setup
+      - name: Docker setup
+        uses: ./.github/actions/docker_setup
+        with:
+          test_name: "Integration tests (arm_binary, distributed plan, 3/4)"
+
       - name: Prepare env script
         run: |
           rm -rf ./ci/tmp
@@ -4050,16 +3193,16 @@ jobs:
           . ./ci/tmp/praktika_setup_env.sh
           set -o pipefail
           if command -v ts &> /dev/null; then
-            python3 -m praktika run 'AST fuzzer (amd_ubsan)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
+            python3 -m praktika run 'Integration tests (arm_binary, distributed plan, 3/4)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
           else
-            python3 -m praktika run 'AST fuzzer (amd_ubsan)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log
+            python3 -m praktika run 'Integration tests (arm_binary, distributed plan, 3/4)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log
           fi

-  buzzhouse_amd_debug:
-    runs-on: [self-hosted, amd-medium]
-    needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_amd_debug]
-    if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'QnV6ekhvdXNlIChhbWRfZGVidWcp') }}
-    name: "BuzzHouse (amd_debug)"
+  integration_tests_arm_binary_distributed_plan_4_4:
+    runs-on: [self-hosted, altinity-on-demand, altinity-func-tester-aarch64]
+    needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_arm_binary]
+    if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKGFybV9iaW5hcnksIGRpc3RyaWJ1dGVkIHBsYW4sIDQvNCk=') }}
+    name: "Integration tests (arm_binary, distributed plan, 4/4)"
     outputs:
       data: ${{ steps.run.outputs.DATA }}
       pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }}
@@ -4069,6 +3212,13 @@ jobs:
         with:
           ref: ${{ env.CHECKOUT_REF }}

+      - name: Setup
+        uses: ./.github/actions/runner_setup
+      - name: Docker setup
+        uses: ./.github/actions/docker_setup
+        with:
+          test_name: "Integration tests (arm_binary, distributed plan, 4/4)"
+
       - name: Prepare env script
         run: |
           rm -rf ./ci/tmp
@@ -4090,16 +3240,16 @@ jobs:
           . ./ci/tmp/praktika_setup_env.sh
           set -o pipefail
           if command -v ts &> /dev/null; then
-            python3 -m praktika run 'BuzzHouse (amd_debug)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
+            python3 -m praktika run 'Integration tests (arm_binary, distributed plan, 4/4)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
           else
-            python3 -m praktika run 'BuzzHouse (amd_debug)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log
+            python3 -m praktika run 'Integration tests (arm_binary, distributed plan, 4/4)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log
           fi

-  buzzhouse_arm_asan:
-    runs-on: [self-hosted, arm-medium]
-    needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_arm_asan]
-    if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'QnV6ekhvdXNlIChhcm1fYXNhbik=') }}
-    name: "BuzzHouse (arm_asan)"
+  integration_tests_amd_tsan_1_6:
+    runs-on: [self-hosted, altinity-on-demand, altinity-func-tester]
+    needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_amd_tsan]
+    if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKGFtZF90c2FuLCAxLzYp') }}
+    name: "Integration tests (amd_tsan, 1/6)"
     outputs:
       data: ${{ steps.run.outputs.DATA }}
       pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }}
@@ -4109,6 +3259,13 @@ jobs:
         with:
           ref: ${{ env.CHECKOUT_REF }}

+      - name: Setup
+        uses: ./.github/actions/runner_setup
+      - name: Docker setup
+        uses: ./.github/actions/docker_setup
+        with:
+          test_name: "Integration tests (amd_tsan, 1/6)"
+
       - name: Prepare env script
         run: |
           rm -rf ./ci/tmp
@@ -4130,16 +3287,16 @@ jobs:
           . ./ci/tmp/praktika_setup_env.sh
           set -o pipefail
           if command -v ts &> /dev/null; then
-            python3 -m praktika run 'BuzzHouse (arm_asan)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
+            python3 -m praktika run 'Integration tests (amd_tsan, 1/6)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
           else
-            python3 -m praktika run 'BuzzHouse (arm_asan)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log
+            python3 -m praktika run 'Integration tests (amd_tsan, 1/6)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log
           fi

-  buzzhouse_amd_tsan:
-    runs-on: [self-hosted, amd-medium]
+  integration_tests_amd_tsan_2_6:
+    runs-on: [self-hosted, altinity-on-demand, altinity-func-tester]
     needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_amd_tsan]
-    if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'QnV6ekhvdXNlIChhbWRfdHNhbik=') }}
-    name: "BuzzHouse (amd_tsan)"
+    if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKGFtZF90c2FuLCAyLzYp') }}
+    name: "Integration tests (amd_tsan, 2/6)"
     outputs:
       data: ${{ steps.run.outputs.DATA }}
       pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }}
@@ -4149,6 +3306,13 @@ jobs:
         with:
           ref: ${{ env.CHECKOUT_REF }}

+      - name: Setup
+        uses: ./.github/actions/runner_setup
+      - name: Docker setup
+        uses: ./.github/actions/docker_setup
+        with:
+          test_name: "Integration tests (amd_tsan, 2/6)"
+
       - name: Prepare env script
         run: |
           rm -rf ./ci/tmp
@@ -4170,16 +3334,16 @@ jobs:
           . ./ci/tmp/praktika_setup_env.sh
           set -o pipefail
           if command -v ts &> /dev/null; then
-            python3 -m praktika run 'BuzzHouse (amd_tsan)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
+            python3 -m praktika run 'Integration tests (amd_tsan, 2/6)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
           else
-            python3 -m praktika run 'BuzzHouse (amd_tsan)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log
+            python3 -m praktika run 'Integration tests (amd_tsan, 2/6)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log
           fi

-  buzzhouse_amd_msan:
-    runs-on: [self-hosted, amd-medium]
-    needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_amd_msan]
-    if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'QnV6ekhvdXNlIChhbWRfbXNhbik=') }}
-    name: "BuzzHouse (amd_msan)"
+  integration_tests_amd_tsan_3_6:
+    runs-on: [self-hosted, altinity-on-demand, altinity-func-tester]
+    needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_amd_tsan]
+    if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKGFtZF90c2FuLCAzLzYp') }}
+    name: "Integration tests (amd_tsan, 3/6)"
     outputs:
       data: ${{ steps.run.outputs.DATA }}
       pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }}
@@ -4189,6 +3353,13 @@ jobs:
         with:
           ref: ${{ env.CHECKOUT_REF }}

+      - name: Setup
+        uses: ./.github/actions/runner_setup
+      - name: Docker setup
+        uses: ./.github/actions/docker_setup
+        with:
+          test_name: "Integration tests (amd_tsan, 3/6)"
+
       - name: Prepare env script
         run: |
           rm -rf ./ci/tmp
@@ -4210,16 +3381,16 @@ jobs:
           . ./ci/tmp/praktika_setup_env.sh
           set -o pipefail
           if command -v ts &> /dev/null; then
-            python3 -m praktika run 'BuzzHouse (amd_msan)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
+            python3 -m praktika run 'Integration tests (amd_tsan, 3/6)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
           else
-            python3 -m praktika run 'BuzzHouse (amd_msan)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log
+            python3 -m praktika run 'Integration tests (amd_tsan, 3/6)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log
           fi

-  buzzhouse_amd_ubsan:
-    runs-on: [self-hosted, amd-medium]
-    needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_amd_ubsan]
-    if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'QnV6ekhvdXNlIChhbWRfdWJzYW4p') }}
-    name: "BuzzHouse (amd_ubsan)"
+  integration_tests_amd_tsan_4_6:
+    runs-on: [self-hosted, altinity-on-demand, altinity-func-tester]
+    needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_amd_tsan]
+    if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKGFtZF90c2FuLCA0LzYp') }}
+    name: "Integration tests (amd_tsan, 4/6)"
     outputs:
       data: ${{ steps.run.outputs.DATA }}
       pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }}
@@ -4229,6 +3400,13 @@ jobs:
         with:
           ref: ${{ env.CHECKOUT_REF }}

+      - name: Setup
+        uses: ./.github/actions/runner_setup
+      - name: Docker setup
+        uses: ./.github/actions/docker_setup
+        with:
+          test_name: "Integration tests (amd_tsan, 4/6)"
+
       - name: Prepare env script
         run: |
           rm -rf ./ci/tmp
@@ -4250,16 +3428,16 @@ jobs:
           . ./ci/tmp/praktika_setup_env.sh
           set -o pipefail
           if command -v ts &> /dev/null; then
-            python3 -m praktika run 'BuzzHouse (amd_ubsan)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
+            python3 -m praktika run 'Integration tests (amd_tsan, 4/6)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
           else
-            python3 -m praktika run 'BuzzHouse (amd_ubsan)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log
+            python3 -m praktika run 'Integration tests (amd_tsan, 4/6)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log
           fi

-  performance_comparison_amd_release_master_head_1_6:
-    runs-on: [self-hosted, amd-medium]
-    needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_amd_release]
-    if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'UGVyZm9ybWFuY2UgQ29tcGFyaXNvbiAoYW1kX3JlbGVhc2UsIG1hc3Rlcl9oZWFkLCAxLzYp') }}
-    name: "Performance Comparison (amd_release, master_head, 1/6)"
+  integration_tests_amd_tsan_5_6:
+    runs-on: [self-hosted, altinity-on-demand, altinity-func-tester]
+    needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_amd_tsan]
+    if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKGFtZF90c2FuLCA1LzYp') }}
+    name: "Integration tests (amd_tsan, 5/6)"
     outputs:
       data: ${{ steps.run.outputs.DATA }}
       pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }}
@@ -4269,6 +3447,13 @@ jobs:
         with:
           ref: ${{ env.CHECKOUT_REF }}

+      - name: Setup
+        uses: ./.github/actions/runner_setup
+      - name: Docker setup
+        uses: ./.github/actions/docker_setup
+        with:
+          test_name: "Integration tests (amd_tsan, 5/6)"
+
       - name: Prepare env script
         run: |
           rm -rf ./ci/tmp
@@ -4290,16 +3475,16 @@ jobs:
           . ./ci/tmp/praktika_setup_env.sh
           set -o pipefail
           if command -v ts &> /dev/null; then
-            python3 -m praktika run 'Performance Comparison (amd_release, master_head, 1/6)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
+            python3 -m praktika run 'Integration tests (amd_tsan, 5/6)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
           else
-            python3 -m praktika run 'Performance Comparison (amd_release, master_head, 1/6)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log
+            python3 -m praktika run 'Integration tests (amd_tsan, 5/6)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log
           fi

-  performance_comparison_amd_release_master_head_2_6:
-    runs-on: [self-hosted, amd-medium]
-    needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_amd_release]
-    if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'UGVyZm9ybWFuY2UgQ29tcGFyaXNvbiAoYW1kX3JlbGVhc2UsIG1hc3Rlcl9oZWFkLCAyLzYp') }}
-    name: "Performance Comparison (amd_release, master_head, 2/6)"
+  integration_tests_amd_tsan_6_6:
+    runs-on: [self-hosted, altinity-on-demand, altinity-func-tester]
+    needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_amd_tsan]
+    if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKGFtZF90c2FuLCA2LzYp') }}
+    name: "Integration tests (amd_tsan, 6/6)"
     outputs:
       data: ${{ steps.run.outputs.DATA }}
       pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }}
@@ -4309,6 +3494,13 @@ jobs:
         with:
           ref: ${{ env.CHECKOUT_REF }}

+      - name: Setup
+        uses: ./.github/actions/runner_setup
+      - name: Docker setup
+        uses: ./.github/actions/docker_setup
+        with:
+          test_name: "Integration tests (amd_tsan, 6/6)"
+
       - name: Prepare env script
         run: |
           rm -rf ./ci/tmp
@@ -4330,16 +3522,16 @@ jobs:
           . ./ci/tmp/praktika_setup_env.sh
           set -o pipefail
           if command -v ts &> /dev/null; then
-            python3 -m praktika run 'Performance Comparison (amd_release, master_head, 2/6)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
+            python3 -m praktika run 'Integration tests (amd_tsan, 6/6)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
           else
-            python3 -m praktika run 'Performance Comparison (amd_release, master_head, 2/6)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log
+            python3 -m praktika run 'Integration tests (amd_tsan, 6/6)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log
           fi

-  performance_comparison_amd_release_master_head_3_6:
-    runs-on: [self-hosted, amd-medium]
-    needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_amd_release]
-    if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'UGVyZm9ybWFuY2UgQ29tcGFyaXNvbiAoYW1kX3JlbGVhc2UsIG1hc3Rlcl9oZWFkLCAzLzYp') }}
-    name: "Performance Comparison (amd_release, master_head, 3/6)"
+  stress_test_amd_debug:
+    runs-on: [self-hosted, altinity-on-demand, altinity-func-tester]
+    needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_amd_debug]
+    if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'U3RyZXNzIHRlc3QgKGFtZF9kZWJ1Zyk=') }}
+    name: "Stress test (amd_debug)"
     outputs:
       data: ${{ steps.run.outputs.DATA }}
       pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }}
@@ -4349,6 +3541,13 @@ jobs:
         with:
           ref: ${{ env.CHECKOUT_REF }}

+      - name: Setup
+        uses: ./.github/actions/runner_setup
+      - name: Docker setup
+        uses: ./.github/actions/docker_setup
+        with:
+          test_name: "Stress test (amd_debug)"
+
       - name: Prepare env script
         run: |
           rm -rf ./ci/tmp
@@ -4370,16 +3569,16 @@ jobs:
           . ./ci/tmp/praktika_setup_env.sh
           set -o pipefail
           if command -v ts &> /dev/null; then
-            python3 -m praktika run 'Performance Comparison (amd_release, master_head, 3/6)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
+            python3 -m praktika run 'Stress test (amd_debug)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
           else
-            python3 -m praktika run 'Performance Comparison (amd_release, master_head, 3/6)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log
+            python3 -m praktika run 'Stress test (amd_debug)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log
           fi

-  performance_comparison_amd_release_master_head_4_6:
-    runs-on: [self-hosted, amd-medium]
-    needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_amd_release]
-    if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'UGVyZm9ybWFuY2UgQ29tcGFyaXNvbiAoYW1kX3JlbGVhc2UsIG1hc3Rlcl9oZWFkLCA0LzYp') }}
-    name: "Performance Comparison (amd_release, master_head, 4/6)"
+  stress_test_amd_tsan:
+    runs-on: [self-hosted, altinity-on-demand, altinity-func-tester]
+    needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_amd_tsan]
+    if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'U3RyZXNzIHRlc3QgKGFtZF90c2FuKQ==') }}
+    name: "Stress test (amd_tsan)"
     outputs:
       data: ${{ steps.run.outputs.DATA }}
       pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }}
@@ -4389,6 +3588,13 @@ jobs:
         with:
           ref: ${{ env.CHECKOUT_REF }}

+      - name: Setup
+        uses: ./.github/actions/runner_setup
+      - name: Docker setup
+        uses: ./.github/actions/docker_setup
+        with:
+          test_name: "Stress test (amd_tsan)"
+
       - name: Prepare env script
         run: |
           rm -rf ./ci/tmp
@@ -4410,16 +3616,16 @@ jobs:
           . ./ci/tmp/praktika_setup_env.sh
           set -o pipefail
           if command -v ts &> /dev/null; then
-            python3 -m praktika run 'Performance Comparison (amd_release, master_head, 4/6)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
+            python3 -m praktika run 'Stress test (amd_tsan)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
           else
-            python3 -m praktika run 'Performance Comparison (amd_release, master_head, 4/6)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log
+            python3 -m praktika run 'Stress test (amd_tsan)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log
           fi

-  performance_comparison_amd_release_master_head_5_6:
-    runs-on: [self-hosted, amd-medium]
-    needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_amd_release]
-    if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'UGVyZm9ybWFuY2UgQ29tcGFyaXNvbiAoYW1kX3JlbGVhc2UsIG1hc3Rlcl9oZWFkLCA1LzYp') }}
-    name: "Performance Comparison (amd_release, master_head, 5/6)"
+  stress_test_arm_asan:
+    runs-on: [self-hosted, altinity-on-demand, altinity-func-tester-aarch64]
+    needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_arm_asan]
+    if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'U3RyZXNzIHRlc3QgKGFybV9hc2FuKQ==') }}
+    name: "Stress test (arm_asan)"
     outputs:
       data: ${{ steps.run.outputs.DATA }}
       pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }}
@@ -4429,6 +3635,13 @@ jobs:
         with:
           ref: ${{ env.CHECKOUT_REF }}

+      - name: Setup
+        uses: ./.github/actions/runner_setup
+      - name: Docker setup
+        uses: ./.github/actions/docker_setup
+        with:
+          test_name: "Stress test (arm_asan)"
+
       - name: Prepare env script
         run: |
           rm -rf ./ci/tmp
@@ -4450,16 +3663,16 @@ jobs:
           . ./ci/tmp/praktika_setup_env.sh
           set -o pipefail
           if command -v ts &> /dev/null; then
-            python3 -m praktika run 'Performance Comparison (amd_release, master_head, 5/6)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
+            python3 -m praktika run 'Stress test (arm_asan)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
           else
-            python3 -m praktika run 'Performance Comparison (amd_release, master_head, 5/6)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log
+            python3 -m praktika run 'Stress test (arm_asan)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log
           fi

-  performance_comparison_amd_release_master_head_6_6:
-    runs-on: [self-hosted, amd-medium]
-    needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_amd_release]
-    if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'UGVyZm9ybWFuY2UgQ29tcGFyaXNvbiAoYW1kX3JlbGVhc2UsIG1hc3Rlcl9oZWFkLCA2LzYp') }}
-    name: "Performance Comparison (amd_release, master_head, 6/6)"
+  stress_test_arm_asan_s3:
+    runs-on: [self-hosted, altinity-on-demand, altinity-func-tester-aarch64]
+    needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_arm_asan]
+    if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'U3RyZXNzIHRlc3QgKGFybV9hc2FuLCBzMyk=') }}
+    name: "Stress test (arm_asan, s3)"
     outputs:
       data: ${{ steps.run.outputs.DATA }}
       pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }}
@@ -4469,6 +3682,13 @@ jobs:
         with:
           ref: ${{ env.CHECKOUT_REF }}

+      - name: Setup
+        uses: ./.github/actions/runner_setup
+      - name: Docker setup
+        uses: ./.github/actions/docker_setup
+        with:
+          test_name: "Stress test (arm_asan, s3)"
+
       - name: Prepare env script
         run: |
           rm -rf ./ci/tmp
@@ -4490,16 +3710,16 @@ jobs:
           . ./ci/tmp/praktika_setup_env.sh
           set -o pipefail
           if command -v ts &> /dev/null; then
-            python3 -m praktika run 'Performance Comparison (amd_release, master_head, 6/6)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
+            python3 -m praktika run 'Stress test (arm_asan, s3)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
           else
-            python3 -m praktika run 'Performance Comparison (amd_release, master_head, 6/6)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log
+            python3 -m praktika run 'Stress test (arm_asan, s3)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log
           fi

-  performance_comparison_arm_release_master_head_1_6:
-    runs-on: [self-hosted, arm-medium]
-    needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_arm_release]
-    if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'UGVyZm9ybWFuY2UgQ29tcGFyaXNvbiAoYXJtX3JlbGVhc2UsIG1hc3Rlcl9oZWFkLCAxLzYp') }}
-    name: "Performance Comparison (arm_release, master_head, 1/6)"
+  stress_test_amd_ubsan:
+    runs-on: [self-hosted, altinity-on-demand, altinity-func-tester]
+    needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_amd_ubsan]
+    if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'U3RyZXNzIHRlc3QgKGFtZF91YnNhbik=') }}
+    name: "Stress test (amd_ubsan)"
     outputs:
       data: ${{ steps.run.outputs.DATA }}
       pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }}
@@ -4509,6 +3729,13 @@ jobs:
         with:
           ref: ${{ env.CHECKOUT_REF }}

+      - name: Setup
+        uses: ./.github/actions/runner_setup
+      - name: Docker setup
+        uses: ./.github/actions/docker_setup
+        with:
+          test_name: "Stress test (amd_ubsan)"
+
       - name: Prepare env script
         run: |
           rm -rf ./ci/tmp
@@ -4530,16 +3757,16 @@ jobs:
           . ./ci/tmp/praktika_setup_env.sh
          set -o pipefail
           if command -v ts &> /dev/null; then
-            python3 -m praktika run 'Performance Comparison (arm_release, master_head, 1/6)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
+            python3 -m praktika run 'Stress test (amd_ubsan)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
           else
-            python3 -m praktika run 'Performance Comparison (arm_release, master_head, 1/6)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log
+            python3 -m praktika run 'Stress test (amd_ubsan)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log
           fi

-  performance_comparison_arm_release_master_head_2_6:
-    runs-on: [self-hosted, arm-medium]
-    needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_arm_release]
-    if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'UGVyZm9ybWFuY2UgQ29tcGFyaXNvbiAoYXJtX3JlbGVhc2UsIG1hc3Rlcl9oZWFkLCAyLzYp') }}
-    name: "Performance Comparison (arm_release, master_head, 2/6)"
+  stress_test_amd_msan:
+    runs-on: [self-hosted, altinity-on-demand, altinity-func-tester]
+    needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_amd_msan]
+    if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'U3RyZXNzIHRlc3QgKGFtZF9tc2FuKQ==') }}
+    name: "Stress test (amd_msan)"
     outputs:
       data: ${{ steps.run.outputs.DATA }}
       pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }}
@@ -4549,6 +3776,13 @@ jobs:
         with:
           ref: ${{ env.CHECKOUT_REF }}

+      - name: Setup
+        uses: ./.github/actions/runner_setup
+      - name: Docker setup
+        uses: ./.github/actions/docker_setup
+        with:
+          test_name: "Stress test (amd_msan)"
+
       - name: Prepare env script
         run: |
           rm -rf ./ci/tmp
@@ -4570,16 +3804,16 @@ jobs:
           . ./ci/tmp/praktika_setup_env.sh
           set -o pipefail
           if command -v ts &> /dev/null; then
-            python3 -m praktika run 'Performance Comparison (arm_release, master_head, 2/6)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
+            python3 -m praktika run 'Stress test (amd_msan)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
           else
-            python3 -m praktika run 'Performance Comparison (arm_release, master_head, 2/6)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log
+            python3 -m praktika run 'Stress test (amd_msan)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log
           fi

-  performance_comparison_arm_release_master_head_3_6:
-    runs-on: [self-hosted, arm-medium]
-    needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_arm_release]
-    if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'UGVyZm9ybWFuY2UgQ29tcGFyaXNvbiAoYXJtX3JlbGVhc2UsIG1hc3Rlcl9oZWFkLCAzLzYp') }}
-    name: "Performance Comparison (arm_release, master_head, 3/6)"
+  ast_fuzzer_amd_debug:
+    runs-on: [self-hosted, altinity-on-demand, altinity-func-tester]
+    needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_amd_debug]
+    if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'QVNUIGZ1enplciAoYW1kX2RlYnVnKQ==') }}
+    name: "AST fuzzer (amd_debug)"
     outputs:
       data: ${{ steps.run.outputs.DATA }}
       pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }}
@@ -4589,6 +3823,13 @@ jobs:
         with:
           ref: ${{ env.CHECKOUT_REF }}

+      - name: Setup
+        uses: ./.github/actions/runner_setup
+      - name: Docker setup
+        uses: ./.github/actions/docker_setup
+        with:
+          test_name: "AST fuzzer (amd_debug)"
+
       - name: Prepare env script
         run: |
           rm -rf ./ci/tmp
@@ -4610,16 +3851,16 @@ jobs:
           . ./ci/tmp/praktika_setup_env.sh
           set -o pipefail
           if command -v ts &> /dev/null; then
-            python3 -m praktika run 'Performance Comparison (arm_release, master_head, 3/6)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
+            python3 -m praktika run 'AST fuzzer (amd_debug)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
           else
-            python3 -m praktika run 'Performance Comparison (arm_release, master_head, 3/6)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log
+            python3 -m praktika run 'AST fuzzer (amd_debug)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log
           fi

-  performance_comparison_arm_release_master_head_4_6:
-    runs-on: [self-hosted, arm-medium]
-    needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_arm_release]
-    if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'UGVyZm9ybWFuY2UgQ29tcGFyaXNvbiAoYXJtX3JlbGVhc2UsIG1hc3Rlcl9oZWFkLCA0LzYp') }}
-    name: "Performance Comparison (arm_release, master_head, 4/6)"
+  ast_fuzzer_arm_asan:
+    runs-on: [self-hosted, altinity-on-demand, altinity-func-tester-aarch64]
+    needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_arm_asan]
+    if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'QVNUIGZ1enplciAoYXJtX2FzYW4p') }}
+    name: "AST fuzzer (arm_asan)"
     outputs:
       data: ${{ steps.run.outputs.DATA }}
       pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }}
@@ -4629,6 +3870,13 @@ jobs:
         with:
           ref: ${{ env.CHECKOUT_REF }}

+      - name: Setup
+        uses: ./.github/actions/runner_setup
+      - name: Docker setup
+        uses: ./.github/actions/docker_setup
+        with:
+          test_name: "AST fuzzer (arm_asan)"
+
       - name: Prepare env script
         run: |
           rm -rf ./ci/tmp
@@ -4650,16 +3898,16 @@ jobs:
           . ./ci/tmp/praktika_setup_env.sh
           set -o pipefail
           if command -v ts &> /dev/null; then
-            python3 -m praktika run 'Performance Comparison (arm_release, master_head, 4/6)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
+            python3 -m praktika run 'AST fuzzer (arm_asan)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
           else
-            python3 -m praktika run 'Performance Comparison (arm_release, master_head, 4/6)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log
+            python3 -m praktika run 'AST fuzzer (arm_asan)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log
           fi

-  performance_comparison_arm_release_master_head_5_6:
-    runs-on: [self-hosted, arm-medium]
-    needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_arm_release]
-    if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'UGVyZm9ybWFuY2UgQ29tcGFyaXNvbiAoYXJtX3JlbGVhc2UsIG1hc3Rlcl9oZWFkLCA1LzYp') }}
-    name: "Performance Comparison (arm_release, master_head, 5/6)"
+  ast_fuzzer_amd_tsan:
+    runs-on: [self-hosted, altinity-on-demand, altinity-func-tester]
+    needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_amd_tsan]
+    if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'QVNUIGZ1enplciAoYW1kX3RzYW4p') }}
+    name: "AST fuzzer (amd_tsan)"
     outputs:
       data: ${{ steps.run.outputs.DATA }}
       pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }}
@@ -4669,6 +3917,13 @@ jobs:
         with:
           ref: ${{ env.CHECKOUT_REF }}

+      - name: Setup
+        uses: ./.github/actions/runner_setup
+      - name: Docker setup
+        uses: ./.github/actions/docker_setup
+        with:
+          test_name: "AST fuzzer (amd_tsan)"
+
       - name: Prepare env script
         run: |
           rm -rf ./ci/tmp
@@ -4690,16 +3945,16 @@ jobs:
           . ./ci/tmp/praktika_setup_env.sh
           set -o pipefail
           if command -v ts &> /dev/null; then
-            python3 -m praktika run 'Performance Comparison (arm_release, master_head, 5/6)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
+            python3 -m praktika run 'AST fuzzer (amd_tsan)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
           else
-            python3 -m praktika run 'Performance Comparison (arm_release, master_head, 5/6)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log
+            python3 -m praktika run 'AST fuzzer (amd_tsan)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log
           fi

-  performance_comparison_arm_release_master_head_6_6:
-    runs-on: [self-hosted, arm-medium]
-    needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_arm_release]
-    if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'UGVyZm9ybWFuY2UgQ29tcGFyaXNvbiAoYXJtX3JlbGVhc2UsIG1hc3Rlcl9oZWFkLCA2LzYp') }}
-    name: "Performance Comparison (arm_release, master_head, 6/6)"
+  ast_fuzzer_amd_msan:
+    runs-on: [self-hosted, altinity-on-demand, altinity-func-tester]
+    needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_amd_msan]
+    if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'QVNUIGZ1enplciAoYW1kX21zYW4p') }}
+    name: "AST fuzzer (amd_msan)"
     outputs:
       data: ${{ steps.run.outputs.DATA }}
       pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }}
@@ -4709,6 +3964,13 @@ jobs:
         with:
           ref: ${{ env.CHECKOUT_REF }}

+      - name: Setup
+        uses: ./.github/actions/runner_setup
+      - name: Docker setup
+        uses: ./.github/actions/docker_setup
+        with:
+          test_name: "AST fuzzer (amd_msan)"
+
       - name: Prepare env script
         run: |
           rm -rf ./ci/tmp
@@ -4730,16 +3992,16 @@ jobs:
           . ./ci/tmp/praktika_setup_env.sh
           set -o pipefail
           if command -v ts &> /dev/null; then
-            python3 -m praktika run 'Performance Comparison (arm_release, master_head, 6/6)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
+            python3 -m praktika run 'AST fuzzer (amd_msan)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
           else
-            python3 -m praktika run 'Performance Comparison (arm_release, master_head, 6/6)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log
+            python3 -m praktika run 'AST fuzzer (amd_msan)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log
           fi

-  performance_comparison_arm_release_release_base_1_6:
-    runs-on: [self-hosted, arm-medium]
-    needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_arm_release]
-    if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'UGVyZm9ybWFuY2UgQ29tcGFyaXNvbiAoYXJtX3JlbGVhc2UsIHJlbGVhc2VfYmFzZSwgMS82KQ==') }}
-    name: "Performance Comparison (arm_release, release_base, 1/6)"
+  ast_fuzzer_amd_ubsan:
+    runs-on: [self-hosted, altinity-on-demand, altinity-func-tester]
+    needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_amd_ubsan]
+    if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'QVNUIGZ1enplciAoYW1kX3Vic2FuKQ==') }}
+    name: "AST fuzzer (amd_ubsan)"
     outputs:
       data: ${{ steps.run.outputs.DATA }}
       pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }}
@@ -4749,6 +4011,13 @@ jobs:
         with:
           ref: ${{ env.CHECKOUT_REF }}

+      - name: Setup
+        uses: ./.github/actions/runner_setup
+      - name: Docker setup
+        uses: ./.github/actions/docker_setup
+        with:
+          test_name: "AST fuzzer (amd_ubsan)"
+
       - name: Prepare env script
         run: |
           rm -rf ./ci/tmp
@@ -4770,16 +4039,16 @@ jobs:
           .
./ci/tmp/praktika_setup_env.sh set -o pipefail if command -v ts &> /dev/null; then - python3 -m praktika run 'Performance Comparison (arm_release, release_base, 1/6)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log + python3 -m praktika run 'AST fuzzer (amd_ubsan)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log else - python3 -m praktika run 'Performance Comparison (arm_release, release_base, 1/6)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log + python3 -m praktika run 'AST fuzzer (amd_ubsan)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log fi - performance_comparison_arm_release_release_base_2_6: - runs-on: [self-hosted, arm-medium] - needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_arm_release] - if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'UGVyZm9ybWFuY2UgQ29tcGFyaXNvbiAoYXJtX3JlbGVhc2UsIHJlbGVhc2VfYmFzZSwgMi82KQ==') }} - name: "Performance Comparison (arm_release, release_base, 2/6)" + buzzhouse_amd_debug: + runs-on: [self-hosted, altinity-on-demand, altinity-func-tester] + needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_amd_debug] + if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'QnV6ekhvdXNlIChhbWRfZGVidWcp') }} + name: "BuzzHouse (amd_debug)" outputs: data: ${{ steps.run.outputs.DATA }} pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} @@ -4789,6 +4058,13 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "BuzzHouse (amd_debug)" + - name: Prepare env script run: | rm -rf ./ci/tmp @@ -4810,16 +4086,16 @@ jobs: . 
./ci/tmp/praktika_setup_env.sh set -o pipefail if command -v ts &> /dev/null; then - python3 -m praktika run 'Performance Comparison (arm_release, release_base, 2/6)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log + python3 -m praktika run 'BuzzHouse (amd_debug)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log else - python3 -m praktika run 'Performance Comparison (arm_release, release_base, 2/6)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log + python3 -m praktika run 'BuzzHouse (amd_debug)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log fi - performance_comparison_arm_release_release_base_3_6: - runs-on: [self-hosted, arm-medium] - needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_arm_release] - if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'UGVyZm9ybWFuY2UgQ29tcGFyaXNvbiAoYXJtX3JlbGVhc2UsIHJlbGVhc2VfYmFzZSwgMy82KQ==') }} - name: "Performance Comparison (arm_release, release_base, 3/6)" + buzzhouse_arm_asan: + runs-on: [self-hosted, altinity-on-demand, altinity-func-tester-aarch64] + needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_arm_asan] + if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'QnV6ekhvdXNlIChhcm1fYXNhbik=') }} + name: "BuzzHouse (arm_asan)" outputs: data: ${{ steps.run.outputs.DATA }} pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} @@ -4829,6 +4105,13 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "BuzzHouse (arm_asan)" + - name: Prepare env script run: | rm -rf ./ci/tmp @@ -4850,16 +4133,16 @@ jobs: . 
./ci/tmp/praktika_setup_env.sh set -o pipefail if command -v ts &> /dev/null; then - python3 -m praktika run 'Performance Comparison (arm_release, release_base, 3/6)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log + python3 -m praktika run 'BuzzHouse (arm_asan)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log else - python3 -m praktika run 'Performance Comparison (arm_release, release_base, 3/6)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log + python3 -m praktika run 'BuzzHouse (arm_asan)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log fi - performance_comparison_arm_release_release_base_4_6: - runs-on: [self-hosted, arm-medium] - needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_arm_release] - if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'UGVyZm9ybWFuY2UgQ29tcGFyaXNvbiAoYXJtX3JlbGVhc2UsIHJlbGVhc2VfYmFzZSwgNC82KQ==') }} - name: "Performance Comparison (arm_release, release_base, 4/6)" + buzzhouse_amd_tsan: + runs-on: [self-hosted, altinity-on-demand, altinity-func-tester] + needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_amd_tsan] + if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'QnV6ekhvdXNlIChhbWRfdHNhbik=') }} + name: "BuzzHouse (amd_tsan)" outputs: data: ${{ steps.run.outputs.DATA }} pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} @@ -4869,6 +4152,13 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "BuzzHouse (amd_tsan)" + - name: Prepare env script run: | rm -rf ./ci/tmp @@ -4890,16 +4180,16 @@ jobs: . 
./ci/tmp/praktika_setup_env.sh set -o pipefail if command -v ts &> /dev/null; then - python3 -m praktika run 'Performance Comparison (arm_release, release_base, 4/6)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log + python3 -m praktika run 'BuzzHouse (amd_tsan)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log else - python3 -m praktika run 'Performance Comparison (arm_release, release_base, 4/6)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log + python3 -m praktika run 'BuzzHouse (amd_tsan)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log fi - performance_comparison_arm_release_release_base_5_6: - runs-on: [self-hosted, arm-medium] - needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_arm_release] - if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'UGVyZm9ybWFuY2UgQ29tcGFyaXNvbiAoYXJtX3JlbGVhc2UsIHJlbGVhc2VfYmFzZSwgNS82KQ==') }} - name: "Performance Comparison (arm_release, release_base, 5/6)" + buzzhouse_amd_msan: + runs-on: [self-hosted, altinity-on-demand, altinity-func-tester] + needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_amd_msan] + if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'QnV6ekhvdXNlIChhbWRfbXNhbik=') }} + name: "BuzzHouse (amd_msan)" outputs: data: ${{ steps.run.outputs.DATA }} pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} @@ -4909,6 +4199,13 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "BuzzHouse (amd_msan)" + - name: Prepare env script run: | rm -rf ./ci/tmp @@ -4930,16 +4227,16 @@ jobs: . 
./ci/tmp/praktika_setup_env.sh set -o pipefail if command -v ts &> /dev/null; then - python3 -m praktika run 'Performance Comparison (arm_release, release_base, 5/6)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log + python3 -m praktika run 'BuzzHouse (amd_msan)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log else - python3 -m praktika run 'Performance Comparison (arm_release, release_base, 5/6)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log + python3 -m praktika run 'BuzzHouse (amd_msan)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log fi - performance_comparison_arm_release_release_base_6_6: - runs-on: [self-hosted, arm-medium] - needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_arm_release] - if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'UGVyZm9ybWFuY2UgQ29tcGFyaXNvbiAoYXJtX3JlbGVhc2UsIHJlbGVhc2VfYmFzZSwgNi82KQ==') }} - name: "Performance Comparison (arm_release, release_base, 6/6)" + buzzhouse_amd_ubsan: + runs-on: [self-hosted, altinity-on-demand, altinity-func-tester] + needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_amd_ubsan] + if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'QnV6ekhvdXNlIChhbWRfdWJzYW4p') }} + name: "BuzzHouse (amd_ubsan)" outputs: data: ${{ steps.run.outputs.DATA }} pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} @@ -4949,6 +4246,13 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "BuzzHouse (amd_ubsan)" + - name: Prepare env script run: | rm -rf ./ci/tmp @@ -4970,13 +4274,13 @@ jobs: . 
./ci/tmp/praktika_setup_env.sh set -o pipefail if command -v ts &> /dev/null; then - python3 -m praktika run 'Performance Comparison (arm_release, release_base, 6/6)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log + python3 -m praktika run 'BuzzHouse (amd_ubsan)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log else - python3 -m praktika run 'Performance Comparison (arm_release, release_base, 6/6)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log + python3 -m praktika run 'BuzzHouse (amd_ubsan)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log fi clickbench_amd_release: - runs-on: [self-hosted, amd-medium] + runs-on: [self-hosted, altinity-on-demand, altinity-func-tester] needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_amd_release] if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'Q2xpY2tCZW5jaCAoYW1kX3JlbGVhc2Up') }} name: "ClickBench (amd_release)" @@ -4989,6 +4293,13 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "ClickBench (amd_release)" + - name: Prepare env script run: | rm -rf ./ci/tmp @@ -5016,7 +4327,7 @@ jobs: fi clickbench_arm_release: - runs-on: [self-hosted, arm-medium] + runs-on: [self-hosted, altinity-on-demand, altinity-func-tester-aarch64] needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_arm_release] if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'Q2xpY2tCZW5jaCAoYXJtX3JlbGVhc2Up') }} name: "ClickBench (arm_release)" @@ -5029,6 +4340,13 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "ClickBench (arm_release)" + - name: Prepare env script run: | rm -rf ./ci/tmp @@ -5056,7 +4374,7 @@ jobs: fi sqltest: - runs-on: [self-hosted, arm-medium] + runs-on: [self-hosted, altinity-on-demand, altinity-func-tester-aarch64] needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_arm_release] if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'U1FMVGVzdA==') }} name: "SQLTest" @@ -5069,6 +4387,13 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "SQLTest" + - name: Prepare env script run: | rm -rf ./ci/tmp @@ -5096,8 +4421,8 @@ jobs: fi finish_workflow: - runs-on: [self-hosted, style-checker-aarch64] - needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_arm_tidy, build_amd_debug, build_amd_asan, build_amd_tsan, build_amd_msan, build_amd_ubsan, build_amd_binary, build_arm_asan, build_arm_binary, build_amd_release, build_arm_release, build_amd_darwin, build_arm_darwin, 
build_arm_v80compat, build_amd_freebsd, build_ppc64le, build_amd_compat, build_amd_musl, build_riscv64, build_s390x, build_loongarch64, build_arm_fuzzers, unit_tests_asan, unit_tests_tsan, unit_tests_msan, unit_tests_ubsan, docker_server_image, docker_keeper_image, install_packages_amd_release, install_packages_arm_release, compatibility_check_amd_release, compatibility_check_arm_release, stateless_tests_amd_asan_distributed_plan_parallel_1_2, stateless_tests_amd_asan_distributed_plan_parallel_2_2, stateless_tests_amd_asan_db_disk_distributed_plan_sequential, stateless_tests_amd_binary_old_analyzer_s3_storage_databasereplicated_parallel, stateless_tests_amd_binary_old_analyzer_s3_storage_databasereplicated_sequential, stateless_tests_amd_binary_parallelreplicas_s3_storage_parallel, stateless_tests_amd_binary_parallelreplicas_s3_storage_sequential, stateless_tests_amd_debug_asyncinsert_s3_storage_parallel, stateless_tests_amd_debug_asyncinsert_s3_storage_sequential, stateless_tests_amd_debug_parallel, stateless_tests_amd_debug_sequential, stateless_tests_amd_tsan_parallel_1_2, stateless_tests_amd_tsan_parallel_2_2, stateless_tests_amd_tsan_sequential_1_2, stateless_tests_amd_tsan_sequential_2_2, stateless_tests_amd_msan_parallel_1_2, stateless_tests_amd_msan_parallel_2_2, stateless_tests_amd_msan_sequential_1_2, stateless_tests_amd_msan_sequential_2_2, stateless_tests_amd_ubsan_parallel, stateless_tests_amd_ubsan_sequential, stateless_tests_amd_debug_distributed_plan_s3_storage_parallel, stateless_tests_amd_debug_distributed_plan_s3_storage_sequential, stateless_tests_amd_tsan_s3_storage_parallel_1_2, stateless_tests_amd_tsan_s3_storage_parallel_2_2, stateless_tests_amd_tsan_s3_storage_sequential_1_2, stateless_tests_amd_tsan_s3_storage_sequential_2_2, stateless_tests_arm_binary_parallel, stateless_tests_arm_binary_sequential, stateless_tests_arm_asan_azure_parallel, stateless_tests_arm_asan_azure_sequential, integration_tests_amd_asan_db_disk_old_analyzer_1_6, integration_tests_amd_asan_db_disk_old_analyzer_2_6, integration_tests_amd_asan_db_disk_old_analyzer_3_6, integration_tests_amd_asan_db_disk_old_analyzer_4_6, integration_tests_amd_asan_db_disk_old_analyzer_5_6, integration_tests_amd_asan_db_disk_old_analyzer_6_6, integration_tests_amd_binary_1_5, integration_tests_amd_binary_2_5, integration_tests_amd_binary_3_5, integration_tests_amd_binary_4_5, integration_tests_amd_binary_5_5, integration_tests_arm_binary_distributed_plan_1_4, integration_tests_arm_binary_distributed_plan_2_4, integration_tests_arm_binary_distributed_plan_3_4, integration_tests_arm_binary_distributed_plan_4_4, integration_tests_amd_tsan_1_6, integration_tests_amd_tsan_2_6, integration_tests_amd_tsan_3_6, integration_tests_amd_tsan_4_6, integration_tests_amd_tsan_5_6, integration_tests_amd_tsan_6_6, stress_test_amd_debug, stress_test_amd_tsan, stress_test_arm_asan, stress_test_arm_asan_s3, stress_test_amd_ubsan, stress_test_amd_msan, stress_test_azure_amd_msan, stress_test_azure_amd_tsan, ast_fuzzer_amd_debug, ast_fuzzer_arm_asan, ast_fuzzer_amd_tsan, ast_fuzzer_amd_msan, ast_fuzzer_amd_ubsan, buzzhouse_amd_debug, buzzhouse_arm_asan, buzzhouse_amd_tsan, buzzhouse_amd_msan, buzzhouse_amd_ubsan, performance_comparison_amd_release_master_head_1_6, performance_comparison_amd_release_master_head_2_6, performance_comparison_amd_release_master_head_3_6, performance_comparison_amd_release_master_head_4_6, performance_comparison_amd_release_master_head_5_6, performance_comparison_amd_release_master_head_6_6, 
performance_comparison_arm_release_master_head_1_6, performance_comparison_arm_release_master_head_2_6, performance_comparison_arm_release_master_head_3_6, performance_comparison_arm_release_master_head_4_6, performance_comparison_arm_release_master_head_5_6, performance_comparison_arm_release_master_head_6_6, performance_comparison_arm_release_release_base_1_6, performance_comparison_arm_release_release_base_2_6, performance_comparison_arm_release_release_base_3_6, performance_comparison_arm_release_release_base_4_6, performance_comparison_arm_release_release_base_5_6, performance_comparison_arm_release_release_base_6_6, clickbench_amd_release, clickbench_arm_release, sqltest] + runs-on: [self-hosted, altinity-on-demand, altinity-style-checker-aarch64] + needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_amd_debug, build_amd_asan, build_amd_tsan, build_amd_msan, build_amd_ubsan, build_amd_binary, build_arm_asan, build_arm_binary, build_amd_release, build_arm_release, unit_tests_asan, unit_tests_tsan, unit_tests_msan, unit_tests_ubsan, docker_server_image, docker_keeper_image, install_packages_amd_release, install_packages_arm_release, compatibility_check_amd_release, compatibility_check_arm_release, stateless_tests_amd_asan_distributed_plan_parallel_1_2, stateless_tests_amd_asan_distributed_plan_parallel_2_2, stateless_tests_amd_asan_db_disk_distributed_plan_sequential, stateless_tests_amd_binary_old_analyzer_s3_storage_databasereplicated_parallel, stateless_tests_amd_binary_old_analyzer_s3_storage_databasereplicated_sequential, stateless_tests_amd_binary_parallelreplicas_s3_storage_parallel, stateless_tests_amd_binary_parallelreplicas_s3_storage_sequential, stateless_tests_amd_debug_asyncinsert_s3_storage_parallel, stateless_tests_amd_debug_asyncinsert_s3_storage_sequential, stateless_tests_amd_debug_parallel, stateless_tests_amd_debug_sequential, stateless_tests_amd_tsan_parallel_1_2, stateless_tests_amd_tsan_parallel_2_2, stateless_tests_amd_tsan_sequential_1_2, stateless_tests_amd_tsan_sequential_2_2, stateless_tests_amd_msan_parallel_1_2, stateless_tests_amd_msan_parallel_2_2, stateless_tests_amd_msan_sequential_1_2, stateless_tests_amd_msan_sequential_2_2, stateless_tests_amd_ubsan_parallel, stateless_tests_amd_ubsan_sequential, stateless_tests_amd_debug_distributed_plan_s3_storage_parallel, stateless_tests_amd_debug_distributed_plan_s3_storage_sequential, stateless_tests_amd_tsan_s3_storage_parallel_1_2, stateless_tests_amd_tsan_s3_storage_parallel_2_2, stateless_tests_amd_tsan_s3_storage_sequential_1_2, stateless_tests_amd_tsan_s3_storage_sequential_2_2, stateless_tests_arm_binary_parallel, stateless_tests_arm_binary_sequential, integration_tests_amd_asan_db_disk_old_analyzer_1_6, integration_tests_amd_asan_db_disk_old_analyzer_2_6, integration_tests_amd_asan_db_disk_old_analyzer_3_6, integration_tests_amd_asan_db_disk_old_analyzer_4_6, integration_tests_amd_asan_db_disk_old_analyzer_5_6, integration_tests_amd_asan_db_disk_old_analyzer_6_6, integration_tests_amd_binary_1_5, integration_tests_amd_binary_2_5, integration_tests_amd_binary_3_5, integration_tests_amd_binary_4_5, integration_tests_amd_binary_5_5, integration_tests_arm_binary_distributed_plan_1_4, integration_tests_arm_binary_distributed_plan_2_4, integration_tests_arm_binary_distributed_plan_3_4, integration_tests_arm_binary_distributed_plan_4_4, integration_tests_amd_tsan_1_6, integration_tests_amd_tsan_2_6, integration_tests_amd_tsan_3_6, 
integration_tests_amd_tsan_4_6, integration_tests_amd_tsan_5_6, integration_tests_amd_tsan_6_6, stress_test_amd_debug, stress_test_amd_tsan, stress_test_arm_asan, stress_test_arm_asan_s3, stress_test_amd_ubsan, stress_test_amd_msan, ast_fuzzer_amd_debug, ast_fuzzer_arm_asan, ast_fuzzer_amd_tsan, ast_fuzzer_amd_msan, ast_fuzzer_amd_ubsan, buzzhouse_amd_debug, buzzhouse_arm_asan, buzzhouse_amd_tsan, buzzhouse_amd_msan, buzzhouse_amd_ubsan, clickbench_amd_release, clickbench_arm_release, sqltest] if: ${{ always() }} name: "Finish Workflow" outputs: @@ -5109,6 +4434,13 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Finish Workflow" + - name: Prepare env script run: | rm -rf ./ci/tmp @@ -5134,3 +4466,224 @@ jobs: else python3 -m praktika run 'Finish Workflow' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log fi + +########################################################################################## +##################################### ALTINITY JOBS ###################################### +########################################################################################## + + GrypeScanServer: + needs: [config_workflow, docker_server_image] + if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'RG9ja2VyIHNlcnZlciBpbWFnZQ==') }} + strategy: + fail-fast: false + matrix: + suffix: ['', '-alpine'] + uses: ./.github/workflows/grype_scan.yml + secrets: inherit + with: + docker_image: altinityinfra/clickhouse-server + version: ${{ fromJson(needs.config_workflow.outputs.data).custom_data.version.string }} + tag-suffix: ${{ matrix.suffix }} + GrypeScanKeeper: + needs: [config_workflow, docker_keeper_image] + if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'RG9ja2VyIGtlZXBlciBpbWFnZQ==') }} + uses: ./.github/workflows/grype_scan.yml + secrets: inherit + with: + docker_image: altinityinfra/clickhouse-keeper + version: ${{ fromJson(needs.config_workflow.outputs.data).custom_data.version.string }} + + RegressionTestsRelease: + needs: [config_workflow, build_amd_binary] + if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).custom_data.ci_exclude_tags, 'regression')}} + uses: ./.github/workflows/regression.yml + secrets: inherit + with: + runner_type: altinity-regression-tester + commit: c5cae9b244e0839fb307a9fb67a40fe80d93810b + arch: release + build_sha: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }} + timeout_minutes: 300 + workflow_config: ${{ needs.config_workflow.outputs.data.workflow_config }} + RegressionTestsAarch64: + needs: [config_workflow, build_arm_binary] + if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).custom_data.ci_exclude_tags, 'regression') && !contains(fromJson(needs.config_workflow.outputs.data).custom_data.ci_exclude_tags, 'aarch64')}} + uses: ./.github/workflows/regression.yml + secrets: inherit + with: + runner_type: altinity-regression-tester-aarch64 + commit: c5cae9b244e0839fb307a9fb67a40fe80d93810b + arch: aarch64 + build_sha: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }} + timeout_minutes: 300 + workflow_config: ${{ needs.config_workflow.outputs.data.workflow_config }} + + SignRelease: + needs: [config_workflow, 
build_amd_release] + if: ${{ !failure() && !cancelled() }} + uses: ./.github/workflows/reusable_sign.yml + secrets: inherit + with: + test_name: Sign release + runner_type: altinity-style-checker + data: ${{ needs.config_workflow.outputs.data }} + SignAarch64: + needs: [config_workflow, build_arm_release] + if: ${{ !failure() && !cancelled() }} + uses: ./.github/workflows/reusable_sign.yml + secrets: inherit + with: + test_name: Sign aarch64 + runner_type: altinity-style-checker-aarch64 + data: ${{ needs.config_workflow.outputs.data }} + + FinishCIReport: + if: ${{ !cancelled() }} + needs: + - config_workflow + - dockers_build_amd + - dockers_build_arm + - dockers_build_multiplatform_manifest + - build_amd_debug + - build_amd_asan + - build_amd_tsan + - build_amd_msan + - build_amd_ubsan + - build_amd_binary + - build_arm_asan + - build_arm_binary + - build_amd_release + - build_arm_release + - unit_tests_asan + - unit_tests_tsan + - unit_tests_msan + - unit_tests_ubsan + - docker_server_image + - docker_keeper_image + - install_packages_amd_release + - install_packages_arm_release + - compatibility_check_amd_release + - compatibility_check_arm_release + - stateless_tests_amd_asan_distributed_plan_parallel_1_2 + - stateless_tests_amd_asan_distributed_plan_parallel_2_2 + - stateless_tests_amd_asan_db_disk_distributed_plan_sequential + - stateless_tests_amd_binary_old_analyzer_s3_storage_databasereplicated_parallel + - stateless_tests_amd_binary_old_analyzer_s3_storage_databasereplicated_sequential + - stateless_tests_amd_binary_parallelreplicas_s3_storage_parallel + - stateless_tests_amd_binary_parallelreplicas_s3_storage_sequential + - stateless_tests_amd_debug_asyncinsert_s3_storage_parallel + - stateless_tests_amd_debug_asyncinsert_s3_storage_sequential + - stateless_tests_amd_debug_parallel + - stateless_tests_amd_debug_sequential + - stateless_tests_amd_tsan_parallel_1_2 + - stateless_tests_amd_tsan_parallel_2_2 + - stateless_tests_amd_tsan_sequential_1_2 + - stateless_tests_amd_tsan_sequential_2_2 + - stateless_tests_amd_msan_parallel_1_2 + - stateless_tests_amd_msan_parallel_2_2 + - stateless_tests_amd_msan_sequential_1_2 + - stateless_tests_amd_msan_sequential_2_2 + - stateless_tests_amd_ubsan_parallel + - stateless_tests_amd_ubsan_sequential + - stateless_tests_amd_debug_distributed_plan_s3_storage_parallel + - stateless_tests_amd_debug_distributed_plan_s3_storage_sequential + - stateless_tests_amd_tsan_s3_storage_parallel_1_2 + - stateless_tests_amd_tsan_s3_storage_parallel_2_2 + - stateless_tests_amd_tsan_s3_storage_sequential_1_2 + - stateless_tests_amd_tsan_s3_storage_sequential_2_2 + - stateless_tests_arm_binary_parallel + - stateless_tests_arm_binary_sequential + - integration_tests_amd_asan_db_disk_old_analyzer_1_6 + - integration_tests_amd_asan_db_disk_old_analyzer_2_6 + - integration_tests_amd_asan_db_disk_old_analyzer_3_6 + - integration_tests_amd_asan_db_disk_old_analyzer_4_6 + - integration_tests_amd_asan_db_disk_old_analyzer_5_6 + - integration_tests_amd_asan_db_disk_old_analyzer_6_6 + - integration_tests_amd_binary_1_5 + - integration_tests_amd_binary_2_5 + - integration_tests_amd_binary_3_5 + - integration_tests_amd_binary_4_5 + - integration_tests_amd_binary_5_5 + - integration_tests_arm_binary_distributed_plan_1_4 + - integration_tests_arm_binary_distributed_plan_2_4 + - integration_tests_arm_binary_distributed_plan_3_4 + - integration_tests_arm_binary_distributed_plan_4_4 + - integration_tests_amd_tsan_1_6 + - integration_tests_amd_tsan_2_6 + - 
integration_tests_amd_tsan_3_6 + - integration_tests_amd_tsan_4_6 + - integration_tests_amd_tsan_5_6 + - integration_tests_amd_tsan_6_6 + - stress_test_amd_debug + - stress_test_amd_tsan + - stress_test_arm_asan + - stress_test_arm_asan_s3 + - stress_test_amd_ubsan + - stress_test_amd_msan + - ast_fuzzer_amd_debug + - ast_fuzzer_arm_asan + - ast_fuzzer_amd_tsan + - ast_fuzzer_amd_msan + - ast_fuzzer_amd_ubsan + - buzzhouse_amd_debug + - buzzhouse_arm_asan + - buzzhouse_amd_tsan + - buzzhouse_amd_msan + - buzzhouse_amd_ubsan + - clickbench_amd_release + - clickbench_arm_release + - sqltest + - finish_workflow + - GrypeScanServer + - GrypeScanKeeper + - RegressionTestsRelease + - RegressionTestsAarch64 + - SignRelease + - SignAarch64 + runs-on: [self-hosted, altinity-on-demand, altinity-style-checker-aarch64] + steps: + - name: Check out repository code + uses: Altinity/checkout@19599efdf36c4f3f30eb55d5bb388896faea69f6 + with: + clear-repository: true + - name: Finalize workflow report + if: ${{ !cancelled() }} + uses: ./.github/actions/create_workflow_report + with: + workflow_config: ${{ needs.config_workflow.outputs.data.workflow_config }} + final: true + + SourceUpload: + needs: [config_workflow, build_amd_release] + if: ${{ !failure() && !cancelled() }} + runs-on: [self-hosted, altinity-on-demand, altinity-style-checker-aarch64] + env: + COMMIT_SHA: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }} + PR_NUMBER: ${{ github.event.pull_request.number || 0 }} + VERSION: ${{ fromJson(needs.config_workflow.outputs.data).custom_data.version.string }} + steps: + - name: Check out repository code + uses: Altinity/checkout@19599efdf36c4f3f30eb55d5bb388896faea69f6 + with: + clear-repository: true + ref: ${{ fromJson(needs.config_workflow.outputs.data).git_ref }} + submodules: true + fetch-depth: 0 + filter: tree:0 + - name: Install aws cli + uses: unfor19/install-aws-cli-action@v1 + with: + version: 2 + arch: arm64 + - name: Create source tar + run: | + cd .. 
&& tar czf $RUNNER_TEMP/build_source.src.tar.gz ClickHouse/ + - name: Upload source tar + run: | + if [ "$PR_NUMBER" -eq 0 ]; then + S3_PATH="REFs/$GITHUB_REF_NAME/$COMMIT_SHA/build_amd_release" + else + S3_PATH="PRs/$PR_NUMBER/$COMMIT_SHA/build_amd_release" + fi + + aws s3 cp $RUNNER_TEMP/build_source.src.tar.gz s3://altinity-build-artifacts/$S3_PATH/clickhouse-$VERSION.src.tar.gz diff --git a/.github/workflows/merge_queue.yml b/.github/workflows/merge_queue.yml index d80948ec08f1..790b868632fb 100644 --- a/.github/workflows/merge_queue.yml +++ b/.github/workflows/merge_queue.yml @@ -9,12 +9,26 @@ env: # Force the stdout and stderr streams to be unbuffered PYTHONUNBUFFERED: 1 CHECKOUT_REF: "" + DOCKER_PASSWORD: ${{ secrets.DOCKER_PASSWORD }} + CLICKHOUSE_TEST_STAT_URL: ${{ secrets.CLICKHOUSE_TEST_STAT_URL }} + CLICKHOUSE_TEST_STAT_LOGIN: ${{ secrets.CLICKHOUSE_TEST_STAT_LOGIN }} + CLICKHOUSE_TEST_STAT_PASSWORD: ${{ secrets.CLICKHOUSE_TEST_STAT_PASSWORD }} + AWS_ACCESS_KEY_ID: ${{ secrets.AWS_ACCESS_KEY_ID }} + AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_SECRET_ACCESS_KEY }} + AWS_DEFAULT_REGION: ${{ secrets.AWS_DEFAULT_REGION }} + CHECKS_DATABASE_HOST: ${{ secrets.CHECKS_DATABASE_HOST }} + AZURE_STORAGE_KEY: ${{ secrets.AZURE_STORAGE_KEY }} + AZURE_ACCOUNT_NAME: ${{ secrets.AZURE_ACCOUNT_NAME }} + AZURE_CONTAINER_NAME: ${{ secrets.AZURE_CONTAINER_NAME }} + AZURE_STORAGE_ACCOUNT_URL: "https://${{ secrets.AZURE_ACCOUNT_NAME }}.blob.core.windows.net/" + ROBOT_TOKEN: ${{ secrets.ROBOT_TOKEN }} + GH_TOKEN: ${{ github.token }} jobs: config_workflow: - runs-on: [self-hosted, style-checker-aarch64] + runs-on: [self-hosted, altinity-on-demand, altinity-style-checker-aarch64] needs: [] name: "Config Workflow" outputs: @@ -26,6 +40,26 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Config Workflow" + + - name: Note report location to summary + env: + PR_NUMBER: ${{ github.event.pull_request.number || 0 }} + COMMIT_SHA: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }} + run: | + if [ "$PR_NUMBER" -eq 0 ]; then + PREFIX="REFs/$GITHUB_REF_NAME/$COMMIT_SHA" + else + PREFIX="PRs/$PR_NUMBER/$COMMIT_SHA" + fi + REPORT_LINK=https://s3.amazonaws.com/altinity-build-artifacts/$PREFIX/$GITHUB_RUN_ID/ci_run_report.html + echo "Workflow Run Report: [View Report]($REPORT_LINK)" >> $GITHUB_STEP_SUMMARY + - name: Prepare env script run: | rm -rf ./ci/tmp @@ -53,7 +87,7 @@ jobs: fi dockers_build_amd: - runs-on: [self-hosted, style-checker] + runs-on: [self-hosted, altinity-on-demand, altinity-style-checker] needs: [config_workflow] if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'RG9ja2VycyBCdWlsZCAoYW1kKQ==') }} name: "Dockers Build (amd)" @@ -66,6 +100,13 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Dockers Build (amd)" + - name: Prepare env script run: | rm -rf ./ci/tmp @@ -93,7 +134,7 @@ jobs: fi dockers_build_arm: - runs-on: [self-hosted, style-checker-aarch64] + runs-on: [self-hosted, altinity-on-demand, altinity-style-checker-aarch64] needs: [config_workflow] if: ${{ !cancelled() && 
!contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'RG9ja2VycyBCdWlsZCAoYXJtKQ==') }} name: "Dockers Build (arm)" @@ -106,45 +147,12 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} - - name: Prepare env script - run: | - rm -rf ./ci/tmp - mkdir -p ./ci/tmp - cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF' - export PYTHONPATH=./ci:.: - - cat > ./ci/tmp/workflow_job.json << 'EOF' - ${{ toJson(job) }} - EOF - cat > ./ci/tmp/workflow_status.json << 'EOF' - ${{ toJson(needs) }} - EOF - ENV_SETUP_SCRIPT_EOF - - - name: Run - id: run - run: | - . ./ci/tmp/praktika_setup_env.sh - set -o pipefail - if command -v ts &> /dev/null; then - python3 -m praktika run 'Dockers Build (arm)' --workflow "MergeQueueCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log - else - python3 -m praktika run 'Dockers Build (arm)' --workflow "MergeQueueCI" --ci |& tee ./ci/tmp/job.log - fi - - style_check: - runs-on: [self-hosted, style-checker-aarch64] - needs: [config_workflow, dockers_build_amd, dockers_build_arm] - if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'U3R5bGUgY2hlY2s=') }} - name: "Style check" - outputs: - data: ${{ steps.run.outputs.DATA }} - pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} - steps: - - name: Checkout code - uses: actions/checkout@v4 + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup with: - ref: ${{ env.CHECKOUT_REF }} + test_name: "Dockers Build (arm)" - name: Prepare env script run: | @@ -167,13 +175,13 @@ jobs: . 
./ci/tmp/praktika_setup_env.sh set -o pipefail if command -v ts &> /dev/null; then - python3 -m praktika run 'Style check' --workflow "MergeQueueCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log + python3 -m praktika run 'Dockers Build (arm)' --workflow "MergeQueueCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log else - python3 -m praktika run 'Style check' --workflow "MergeQueueCI" --ci |& tee ./ci/tmp/job.log + python3 -m praktika run 'Dockers Build (arm)' --workflow "MergeQueueCI" --ci |& tee ./ci/tmp/job.log fi fast_test: - runs-on: [self-hosted, amd-large] + runs-on: [self-hosted, altinity-on-demand, altinity-func-tester] needs: [config_workflow, dockers_build_amd, dockers_build_arm] if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'RmFzdCB0ZXN0') }} name: "Fast test" @@ -186,6 +194,13 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Fast test" + - name: Prepare env script run: | rm -rf ./ci/tmp @@ -213,7 +228,7 @@ jobs: fi build_amd_binary: - runs-on: [self-hosted, arm-large] + runs-on: [self-hosted, altinity-on-demand, altinity-func-tester-aarch64] needs: [config_workflow, dockers_build_amd, dockers_build_arm] if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'QnVpbGQgKGFtZF9iaW5hcnkp') }} name: "Build (amd_binary)" @@ -226,6 +241,13 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Build (amd_binary)" + - name: Prepare env script run: | rm -rf ./ci/tmp @@ -253,8 +275,8 @@ jobs: fi finish_workflow: - runs-on: [self-hosted, style-checker-aarch64] - needs: [config_workflow, dockers_build_amd, dockers_build_arm, style_check, fast_test, build_amd_binary] + runs-on: [self-hosted, altinity-on-demand, altinity-style-checker-aarch64] + needs: [config_workflow, dockers_build_amd, dockers_build_arm, fast_test, build_amd_binary] if: ${{ always() }} name: "Finish Workflow" outputs: @@ -266,6 +288,13 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Finish Workflow" + - name: Prepare env script run: | rm -rf ./ci/tmp diff --git a/.github/workflows/nightly_coverage.yml b/.github/workflows/nightly_coverage.yml index 307ac2a3fb4a..0130e949e5ad 100644 --- a/.github/workflows/nightly_coverage.yml +++ b/.github/workflows/nightly_coverage.yml @@ -16,7 +16,7 @@ env: jobs: config_workflow: - runs-on: [self-hosted, style-checker-aarch64] + runs-on: [self-hosted, altinity-on-demand, altinity-style-checker-aarch64] needs: [] name: "Config Workflow" outputs: @@ -28,6 +28,26 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Config Workflow" + + - name: Note report location to summary + env: + PR_NUMBER: ${{ github.event.pull_request.number || 0 }} + COMMIT_SHA: ${{ github.event_name == 'pull_request' && 
github.event.pull_request.head.sha || github.sha }} + run: | + if [ "$PR_NUMBER" -eq 0 ]; then + PREFIX="REFs/$GITHUB_REF_NAME/$COMMIT_SHA" + else + PREFIX="PRs/$PR_NUMBER/$COMMIT_SHA" + fi + REPORT_LINK=https://s3.amazonaws.com/altinity-build-artifacts/$PREFIX/$GITHUB_RUN_ID/ci_run_report.html + echo "Workflow Run Report: [View Report]($REPORT_LINK)" >> $GITHUB_STEP_SUMMARY + - name: Prepare env script run: | rm -rf ./ci/tmp @@ -55,7 +75,7 @@ jobs: fi dockers_build_amd: - runs-on: [self-hosted, style-checker] + runs-on: [self-hosted, altinity-on-demand, altinity-style-checker] needs: [config_workflow] if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'RG9ja2VycyBCdWlsZCAoYW1kKQ==') }} name: "Dockers Build (amd)" @@ -68,6 +88,13 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Dockers Build (amd)" + - name: Prepare env script run: | rm -rf ./ci/tmp @@ -95,7 +122,7 @@ jobs: fi dockers_build_arm: - runs-on: [self-hosted, style-checker-aarch64] + runs-on: [self-hosted, altinity-on-demand, altinity-style-checker-aarch64] needs: [config_workflow] if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'RG9ja2VycyBCdWlsZCAoYXJtKQ==') }} name: "Dockers Build (arm)" @@ -108,6 +135,13 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Dockers Build (arm)" + - name: Prepare env script run: | rm -rf ./ci/tmp @@ -135,7 +169,7 @@ jobs: fi build_amd_coverage: - runs-on: [self-hosted, arm-large] + runs-on: [self-hosted, altinity-on-demand, altinity-func-tester-aarch64] needs: [config_workflow, dockers_build_amd, dockers_build_arm] if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'QnVpbGQgKGFtZF9jb3ZlcmFnZSk=') }} name: "Build (amd_coverage)" @@ -148,6 +182,13 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Build (amd_coverage)" + - name: Prepare env script run: | rm -rf ./ci/tmp @@ -175,7 +216,7 @@ jobs: fi stateless_tests_amd_coverage_1_8: - runs-on: [self-hosted, amd-small] + runs-on: [self-hosted, altinity-on-demand, altinity-func-tester] needs: [config_workflow, dockers_build_amd, dockers_build_arm, build_amd_coverage] if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhbWRfY292ZXJhZ2UsIDEvOCk=') }} name: "Stateless tests (amd_coverage, 1/8)" @@ -188,6 +229,13 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Stateless tests (amd_coverage, 
1/8)" + - name: Prepare env script run: | rm -rf ./ci/tmp @@ -215,7 +263,7 @@ jobs: fi stateless_tests_amd_coverage_2_8: - runs-on: [self-hosted, amd-small] + runs-on: [self-hosted, altinity-on-demand, altinity-func-tester] needs: [config_workflow, dockers_build_amd, dockers_build_arm, build_amd_coverage] if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhbWRfY292ZXJhZ2UsIDIvOCk=') }} name: "Stateless tests (amd_coverage, 2/8)" @@ -228,6 +276,13 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Stateless tests (amd_coverage, 2/8)" + - name: Prepare env script run: | rm -rf ./ci/tmp @@ -255,7 +310,7 @@ jobs: fi stateless_tests_amd_coverage_3_8: - runs-on: [self-hosted, amd-small] + runs-on: [self-hosted, altinity-on-demand, altinity-func-tester] needs: [config_workflow, dockers_build_amd, dockers_build_arm, build_amd_coverage] if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhbWRfY292ZXJhZ2UsIDMvOCk=') }} name: "Stateless tests (amd_coverage, 3/8)" @@ -268,6 +323,13 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Stateless tests (amd_coverage, 3/8)" + - name: Prepare env script run: | rm -rf ./ci/tmp @@ -295,7 +357,7 @@ jobs: fi stateless_tests_amd_coverage_4_8: - runs-on: [self-hosted, amd-small] + runs-on: [self-hosted, altinity-on-demand, altinity-func-tester] needs: [config_workflow, dockers_build_amd, dockers_build_arm, build_amd_coverage] if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhbWRfY292ZXJhZ2UsIDQvOCk=') }} name: "Stateless tests (amd_coverage, 4/8)" @@ -308,6 +370,13 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Stateless tests (amd_coverage, 4/8)" + - name: Prepare env script run: | rm -rf ./ci/tmp @@ -335,7 +404,7 @@ jobs: fi stateless_tests_amd_coverage_5_8: - runs-on: [self-hosted, amd-small] + runs-on: [self-hosted, altinity-on-demand, altinity-func-tester] needs: [config_workflow, dockers_build_amd, dockers_build_arm, build_amd_coverage] if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhbWRfY292ZXJhZ2UsIDUvOCk=') }} name: "Stateless tests (amd_coverage, 5/8)" @@ -348,6 +417,13 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Stateless tests (amd_coverage, 5/8)" + - name: Prepare env script run: | rm -rf ./ci/tmp @@ -375,7 
+451,7 @@ jobs: fi stateless_tests_amd_coverage_6_8: - runs-on: [self-hosted, amd-small] + runs-on: [self-hosted, altinity-on-demand, altinity-func-tester] needs: [config_workflow, dockers_build_amd, dockers_build_arm, build_amd_coverage] if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhbWRfY292ZXJhZ2UsIDYvOCk=') }} name: "Stateless tests (amd_coverage, 6/8)" @@ -388,6 +464,13 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Stateless tests (amd_coverage, 6/8)" + - name: Prepare env script run: | rm -rf ./ci/tmp @@ -415,7 +498,7 @@ jobs: fi stateless_tests_amd_coverage_7_8: - runs-on: [self-hosted, amd-small] + runs-on: [self-hosted, altinity-on-demand, altinity-func-tester] needs: [config_workflow, dockers_build_amd, dockers_build_arm, build_amd_coverage] if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhbWRfY292ZXJhZ2UsIDcvOCk=') }} name: "Stateless tests (amd_coverage, 7/8)" @@ -428,6 +511,13 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Stateless tests (amd_coverage, 7/8)" + - name: Prepare env script run: | rm -rf ./ci/tmp @@ -455,7 +545,7 @@ jobs: fi stateless_tests_amd_coverage_8_8: - runs-on: [self-hosted, amd-small] + runs-on: [self-hosted, altinity-on-demand, altinity-func-tester] needs: [config_workflow, dockers_build_amd, dockers_build_arm, build_amd_coverage] if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhbWRfY292ZXJhZ2UsIDgvOCk=') }} name: "Stateless tests (amd_coverage, 8/8)" @@ -468,6 +558,13 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Stateless tests (amd_coverage, 8/8)" + - name: Prepare env script run: | rm -rf ./ci/tmp @@ -495,7 +592,7 @@ jobs: fi finish_workflow: - runs-on: [self-hosted, style-checker-aarch64] + runs-on: [self-hosted, altinity-on-demand, altinity-style-checker-aarch64] needs: [config_workflow, dockers_build_amd, dockers_build_arm, build_amd_coverage, stateless_tests_amd_coverage_1_8, stateless_tests_amd_coverage_2_8, stateless_tests_amd_coverage_3_8, stateless_tests_amd_coverage_4_8, stateless_tests_amd_coverage_5_8, stateless_tests_amd_coverage_6_8, stateless_tests_amd_coverage_7_8, stateless_tests_amd_coverage_8_8] if: ${{ always() }} name: "Finish Workflow" @@ -508,6 +605,13 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Finish Workflow" + - name: Prepare env script run: | rm -rf ./ci/tmp diff --git a/.github/workflows/nightly_fuzzers.yml b/.github/workflows/nightly_fuzzers.yml index 
ff5f3b0c0a1a..640032352e28 100644 --- a/.github/workflows/nightly_fuzzers.yml +++ b/.github/workflows/nightly_fuzzers.yml @@ -16,7 +16,7 @@ env: jobs: config_workflow: - runs-on: [self-hosted, style-checker-aarch64] + runs-on: [self-hosted, altinity-on-demand, altinity-style-checker-aarch64] needs: [] name: "Config Workflow" outputs: @@ -28,6 +28,26 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Config Workflow" + + - name: Note report location to summary + env: + PR_NUMBER: ${{ github.event.pull_request.number || 0 }} + COMMIT_SHA: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }} + run: | + if [ "$PR_NUMBER" -eq 0 ]; then + PREFIX="REFs/$GITHUB_REF_NAME/$COMMIT_SHA" + else + PREFIX="PRs/$PR_NUMBER/$COMMIT_SHA" + fi + REPORT_LINK=https://s3.amazonaws.com/altinity-build-artifacts/$PREFIX/$GITHUB_RUN_ID/ci_run_report.html + echo "Workflow Run Report: [View Report]($REPORT_LINK)" >> $GITHUB_STEP_SUMMARY + - name: Prepare env script run: | rm -rf ./ci/tmp @@ -55,7 +75,7 @@ jobs: fi dockers_build_amd: - runs-on: [self-hosted, style-checker] + runs-on: [self-hosted, altinity-on-demand, altinity-style-checker] needs: [config_workflow] if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'RG9ja2VycyBCdWlsZCAoYW1kKQ==') }} name: "Dockers Build (amd)" @@ -68,6 +88,13 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Dockers Build (amd)" + - name: Prepare env script run: | rm -rf ./ci/tmp @@ -95,7 +122,7 @@ jobs: fi dockers_build_arm: - runs-on: [self-hosted, style-checker-aarch64] + runs-on: [self-hosted, altinity-on-demand, altinity-style-checker-aarch64] needs: [config_workflow] if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'RG9ja2VycyBCdWlsZCAoYXJtKQ==') }} name: "Dockers Build (arm)" @@ -108,6 +135,13 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Dockers Build (arm)" + - name: Prepare env script run: | rm -rf ./ci/tmp @@ -135,7 +169,7 @@ jobs: fi build_arm_fuzzers: - runs-on: [self-hosted, arm-large] + runs-on: [self-hosted, altinity-on-demand, altinity-func-tester-aarch64] needs: [config_workflow, dockers_build_amd, dockers_build_arm] if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'QnVpbGQgKGFybV9mdXp6ZXJzKQ==') }} name: "Build (arm_fuzzers)" @@ -148,6 +182,13 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Build (arm_fuzzers)" + - name: Prepare env script run: | rm -rf ./ci/tmp @@ -175,7 +216,7 @@ jobs: fi libfuzzer_tests: - runs-on: [self-hosted, arm-medium] + runs-on: 
+    runs-on: [self-hosted, altinity-on-demand, altinity-func-tester-aarch64]
     needs: [config_workflow, dockers_build_amd, dockers_build_arm, build_arm_fuzzers]
     if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'bGliRnV6emVyIHRlc3Rz') }}
     name: "libFuzzer tests"
@@ -188,6 +229,13 @@ jobs:
       with:
         ref: ${{ env.CHECKOUT_REF }}

+      - name: Setup
+        uses: ./.github/actions/runner_setup
+      - name: Docker setup
+        uses: ./.github/actions/docker_setup
+        with:
+          test_name: "libFuzzer tests"
+
       - name: Prepare env script
         run: |
           rm -rf ./ci/tmp
@@ -215,7 +263,7 @@ jobs:
           fi

   finish_workflow:
-    runs-on: [self-hosted, style-checker-aarch64]
+    runs-on: [self-hosted, altinity-on-demand, altinity-style-checker-aarch64]
     needs: [config_workflow, dockers_build_amd, dockers_build_arm, build_arm_fuzzers, libfuzzer_tests]
     if: ${{ always() }}
     name: "Finish Workflow"
@@ -228,6 +276,13 @@ jobs:
       with:
         ref: ${{ env.CHECKOUT_REF }}

+      - name: Setup
+        uses: ./.github/actions/runner_setup
+      - name: Docker setup
+        uses: ./.github/actions/docker_setup
+        with:
+          test_name: "Finish Workflow"
+
       - name: Prepare env script
         run: |
           rm -rf ./ci/tmp
diff --git a/.github/workflows/nightly_jepsen.yml b/.github/workflows/nightly_jepsen.yml
index c89565dadaba..bf79fb2c1e54 100644
--- a/.github/workflows/nightly_jepsen.yml
+++ b/.github/workflows/nightly_jepsen.yml
@@ -16,7 +16,7 @@ env:
 jobs:

   config_workflow:
-    runs-on: [self-hosted, style-checker-aarch64]
+    runs-on: [self-hosted, altinity-on-demand, altinity-style-checker-aarch64]
     needs: []
     name: "Config Workflow"
     outputs:
@@ -28,6 +28,26 @@ jobs:
       with:
         ref: ${{ env.CHECKOUT_REF }}

+      - name: Setup
+        uses: ./.github/actions/runner_setup
+      - name: Docker setup
+        uses: ./.github/actions/docker_setup
+        with:
+          test_name: "Config Workflow"
+
+      - name: Note report location to summary
+        env:
+          PR_NUMBER: ${{ github.event.pull_request.number || 0 }}
+          COMMIT_SHA: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
+        run: |
+          if [ "$PR_NUMBER" -eq 0 ]; then
+            PREFIX="REFs/$GITHUB_REF_NAME/$COMMIT_SHA"
+          else
+            PREFIX="PRs/$PR_NUMBER/$COMMIT_SHA"
+          fi
+          REPORT_LINK=https://s3.amazonaws.com/altinity-build-artifacts/$PREFIX/$GITHUB_RUN_ID/ci_run_report.html
+          echo "Workflow Run Report: [View Report]($REPORT_LINK)" >> $GITHUB_STEP_SUMMARY
+
       - name: Prepare env script
         run: |
           rm -rf ./ci/tmp
@@ -55,7 +75,7 @@ jobs:
           fi

   dockers_build_amd:
-    runs-on: [self-hosted, style-checker]
+    runs-on: [self-hosted, altinity-on-demand, altinity-style-checker]
     needs: [config_workflow]
     if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'RG9ja2VycyBCdWlsZCAoYW1kKQ==') }}
     name: "Dockers Build (amd)"
@@ -68,6 +88,13 @@ jobs:
       with:
         ref: ${{ env.CHECKOUT_REF }}

+      - name: Setup
+        uses: ./.github/actions/runner_setup
+      - name: Docker setup
+        uses: ./.github/actions/docker_setup
+        with:
+          test_name: "Dockers Build (amd)"
+
       - name: Prepare env script
         run: |
           rm -rf ./ci/tmp
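The "Note report location to summary" step added to every config_workflow job derives an S3 prefix from the event type and posts the report link to the run summary. The same logic, sketched in Python with hypothetical values (it simply mirrors the shell above):

    def report_link(pr_number: int, ref_name: str, sha: str, run_id: int) -> str:
        # PRs/<pr>/<sha> for pull requests, REFs/<branch>/<sha> for everything else
        prefix = f"PRs/{pr_number}/{sha}" if pr_number else f"REFs/{ref_name}/{sha}"
        return f"https://s3.amazonaws.com/altinity-build-artifacts/{prefix}/{run_id}/ci_run_report.html"

    # e.g. a pull-request build (all values hypothetical):
    print(report_link(123, "antalya", "abc123", 456))

So a scheduled or push run of this workflow lands under REFs/<branch>, while PR runs are grouped per PR number.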
@@ -95,7 +122,7 @@ jobs:
           fi

   dockers_build_arm:
-    runs-on: [self-hosted, style-checker-aarch64]
+    runs-on: [self-hosted, altinity-on-demand, altinity-style-checker-aarch64]
     needs: [config_workflow]
     if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'RG9ja2VycyBCdWlsZCAoYXJtKQ==') }}
     name: "Dockers Build (arm)"
@@ -108,6 +135,13 @@ jobs:
       with:
         ref: ${{ env.CHECKOUT_REF }}

+      - name: Setup
+        uses: ./.github/actions/runner_setup
+      - name: Docker setup
+        uses: ./.github/actions/docker_setup
+        with:
+          test_name: "Dockers Build (arm)"
+
       - name: Prepare env script
         run: |
           rm -rf ./ci/tmp
@@ -135,7 +169,7 @@ jobs:
           fi

   build_amd_binary:
-    runs-on: [self-hosted, arm-large]
+    runs-on: [self-hosted, altinity-on-demand, altinity-func-tester-aarch64]
     needs: [config_workflow, dockers_build_amd, dockers_build_arm]
     if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'QnVpbGQgKGFtZF9iaW5hcnkp') }}
     name: "Build (amd_binary)"
@@ -148,6 +182,13 @@ jobs:
       with:
         ref: ${{ env.CHECKOUT_REF }}

+      - name: Setup
+        uses: ./.github/actions/runner_setup
+      - name: Docker setup
+        uses: ./.github/actions/docker_setup
+        with:
+          test_name: "Build (amd_binary)"
+
       - name: Prepare env script
         run: |
           rm -rf ./ci/tmp
@@ -175,7 +216,7 @@ jobs:
           fi

   clickhouse_keeper_jepsen:
-    runs-on: [self-hosted, style-checker]
+    runs-on: [self-hosted, altinity-on-demand, altinity-style-checker]
     needs: [config_workflow, dockers_build_amd, dockers_build_arm, build_amd_binary]
     if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'Q2xpY2tIb3VzZSBLZWVwZXIgSmVwc2Vu') }}
     name: "ClickHouse Keeper Jepsen"
@@ -188,6 +229,13 @@ jobs:
       with:
         ref: ${{ env.CHECKOUT_REF }}

+      - name: Setup
+        uses: ./.github/actions/runner_setup
+      - name: Docker setup
+        uses: ./.github/actions/docker_setup
+        with:
+          test_name: "ClickHouse Keeper Jepsen"
+
       - name: Prepare env script
         run: |
           rm -rf ./ci/tmp
@@ -215,7 +263,7 @@ jobs:
           fi

   finish_workflow:
-    runs-on: [self-hosted, style-checker-aarch64]
+    runs-on: [self-hosted, altinity-on-demand, altinity-style-checker-aarch64]
     needs: [config_workflow, dockers_build_amd, dockers_build_arm, build_amd_binary, clickhouse_keeper_jepsen]
     if: ${{ always() }}
     name: "Finish Workflow"
@@ -228,6 +276,13 @@ jobs:
       with:
         ref: ${{ env.CHECKOUT_REF }}

+      - name: Setup
+        uses: ./.github/actions/runner_setup
+      - name: Docker setup
+        uses: ./.github/actions/docker_setup
+        with:
+          test_name: "Finish Workflow"
+
       - name: Prepare env script
         run: |
           rm -rf ./ci/tmp
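Every dependent job above is gated on `needs.*.outputs.pipeline_status` containing neither 'failure' nor 'undefined'. Since the "Prepare env script" step dumps `toJson(needs)` into ./ci/tmp/workflow_status.json, the gate can be approximated offline; a sketch assuming that file layout (the exact schema of the dumped object is an assumption here):

    import json

    with open("./ci/tmp/workflow_status.json") as f:
        needs = json.load(f)  # job id -> {"result": ..., "outputs": {...}}

    statuses = [
        job.get("outputs", {}).get("pipeline_status", "undefined")
        for job in needs.values()
    ]
    # Mirrors: !contains(..., 'failure') && !contains(..., 'undefined')
    ok = not any("failure" in s or "undefined" in s for s in statuses)
    print("downstream job may run" if ok else "pipeline gated off")

The 'undefined' fallback in each job's `pipeline_status` output is what makes a skipped or crashed upstream job block its dependents.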
diff --git a/.github/workflows/nightly_statistics.yml b/.github/workflows/nightly_statistics.yml
index 235fec19958c..e12766660137 100644
--- a/.github/workflows/nightly_statistics.yml
+++ b/.github/workflows/nightly_statistics.yml
@@ -16,7 +16,7 @@ env:
 jobs:

   config_workflow:
-    runs-on: [self-hosted, style-checker-aarch64]
+    runs-on: [self-hosted, altinity-on-demand, altinity-style-checker-aarch64]
     needs: []
     name: "Config Workflow"
     outputs:
@@ -28,6 +28,26 @@ jobs:
       with:
         ref: ${{ env.CHECKOUT_REF }}

+      - name: Setup
+        uses: ./.github/actions/runner_setup
+      - name: Docker setup
+        uses: ./.github/actions/docker_setup
+        with:
+          test_name: "Config Workflow"
+
+      - name: Note report location to summary
+        env:
+          PR_NUMBER: ${{ github.event.pull_request.number || 0 }}
+          COMMIT_SHA: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
+        run: |
+          if [ "$PR_NUMBER" -eq 0 ]; then
+            PREFIX="REFs/$GITHUB_REF_NAME/$COMMIT_SHA"
+          else
+            PREFIX="PRs/$PR_NUMBER/$COMMIT_SHA"
+          fi
+          REPORT_LINK=https://s3.amazonaws.com/altinity-build-artifacts/$PREFIX/$GITHUB_RUN_ID/ci_run_report.html
+          echo "Workflow Run Report: [View Report]($REPORT_LINK)" >> $GITHUB_STEP_SUMMARY
+
       - name: Prepare env script
         run: |
           rm -rf ./ci/tmp
@@ -55,7 +75,7 @@ jobs:
           fi

   collect_statistics:
-    runs-on: [self-hosted, style-checker-aarch64]
+    runs-on: [self-hosted, altinity-on-demand, altinity-style-checker-aarch64]
     needs: [config_workflow]
     name: "Collect Statistics"
     outputs:
@@ -67,6 +87,13 @@ jobs:
       with:
         ref: ${{ env.CHECKOUT_REF }}

+      - name: Setup
+        uses: ./.github/actions/runner_setup
+      - name: Docker setup
+        uses: ./.github/actions/docker_setup
+        with:
+          test_name: "Collect Statistics"
+
       - name: Prepare env script
         run: |
           rm -rf ./ci/tmp
diff --git a/.github/workflows/pull_request.yml b/.github/workflows/pull_request.yml
index 154885f0e9b6..c4841e215a81 100644
--- a/.github/workflows/pull_request.yml
+++ b/.github/workflows/pull_request.yml
@@ -3,15 +3,36 @@
 name: PR

 on:
+  workflow_dispatch:
+    inputs:
+      no_cache:
+        description: Run without cache
+        required: false
+        type: boolean
+        default: false
   pull_request:
-    branches: ['master']
+    branches: ['antalya', 'releases/*', 'antalya-*']

 env:
   # Force the stdout and stderr streams to be unbuffered
   PYTHONUNBUFFERED: 1
   DISABLE_CI_MERGE_COMMIT: ${{ vars.DISABLE_CI_MERGE_COMMIT || '0' }}
-  DISABLE_CI_CACHE: ${{ vars.DISABLE_CI_CACHE || '0' }}
+  DISABLE_CI_CACHE: ${{ github.event.inputs.no_cache || '0' }}
   CHECKOUT_REF: ${{ vars.DISABLE_CI_MERGE_COMMIT == '1' && github.event.pull_request.head.sha || '' }}
+  DOCKER_PASSWORD: ${{ secrets.DOCKER_PASSWORD }}
+  CLICKHOUSE_TEST_STAT_URL: ${{ secrets.CLICKHOUSE_TEST_STAT_URL }}
+  CLICKHOUSE_TEST_STAT_LOGIN: ${{ secrets.CLICKHOUSE_TEST_STAT_LOGIN }}
+  CLICKHOUSE_TEST_STAT_PASSWORD: ${{ secrets.CLICKHOUSE_TEST_STAT_PASSWORD }}
+  AWS_ACCESS_KEY_ID: ${{ secrets.AWS_ACCESS_KEY_ID }}
+  AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
+  AWS_DEFAULT_REGION: ${{ secrets.AWS_DEFAULT_REGION }}
+  CHECKS_DATABASE_HOST: ${{ secrets.CHECKS_DATABASE_HOST }}
+  AZURE_STORAGE_KEY: ${{ secrets.AZURE_STORAGE_KEY }}
+  AZURE_ACCOUNT_NAME: ${{ secrets.AZURE_ACCOUNT_NAME }}
+  AZURE_CONTAINER_NAME: ${{ secrets.AZURE_CONTAINER_NAME }}
+  AZURE_STORAGE_ACCOUNT_URL: "https://${{ secrets.AZURE_ACCOUNT_NAME }}.blob.core.windows.net/"
+  ROBOT_TOKEN: ${{ secrets.ROBOT_TOKEN }}
+  GH_TOKEN: ${{ github.token }}

 # Allow updating GH commit statuses and PR comments to post an actual job reports link
 permissions: write-all
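With this change, `DISABLE_CI_CACHE` follows the manual `no_cache` dispatch input instead of a repository variable; on ordinary pull_request events the input is absent, so the `|| '0'` fallback applies. Roughly, in Python terms (illustrative analogy only, not GitHub's expression engine):

    def disable_ci_cache(no_cache_input: str | None) -> str:
        # github.event.inputs.no_cache is only populated on workflow_dispatch;
        # an empty/missing value falls back to '0', as in the expression above.
        return no_cache_input or "0"

    assert disable_ci_cache(None) == "0"        # plain pull_request event
    assert disable_ci_cache("true") == "true"   # manual run with the box ticked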
@@ -19,7 +40,7 @@ permissions: write-all
 jobs:

   config_workflow:
-    runs-on: [self-hosted, style-checker-aarch64]
+    runs-on: [self-hosted, altinity-on-demand, altinity-style-checker-aarch64]
     needs: []
     name: "Config Workflow"
     outputs:
@@ -31,6 +52,26 @@ jobs:
       with:
         ref: ${{ env.CHECKOUT_REF }}

+      - name: Setup
+        uses: ./.github/actions/runner_setup
+      - name: Docker setup
+        uses: ./.github/actions/docker_setup
+        with:
+          test_name: "Config Workflow"
+
+      - name: Note report location to summary
+        env:
+          PR_NUMBER: ${{ github.event.pull_request.number || 0 }}
+          COMMIT_SHA: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
+        run: |
+          if [ "$PR_NUMBER" -eq 0 ]; then
+            PREFIX="REFs/$GITHUB_REF_NAME/$COMMIT_SHA"
+          else
+            PREFIX="PRs/$PR_NUMBER/$COMMIT_SHA"
+          fi
+          REPORT_LINK=https://s3.amazonaws.com/altinity-build-artifacts/$PREFIX/$GITHUB_RUN_ID/ci_run_report.html
+          echo "Workflow Run Report: [View Report]($REPORT_LINK)" >> $GITHUB_STEP_SUMMARY
+
       - name: Prepare env script
         run: |
           rm -rf ./ci/tmp
@@ -58,7 +99,7 @@ jobs:
           fi

   dockers_build_amd:
-    runs-on: [self-hosted, style-checker]
+    runs-on: [self-hosted, altinity-on-demand, altinity-style-checker]
     needs: [config_workflow]
     if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'RG9ja2VycyBCdWlsZCAoYW1kKQ==') }}
     name: "Dockers Build (amd)"
@@ -71,6 +112,13 @@ jobs:
       with:
         ref: ${{ env.CHECKOUT_REF }}

+      - name: Setup
+        uses: ./.github/actions/runner_setup
+      - name: Docker setup
+        uses: ./.github/actions/docker_setup
+        with:
+          test_name: "Dockers Build (amd)"
+
       - name: Prepare env script
         run: |
           rm -rf ./ci/tmp
@@ -98,7 +146,7 @@ jobs:
           fi

   dockers_build_arm:
-    runs-on: [self-hosted, style-checker-aarch64]
+    runs-on: [self-hosted, altinity-on-demand, altinity-style-checker-aarch64]
     needs: [config_workflow]
     if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'RG9ja2VycyBCdWlsZCAoYXJtKQ==') }}
     name: "Dockers Build (arm)"
@@ -111,6 +159,13 @@ jobs:
       with:
         ref: ${{ env.CHECKOUT_REF }}

+      - name: Setup
+        uses: ./.github/actions/runner_setup
+      - name: Docker setup
+        uses: ./.github/actions/docker_setup
+        with:
+          test_name: "Dockers Build (arm)"
+
       - name: Prepare env script
         run: |
           rm -rf ./ci/tmp
@@ -138,7 +193,7 @@ jobs:
           fi

   dockers_build_multiplatform_manifest:
-    runs-on: [self-hosted, style-checker]
+    runs-on: [self-hosted, altinity-on-demand, altinity-style-checker]
     needs: [config_workflow, dockers_build_amd, dockers_build_arm]
     if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'RG9ja2VycyBCdWlsZCAobXVsdGlwbGF0Zm9ybSBtYW5pZmVzdCk=') }}
     name: "Dockers Build (multiplatform manifest)"
@@ -151,85 +206,12 @@ jobs:
       with:
         ref: ${{ env.CHECKOUT_REF }}

-      - name: Prepare env script
-        run: |
-          rm -rf ./ci/tmp
-          mkdir -p ./ci/tmp
-          cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF'
-          export PYTHONPATH=./ci:.:
-
-          cat > ./ci/tmp/workflow_job.json << 'EOF'
-          ${{ toJson(job) }}
-          EOF
-          cat > ./ci/tmp/workflow_status.json << 'EOF'
-          ${{ toJson(needs) }}
-          EOF
-          ENV_SETUP_SCRIPT_EOF
-
-      - name: Run
-        id: run
-        run: |
-          . ./ci/tmp/praktika_setup_env.sh
-          set -o pipefail
-          if command -v ts &> /dev/null; then
-            python3 -m praktika run 'Dockers Build (multiplatform manifest)' --workflow "PR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
-          else
-            python3 -m praktika run 'Dockers Build (multiplatform manifest)' --workflow "PR" --ci |& tee ./ci/tmp/job.log
-          fi
-
-  style_check:
-    runs-on: [self-hosted, style-checker-aarch64]
-    needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest]
-    if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'U3R5bGUgY2hlY2s=') }}
-    name: "Style check"
-    outputs:
-      data: ${{ steps.run.outputs.DATA }}
-      pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }}
-    steps:
-      - name: Checkout code
-        uses: actions/checkout@v4
-        with:
-          ref: ${{ env.CHECKOUT_REF }}
-
-      - name: Prepare env script
-        run: |
-          rm -rf ./ci/tmp
-          mkdir -p ./ci/tmp
-          cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF'
-          export PYTHONPATH=./ci:.:
-
-          cat > ./ci/tmp/workflow_job.json << 'EOF'
-          ${{ toJson(job) }}
-          EOF
-          cat > ./ci/tmp/workflow_status.json << 'EOF'
-          ${{ toJson(needs) }}
-          EOF
-          ENV_SETUP_SCRIPT_EOF
-
-      - name: Run
-        id: run
-        run: |
-          . ./ci/tmp/praktika_setup_env.sh
-          set -o pipefail
-          if command -v ts &> /dev/null; then
-            python3 -m praktika run 'Style check' --workflow "PR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
-          else
-            python3 -m praktika run 'Style check' --workflow "PR" --ci |& tee ./ci/tmp/job.log
-          fi
-
-  docs_check:
-    runs-on: [self-hosted, arm-medium]
-    needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, style_check, fast_test, build_arm_tidy, build_arm_binary]
-    if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'RG9jcyBjaGVjaw==') }}
-    name: "Docs check"
-    outputs:
-      data: ${{ steps.run.outputs.DATA }}
-      pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }}
-    steps:
-      - name: Checkout code
-        uses: actions/checkout@v4
+      - name: Setup
+        uses: ./.github/actions/runner_setup
+      - name: Docker setup
+        uses: ./.github/actions/docker_setup
         with:
-          ref: ${{ env.CHECKOUT_REF }}
+          test_name: "Dockers Build (multiplatform manifest)"

       - name: Prepare env script
         run: |
@@ -252,13 +234,13 @@ jobs:
           . ./ci/tmp/praktika_setup_env.sh
           set -o pipefail
           if command -v ts &> /dev/null; then
-            python3 -m praktika run 'Docs check' --workflow "PR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
+            python3 -m praktika run 'Dockers Build (multiplatform manifest)' --workflow "PR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
           else
-            python3 -m praktika run 'Docs check' --workflow "PR" --ci |& tee ./ci/tmp/job.log
+            python3 -m praktika run 'Dockers Build (multiplatform manifest)' --workflow "PR" --ci |& tee ./ci/tmp/job.log
           fi

   fast_test:
-    runs-on: [self-hosted, amd-large]
+    runs-on: [self-hosted, altinity-on-demand, altinity-func-tester]
     needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest]
     if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'RmFzdCB0ZXN0') }}
     name: "Fast test"
@@ -271,45 +253,12 @@ jobs:
       with:
         ref: ${{ env.CHECKOUT_REF }}

-      - name: Prepare env script
-        run: |
-          rm -rf ./ci/tmp
-          mkdir -p ./ci/tmp
-          cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF'
-          export PYTHONPATH=./ci:.:
-
-          cat > ./ci/tmp/workflow_job.json << 'EOF'
-          ${{ toJson(job) }}
-          EOF
-          cat > ./ci/tmp/workflow_status.json << 'EOF'
-          ${{ toJson(needs) }}
-          EOF
-          ENV_SETUP_SCRIPT_EOF
-
-      - name: Run
-        id: run
-        run: |
-          . ./ci/tmp/praktika_setup_env.sh
-          set -o pipefail
-          if command -v ts &> /dev/null; then
-            python3 -m praktika run 'Fast test' --workflow "PR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
-          else
-            python3 -m praktika run 'Fast test' --workflow "PR" --ci |& tee ./ci/tmp/job.log
-          fi
-
-  build_arm_tidy:
-    runs-on: [self-hosted, arm-large]
-    needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest]
-    if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'QnVpbGQgKGFybV90aWR5KQ==') }}
-    name: "Build (arm_tidy)"
-    outputs:
-      data: ${{ steps.run.outputs.DATA }}
-      pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }}
-    steps:
-      - name: Checkout code
-        uses: actions/checkout@v4
+      - name: Setup
+        uses: ./.github/actions/runner_setup
+      - name: Docker setup
+        uses: ./.github/actions/docker_setup
         with:
-          ref: ${{ env.CHECKOUT_REF }}
+          test_name: "Fast test"

       - name: Prepare env script
         run: |
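Job ids in these generated workflows are derived from the display names (e.g. 'Dockers Build (multiplatform manifest)' → dockers_build_multiplatform_manifest), which is why renaming a check also renames its YAML key. A sketch of the apparent normalization, inferred from the name/id pairs in this patch rather than taken from praktika's source:

    import re

    def job_id(name: str) -> str:
        # lowercase, collapse every non-alphanumeric run to '_', trim the ends
        return re.sub(r"[^a-z0-9]+", "_", name.lower()).strip("_")

    assert job_id("Dockers Build (multiplatform manifest)") == "dockers_build_multiplatform_manifest"
    assert job_id("Stateless tests (amd_coverage, 6/8)") == "stateless_tests_amd_coverage_6_8"
    assert job_id("libFuzzer tests") == "libfuzzer_tests"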
@@ -332,14 +281,14 @@ jobs:
           . ./ci/tmp/praktika_setup_env.sh
           set -o pipefail
           if command -v ts &> /dev/null; then
-            python3 -m praktika run 'Build (arm_tidy)' --workflow "PR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
+            python3 -m praktika run 'Fast test' --workflow "PR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
           else
-            python3 -m praktika run 'Build (arm_tidy)' --workflow "PR" --ci |& tee ./ci/tmp/job.log
+            python3 -m praktika run 'Fast test' --workflow "PR" --ci |& tee ./ci/tmp/job.log
           fi

   build_amd_debug:
-    runs-on: [self-hosted, arm-large]
-    needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, style_check, fast_test, build_arm_tidy]
+    runs-on: [self-hosted, altinity-on-demand, altinity-func-tester-aarch64]
+    needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, fast_test]
     if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'QnVpbGQgKGFtZF9kZWJ1Zyk=') }}
     name: "Build (amd_debug)"
     outputs:
@@ -351,6 +300,13 @@ jobs:
       with:
         ref: ${{ env.CHECKOUT_REF }}

+      - name: Setup
+        uses: ./.github/actions/runner_setup
+      - name: Docker setup
+        uses: ./.github/actions/docker_setup
+        with:
+          test_name: "Build (amd_debug)"
+
       - name: Prepare env script
         run: |
           rm -rf ./ci/tmp
@@ -378,8 +334,8 @@ jobs:
           fi

   build_amd_asan:
-    runs-on: [self-hosted, arm-large]
-    needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, style_check, fast_test, build_arm_tidy]
+    runs-on: [self-hosted, altinity-on-demand, altinity-func-tester-aarch64]
+    needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, fast_test]
     if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'QnVpbGQgKGFtZF9hc2FuKQ==') }}
     name: "Build (amd_asan)"
     outputs:
@@ -391,6 +347,13 @@ jobs:
       with:
         ref: ${{ env.CHECKOUT_REF }}

+      - name: Setup
+        uses: ./.github/actions/runner_setup
+      - name: Docker setup
+        uses: ./.github/actions/docker_setup
+        with:
+          test_name: "Build (amd_asan)"
+
       - name: Prepare env script
         run: |
           rm -rf ./ci/tmp
@@ -418,8 +381,8 @@ jobs:
           fi

   build_amd_tsan:
-    runs-on: [self-hosted, arm-large]
-    needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, style_check, fast_test, build_arm_tidy]
+    runs-on: [self-hosted, altinity-on-demand, altinity-func-tester-aarch64]
+    needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, fast_test]
     if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'QnVpbGQgKGFtZF90c2FuKQ==') }}
     name: "Build (amd_tsan)"
     outputs:
@@ -431,6 +394,13 @@ jobs:
       with:
         ref: ${{ env.CHECKOUT_REF }}

+      - name: Setup
+        uses: ./.github/actions/runner_setup
+      - name: Docker setup
+        uses: ./.github/actions/docker_setup
+        with:
+          test_name: "Build (amd_tsan)"
+
       - name: Prepare env script
         run: |
           rm -rf ./ci/tmp
@@ -458,8 +428,8 @@ jobs:
           fi

   build_amd_msan:
-    runs-on: [self-hosted, arm-large]
-    needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, style_check, fast_test, build_arm_tidy]
+    runs-on: [self-hosted, altinity-on-demand, altinity-func-tester-aarch64]
+    needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, fast_test]
     if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'QnVpbGQgKGFtZF9tc2FuKQ==') }}
     name: "Build (amd_msan)"
     outputs:
@@ -471,6 +441,13 @@ jobs:
       with:
         ref: ${{ env.CHECKOUT_REF }}

+      - name: Setup
+        uses: ./.github/actions/runner_setup
+      - name: Docker setup
+        uses: ./.github/actions/docker_setup
+        with:
+          test_name: "Build (amd_msan)"
+
       - name: Prepare env script
         run: |
           rm -rf ./ci/tmp
@@ -498,8 +475,8 @@ jobs:
           fi

   build_amd_ubsan:
-    runs-on: [self-hosted, arm-large]
-    needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, style_check, fast_test, build_arm_tidy]
+    runs-on: [self-hosted, altinity-on-demand, altinity-func-tester-aarch64]
+    needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, fast_test]
     if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'QnVpbGQgKGFtZF91YnNhbik=') }}
     name: "Build (amd_ubsan)"
     outputs:
@@ -511,6 +488,13 @@ jobs:
       with:
         ref: ${{ env.CHECKOUT_REF }}

+      - name: Setup
+        uses: ./.github/actions/runner_setup
+      - name: Docker setup
+        uses: ./.github/actions/docker_setup
+        with:
+          test_name: "Build (amd_ubsan)"
+
       - name: Prepare env script
         run: |
           rm -rf ./ci/tmp
@@ -538,8 +522,8 @@ jobs:
           fi

   build_amd_binary:
-    runs-on: [self-hosted, arm-large]
-    needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, style_check, fast_test, build_arm_tidy]
+    runs-on: [self-hosted, altinity-on-demand, altinity-func-tester-aarch64]
+    needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, fast_test]
     if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'QnVpbGQgKGFtZF9iaW5hcnkp') }}
     name: "Build (amd_binary)"
     outputs:
@@ -551,6 +535,13 @@ jobs:
       with:
         ref: ${{ env.CHECKOUT_REF }}

+      - name: Setup
+        uses: ./.github/actions/runner_setup
+      - name: Docker setup
+        uses: ./.github/actions/docker_setup
+        with:
+          test_name: "Build (amd_binary)"
+
       - name: Prepare env script
         run: |
           rm -rf ./ci/tmp
@@ -578,8 +569,8 @@ jobs:
           fi

   build_arm_asan:
-    runs-on: [self-hosted, arm-large]
-    needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, style_check, fast_test, build_arm_tidy]
+    runs-on: [self-hosted, altinity-on-demand, altinity-func-tester-aarch64]
+    needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, fast_test]
     if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'QnVpbGQgKGFybV9hc2FuKQ==') }}
     name: "Build (arm_asan)"
     outputs:
@@ -591,6 +582,13 @@ jobs:
       with:
         ref: ${{ env.CHECKOUT_REF }}

+      - name: Setup
+        uses: ./.github/actions/runner_setup
+      - name: Docker setup
+        uses: ./.github/actions/docker_setup
+        with:
+          test_name: "Build (arm_asan)"
+
       - name: Prepare env script
         run: |
           rm -rf ./ci/tmp
@@ -618,8 +616,8 @@ jobs:
           fi

   build_arm_binary:
-    runs-on: [self-hosted, arm-large]
-    needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, style_check, fast_test, build_arm_tidy]
+    runs-on: [self-hosted, altinity-on-demand, altinity-func-tester-aarch64]
+    needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, fast_test]
     if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'QnVpbGQgKGFybV9iaW5hcnkp') }}
     name: "Build (arm_binary)"
     outputs:
@@ -631,6 +629,13 @@ jobs:
       with:
         ref: ${{ env.CHECKOUT_REF }}

+      - name: Setup
+        uses: ./.github/actions/runner_setup
+      - name: Docker setup
+        uses: ./.github/actions/docker_setup
+        with:
+          test_name: "Build (arm_binary)"
+
       - name: Prepare env script
         run: |
           rm -rf ./ci/tmp
@@ -658,8 +663,8 @@ jobs:
           fi

   build_arm_tsan:
-    runs-on: [self-hosted, arm-large]
-    needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, style_check, fast_test, build_arm_tidy]
+    runs-on: [self-hosted, altinity-on-demand, altinity-func-tester-aarch64]
+    needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, fast_test]
     if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'QnVpbGQgKGFybV90c2FuKQ==') }}
     name: "Build (arm_tsan)"
     outputs:
@@ -671,6 +676,13 @@ jobs:
       with:
         ref: ${{ env.CHECKOUT_REF }}

+      - name: Setup
+        uses: ./.github/actions/runner_setup
+      - name: Docker setup
+        uses: ./.github/actions/docker_setup
+        with:
+          test_name: "Build (arm_tsan)"
+
       - name: Prepare env script
         run: |
           rm -rf ./ci/tmp
@@ -698,8 +710,8 @@ jobs:
           fi

   build_amd_release:
-    runs-on: [self-hosted, arm-large]
-    needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, style_check, fast_test, build_arm_tidy, build_amd_debug, build_amd_asan, build_amd_tsan, build_arm_binary, stateless_tests_amd_asan_distributed_plan_parallel_1_2, stateless_tests_amd_asan_distributed_plan_parallel_2_2, stateless_tests_amd_debug_parallel, stateless_tests_amd_tsan_parallel_1_2, stateless_tests_amd_tsan_parallel_2_2, stateless_tests_arm_binary_parallel]
+    runs-on: [self-hosted, altinity-on-demand, altinity-func-tester-aarch64]
+    needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, fast_test, build_amd_debug, build_amd_asan, build_amd_tsan, build_arm_binary, stateless_tests_amd_asan_distributed_plan_parallel_1_2, stateless_tests_amd_asan_distributed_plan_parallel_2_2, stateless_tests_amd_debug_parallel, stateless_tests_amd_tsan_parallel_1_2, stateless_tests_amd_tsan_parallel_2_2, stateless_tests_arm_binary_parallel]
     if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'QnVpbGQgKGFtZF9yZWxlYXNlKQ==') }}
     name: "Build (amd_release)"
     outputs:
@@ -711,6 +723,13 @@ jobs:
       with:
         ref: ${{ env.CHECKOUT_REF }}

+      - name: Setup
+        uses: ./.github/actions/runner_setup
+      - name: Docker setup
+        uses: ./.github/actions/docker_setup
+        with:
+          test_name: "Build (amd_release)"
+
       - name: Prepare env script
         run: |
           rm -rf ./ci/tmp
@@ -738,8 +757,8 @@ jobs:
           fi

   build_arm_release:
-    runs-on: [self-hosted, arm-large]
-    needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, style_check, fast_test, build_arm_tidy, build_amd_debug, build_amd_asan, build_amd_tsan, build_arm_binary, stateless_tests_amd_asan_distributed_plan_parallel_1_2, stateless_tests_amd_asan_distributed_plan_parallel_2_2, stateless_tests_amd_debug_parallel, stateless_tests_amd_tsan_parallel_1_2, stateless_tests_amd_tsan_parallel_2_2, stateless_tests_arm_binary_parallel]
+    runs-on: [self-hosted, altinity-on-demand, altinity-builder]
+    needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, fast_test, build_amd_debug, build_amd_asan, build_amd_tsan, build_arm_binary, stateless_tests_amd_asan_distributed_plan_parallel_1_2, stateless_tests_amd_asan_distributed_plan_parallel_2_2, stateless_tests_amd_debug_parallel, stateless_tests_amd_tsan_parallel_1_2, stateless_tests_amd_tsan_parallel_2_2, stateless_tests_arm_binary_parallel]
     if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'QnVpbGQgKGFybV9yZWxlYXNlKQ==') }}
     name: "Build (arm_release)"
     outputs:
@@ -751,6 +770,13 @@ jobs:
       with:
         ref: ${{ env.CHECKOUT_REF }}

+      - name: Setup
+        uses: ./.github/actions/runner_setup
+      - name: Docker setup
+        uses: ./.github/actions/docker_setup
+        with:
+          test_name: "Build (arm_release)"
+
       - name: Prepare env script
         run: |
           rm -rf ./ci/tmp
@@ -777,11 +803,11 @@ jobs:
             python3 -m praktika run 'Build (arm_release)' --workflow "PR" --ci |& tee ./ci/tmp/job.log
           fi

-  build_amd_darwin:
-    runs-on: [self-hosted, amd-large]
-    needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, style_check, fast_test, build_arm_tidy, build_amd_debug, build_amd_asan, build_amd_tsan, build_arm_binary, stateless_tests_amd_asan_distributed_plan_parallel_1_2, stateless_tests_amd_asan_distributed_plan_parallel_2_2, stateless_tests_amd_debug_parallel, stateless_tests_amd_tsan_parallel_1_2, stateless_tests_amd_tsan_parallel_2_2, stateless_tests_arm_binary_parallel]
-    if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'QnVpbGQgKGFtZF9kYXJ3aW4p') }}
-    name: "Build (amd_darwin)"
+  quick_functional_tests:
+    runs-on: [self-hosted, altinity-on-demand, altinity-style-checker]
+    needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, fast_test, build_amd_debug]
+    if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'UXVpY2sgZnVuY3Rpb25hbCB0ZXN0cw==') }}
+    name: "Quick functional tests"
     outputs:
       data: ${{ steps.run.outputs.DATA }}
       pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }}
@@ -791,6 +817,13 @@ jobs:
       with:
         ref: ${{ env.CHECKOUT_REF }}

+      - name: Setup
+        uses: ./.github/actions/runner_setup
+      - name: Docker setup
+        uses: ./.github/actions/docker_setup
+        with:
+          test_name: "Quick functional tests"
+
       - name: Prepare env script
         run: |
           rm -rf ./ci/tmp
@@ -812,16 +845,16 @@ jobs:
           . ./ci/tmp/praktika_setup_env.sh
           set -o pipefail
           if command -v ts &> /dev/null; then
-            python3 -m praktika run 'Build (amd_darwin)' --workflow "PR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
+            python3 -m praktika run 'Quick functional tests' --workflow "PR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
           else
-            python3 -m praktika run 'Build (amd_darwin)' --workflow "PR" --ci |& tee ./ci/tmp/job.log
+            python3 -m praktika run 'Quick functional tests' --workflow "PR" --ci |& tee ./ci/tmp/job.log
           fi

-  build_arm_darwin:
-    runs-on: [self-hosted, arm-large]
-    needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, style_check, fast_test, build_arm_tidy, build_amd_debug, build_amd_asan, build_amd_tsan, build_arm_binary, stateless_tests_amd_asan_distributed_plan_parallel_1_2, stateless_tests_amd_asan_distributed_plan_parallel_2_2, stateless_tests_amd_debug_parallel, stateless_tests_amd_tsan_parallel_1_2, stateless_tests_amd_tsan_parallel_2_2, stateless_tests_arm_binary_parallel]
-    if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'QnVpbGQgKGFybV9kYXJ3aW4p') }}
-    name: "Build (arm_darwin)"
+  stateless_tests_arm_asan_targeted:
+    runs-on: [self-hosted, altinity-on-demand, altinity-func-tester-aarch64]
+    needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, fast_test, build_arm_asan]
+    if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhcm1fYXNhbiwgdGFyZ2V0ZWQp') }}
+    name: "Stateless tests (arm_asan, targeted)"
     outputs:
       data: ${{ steps.run.outputs.DATA }}
       pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }}
@@ -831,6 +864,13 @@ jobs:
       with:
         ref: ${{ env.CHECKOUT_REF }}

+      - name: Setup
+        uses: ./.github/actions/runner_setup
+      - name: Docker setup
+        uses: ./.github/actions/docker_setup
+        with:
+          test_name: "Stateless tests (arm_asan, targeted)"
+
       - name: Prepare env script
         run: |
           rm -rf ./ci/tmp
@@ -852,16 +892,16 @@ jobs:
           . ./ci/tmp/praktika_setup_env.sh
           set -o pipefail
           if command -v ts &> /dev/null; then
-            python3 -m praktika run 'Build (arm_darwin)' --workflow "PR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
+            python3 -m praktika run 'Stateless tests (arm_asan, targeted)' --workflow "PR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
           else
-            python3 -m praktika run 'Build (arm_darwin)' --workflow "PR" --ci |& tee ./ci/tmp/job.log
+            python3 -m praktika run 'Stateless tests (arm_asan, targeted)' --workflow "PR" --ci |& tee ./ci/tmp/job.log
           fi

-  build_arm_v80compat:
-    runs-on: [self-hosted, arm-large]
-    needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, style_check, fast_test, build_arm_tidy, build_amd_debug, build_amd_asan, build_amd_tsan, build_arm_binary, stateless_tests_amd_asan_distributed_plan_parallel_1_2, stateless_tests_amd_asan_distributed_plan_parallel_2_2, stateless_tests_amd_debug_parallel, stateless_tests_amd_tsan_parallel_1_2, stateless_tests_amd_tsan_parallel_2_2, stateless_tests_arm_binary_parallel]
-    if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'QnVpbGQgKGFybV92ODBjb21wYXQp') }}
-    name: "Build (arm_v80compat)"
+  integration_tests_amd_asan_targeted:
+    runs-on: [self-hosted, altinity-on-demand, altinity-func-tester]
+    needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, fast_test, build_amd_asan]
+    if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKGFtZF9hc2FuLCB0YXJnZXRlZCk=') }}
+    name: "Integration tests (amd_asan, targeted)"
     outputs:
       data: ${{ steps.run.outputs.DATA }}
       pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }}
@@ -871,6 +911,13 @@ jobs:
       with:
         ref: ${{ env.CHECKOUT_REF }}

+      - name: Setup
+        uses: ./.github/actions/runner_setup
+      - name: Docker setup
+        uses: ./.github/actions/docker_setup
+        with:
+          test_name: "Integration tests (amd_asan, targeted)"
+
       - name: Prepare env script
         run: |
           rm -rf ./ci/tmp
@@ -892,16 +939,16 @@ jobs:
           . ./ci/tmp/praktika_setup_env.sh
           set -o pipefail
           if command -v ts &> /dev/null; then
-            python3 -m praktika run 'Build (arm_v80compat)' --workflow "PR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
+            python3 -m praktika run 'Integration tests (amd_asan, targeted)' --workflow "PR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
           else
-            python3 -m praktika run 'Build (arm_v80compat)' --workflow "PR" --ci |& tee ./ci/tmp/job.log
+            python3 -m praktika run 'Integration tests (amd_asan, targeted)' --workflow "PR" --ci |& tee ./ci/tmp/job.log
           fi

-  build_amd_freebsd:
-    runs-on: [self-hosted, arm-large]
-    needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, style_check, fast_test, build_arm_tidy, build_amd_debug, build_amd_asan, build_amd_tsan, build_arm_binary, stateless_tests_amd_asan_distributed_plan_parallel_1_2, stateless_tests_amd_asan_distributed_plan_parallel_2_2, stateless_tests_amd_debug_parallel, stateless_tests_amd_tsan_parallel_1_2, stateless_tests_amd_tsan_parallel_2_2, stateless_tests_arm_binary_parallel]
-    if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'QnVpbGQgKGFtZF9mcmVlYnNkKQ==') }}
-    name: "Build (amd_freebsd)"
+  stateless_tests_amd_asan_flaky_check:
+    runs-on: [self-hosted, altinity-on-demand, altinity-func-tester]
+    needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, fast_test, build_amd_asan]
+    if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhbWRfYXNhbiwgZmxha3kgY2hlY2sp') }}
+    name: "Stateless tests (amd_asan, flaky check)"
     outputs:
       data: ${{ steps.run.outputs.DATA }}
       pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }}
@@ -911,6 +958,13 @@ jobs:
       with:
         ref: ${{ env.CHECKOUT_REF }}

+      - name: Setup
+        uses: ./.github/actions/runner_setup
+      - name: Docker setup
+        uses: ./.github/actions/docker_setup
+        with:
+          test_name: "Stateless tests (amd_asan, flaky check)"
+
       - name: Prepare env script
         run: |
           rm -rf ./ci/tmp
@@ -932,16 +986,16 @@ jobs:
           . ./ci/tmp/praktika_setup_env.sh
           set -o pipefail
           if command -v ts &> /dev/null; then
-            python3 -m praktika run 'Build (amd_freebsd)' --workflow "PR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
+            python3 -m praktika run 'Stateless tests (amd_asan, flaky check)' --workflow "PR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
           else
-            python3 -m praktika run 'Build (amd_freebsd)' --workflow "PR" --ci |& tee ./ci/tmp/job.log
+            python3 -m praktika run 'Stateless tests (amd_asan, flaky check)' --workflow "PR" --ci |& tee ./ci/tmp/job.log
           fi

-  build_ppc64le:
-    runs-on: [self-hosted, arm-large]
-    needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, style_check, fast_test, build_arm_tidy, build_amd_debug, build_amd_asan, build_amd_tsan, build_arm_binary, stateless_tests_amd_asan_distributed_plan_parallel_1_2, stateless_tests_amd_asan_distributed_plan_parallel_2_2, stateless_tests_amd_debug_parallel, stateless_tests_amd_tsan_parallel_1_2, stateless_tests_amd_tsan_parallel_2_2, stateless_tests_arm_binary_parallel]
-    if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'QnVpbGQgKHBwYzY0bGUp') }}
-    name: "Build (ppc64le)"
+  integration_tests_amd_asan_flaky:
+    runs-on: [self-hosted, altinity-on-demand, altinity-func-tester]
+    needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, fast_test, build_amd_asan]
+    if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKGFtZF9hc2FuLCBmbGFreSk=') }}
+    name: "Integration tests (amd_asan, flaky)"
     outputs:
       data: ${{ steps.run.outputs.DATA }}
       pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }}
@@ -951,6 +1005,13 @@ jobs:
       with:
         ref: ${{ env.CHECKOUT_REF }}

+      - name: Setup
+        uses: ./.github/actions/runner_setup
+      - name: Docker setup
+        uses: ./.github/actions/docker_setup
+        with:
+          test_name: "Integration tests (amd_asan, flaky)"
+
       - name: Prepare env script
         run: |
           rm -rf ./ci/tmp
@@ -972,16 +1033,16 @@ jobs:
           . ./ci/tmp/praktika_setup_env.sh
           set -o pipefail
           if command -v ts &> /dev/null; then
-            python3 -m praktika run 'Build (ppc64le)' --workflow "PR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
+            python3 -m praktika run 'Integration tests (amd_asan, flaky)' --workflow "PR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
           else
-            python3 -m praktika run 'Build (ppc64le)' --workflow "PR" --ci |& tee ./ci/tmp/job.log
+            python3 -m praktika run 'Integration tests (amd_asan, flaky)' --workflow "PR" --ci |& tee ./ci/tmp/job.log
           fi

-  build_amd_compat:
-    runs-on: [self-hosted, arm-large]
-    needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, style_check, fast_test, build_arm_tidy, build_amd_debug, build_amd_asan, build_amd_tsan, build_arm_binary, stateless_tests_amd_asan_distributed_plan_parallel_1_2, stateless_tests_amd_asan_distributed_plan_parallel_2_2, stateless_tests_amd_debug_parallel, stateless_tests_amd_tsan_parallel_1_2, stateless_tests_amd_tsan_parallel_2_2, stateless_tests_arm_binary_parallel]
-    if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'QnVpbGQgKGFtZF9jb21wYXQp') }}
-    name: "Build (amd_compat)"
+  bugfix_validation_functional_tests:
+    runs-on: [self-hosted, altinity-on-demand, altinity-func-tester-aarch64]
+    needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest]
+    if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'QnVnZml4IHZhbGlkYXRpb24gKGZ1bmN0aW9uYWwgdGVzdHMp') }}
+    name: "Bugfix validation (functional tests)"
     outputs:
       data: ${{ steps.run.outputs.DATA }}
       pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }}
@@ -991,6 +1052,13 @@ jobs:
       with:
         ref: ${{ env.CHECKOUT_REF }}

+      - name: Setup
+        uses: ./.github/actions/runner_setup
+      - name: Docker setup
+        uses: ./.github/actions/docker_setup
+        with:
+          test_name: "Bugfix validation (functional tests)"
+
       - name: Prepare env script
         run: |
           rm -rf ./ci/tmp
@@ -1012,16 +1080,16 @@ jobs:
           . ./ci/tmp/praktika_setup_env.sh
           set -o pipefail
           if command -v ts &> /dev/null; then
-            python3 -m praktika run 'Build (amd_compat)' --workflow "PR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
+            python3 -m praktika run 'Bugfix validation (functional tests)' --workflow "PR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
           else
-            python3 -m praktika run 'Build (amd_compat)' --workflow "PR" --ci |& tee ./ci/tmp/job.log
+            python3 -m praktika run 'Bugfix validation (functional tests)' --workflow "PR" --ci |& tee ./ci/tmp/job.log
           fi

-  build_amd_musl:
-    runs-on: [self-hosted, arm-large]
-    needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, style_check, fast_test, build_arm_tidy, build_amd_debug, build_amd_asan, build_amd_tsan, build_arm_binary, stateless_tests_amd_asan_distributed_plan_parallel_1_2, stateless_tests_amd_asan_distributed_plan_parallel_2_2, stateless_tests_amd_debug_parallel, stateless_tests_amd_tsan_parallel_1_2, stateless_tests_amd_tsan_parallel_2_2, stateless_tests_arm_binary_parallel]
-    if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'QnVpbGQgKGFtZF9tdXNsKQ==') }}
-    name: "Build (amd_musl)"
+  bugfix_validation_integration_tests:
+    runs-on: [self-hosted, altinity-on-demand, altinity-func-tester]
+    needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest]
+    if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'QnVnZml4IHZhbGlkYXRpb24gKGludGVncmF0aW9uIHRlc3RzKQ==') }}
+    name: "Bugfix validation (integration tests)"
     outputs:
       data: ${{ steps.run.outputs.DATA }}
       pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }}
@@ -1031,6 +1099,13 @@ jobs:
       with:
         ref: ${{ env.CHECKOUT_REF }}

+      - name: Setup
+        uses: ./.github/actions/runner_setup
+      - name: Docker setup
+        uses: ./.github/actions/docker_setup
+        with:
+          test_name: "Bugfix validation (integration tests)"
+
       - name: Prepare env script
         run: |
           rm -rf ./ci/tmp
@@ -1052,16 +1127,16 @@ jobs:
           . ./ci/tmp/praktika_setup_env.sh
           set -o pipefail
           if command -v ts &> /dev/null; then
-            python3 -m praktika run 'Build (amd_musl)' --workflow "PR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
+            python3 -m praktika run 'Bugfix validation (integration tests)' --workflow "PR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
           else
-            python3 -m praktika run 'Build (amd_musl)' --workflow "PR" --ci |& tee ./ci/tmp/job.log
+            python3 -m praktika run 'Bugfix validation (integration tests)' --workflow "PR" --ci |& tee ./ci/tmp/job.log
           fi

-  build_riscv64:
-    runs-on: [self-hosted, arm-large]
-    needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, style_check, fast_test, build_arm_tidy, build_amd_debug, build_amd_asan, build_amd_tsan, build_arm_binary, stateless_tests_amd_asan_distributed_plan_parallel_1_2, stateless_tests_amd_asan_distributed_plan_parallel_2_2, stateless_tests_amd_debug_parallel, stateless_tests_amd_tsan_parallel_1_2, stateless_tests_amd_tsan_parallel_2_2, stateless_tests_arm_binary_parallel]
-    if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'QnVpbGQgKHJpc2N2NjQp') }}
-    name: "Build (riscv64)"
+  stateless_tests_amd_asan_distributed_plan_parallel_1_2:
+    runs-on: [self-hosted, altinity-on-demand, altinity-func-tester]
+    needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, fast_test, build_amd_asan]
+    if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhbWRfYXNhbiwgZGlzdHJpYnV0ZWQgcGxhbiwgcGFyYWxsZWwsIDEvMik=') }}
+    name: "Stateless tests (amd_asan, distributed plan, parallel, 1/2)"
     outputs:
       data: ${{ steps.run.outputs.DATA }}
       pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }}
@@ -1071,6 +1146,13 @@ jobs:
       with:
         ref: ${{ env.CHECKOUT_REF }}

+      - name: Setup
+        uses: ./.github/actions/runner_setup
+      - name: Docker setup
+        uses: ./.github/actions/docker_setup
+        with:
+          test_name: "Stateless tests (amd_asan, distributed plan, parallel, 1/2)"
+
       - name: Prepare env script
         run: |
           rm -rf ./ci/tmp
@@ -1092,16 +1174,16 @@ jobs:
           . ./ci/tmp/praktika_setup_env.sh
           set -o pipefail
           if command -v ts &> /dev/null; then
-            python3 -m praktika run 'Build (riscv64)' --workflow "PR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
+            python3 -m praktika run 'Stateless tests (amd_asan, distributed plan, parallel, 1/2)' --workflow "PR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
           else
-            python3 -m praktika run 'Build (riscv64)' --workflow "PR" --ci |& tee ./ci/tmp/job.log
+            python3 -m praktika run 'Stateless tests (amd_asan, distributed plan, parallel, 1/2)' --workflow "PR" --ci |& tee ./ci/tmp/job.log
           fi

-  build_s390x:
-    runs-on: [self-hosted, arm-large]
-    needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, style_check, fast_test, build_arm_tidy, build_amd_debug, build_amd_asan, build_amd_tsan, build_arm_binary, stateless_tests_amd_asan_distributed_plan_parallel_1_2, stateless_tests_amd_asan_distributed_plan_parallel_2_2, stateless_tests_amd_debug_parallel, stateless_tests_amd_tsan_parallel_1_2, stateless_tests_amd_tsan_parallel_2_2, stateless_tests_arm_binary_parallel]
-    if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'QnVpbGQgKHMzOTB4KQ==') }}
-    name: "Build (s390x)"
+  stateless_tests_amd_asan_distributed_plan_parallel_2_2:
+    runs-on: [self-hosted, altinity-on-demand, altinity-func-tester]
+    needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, fast_test, build_amd_asan]
+    if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhbWRfYXNhbiwgZGlzdHJpYnV0ZWQgcGxhbiwgcGFyYWxsZWwsIDIvMik=') }}
+    name: "Stateless tests (amd_asan, distributed plan, parallel, 2/2)"
     outputs:
       data: ${{ steps.run.outputs.DATA }}
       pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }}
@@ -1111,6 +1193,13 @@ jobs:
       with:
         ref: ${{ env.CHECKOUT_REF }}

+      - name: Setup
+        uses: ./.github/actions/runner_setup
+      - name: Docker setup
+        uses: ./.github/actions/docker_setup
+        with:
+          test_name: "Stateless tests (amd_asan, distributed plan, parallel, 2/2)"
+
       - name: Prepare env script
         run: |
           rm -rf ./ci/tmp
@@ -1132,16 +1221,16 @@ jobs:
           . ./ci/tmp/praktika_setup_env.sh
           set -o pipefail
           if command -v ts &> /dev/null; then
-            python3 -m praktika run 'Build (s390x)' --workflow "PR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
+            python3 -m praktika run 'Stateless tests (amd_asan, distributed plan, parallel, 2/2)' --workflow "PR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
           else
-            python3 -m praktika run 'Build (s390x)' --workflow "PR" --ci |& tee ./ci/tmp/job.log
+            python3 -m praktika run 'Stateless tests (amd_asan, distributed plan, parallel, 2/2)' --workflow "PR" --ci |& tee ./ci/tmp/job.log
           fi

-  build_loongarch64:
-    runs-on: [self-hosted, arm-large]
-    needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, style_check, fast_test, build_arm_tidy, build_amd_debug, build_amd_asan, build_amd_tsan, build_arm_binary, stateless_tests_amd_asan_distributed_plan_parallel_1_2, stateless_tests_amd_asan_distributed_plan_parallel_2_2, stateless_tests_amd_debug_parallel, stateless_tests_amd_tsan_parallel_1_2, stateless_tests_amd_tsan_parallel_2_2, stateless_tests_arm_binary_parallel]
-    if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'QnVpbGQgKGxvb25nYXJjaDY0KQ==') }}
-    name: "Build (loongarch64)"
+  stateless_tests_amd_asan_db_disk_distributed_plan_sequential:
+    runs-on: [self-hosted, altinity-on-demand, altinity-func-tester]
+    needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, fast_test, build_amd_debug, build_amd_asan, build_amd_tsan, build_arm_binary, stateless_tests_amd_asan_distributed_plan_parallel_1_2, stateless_tests_amd_asan_distributed_plan_parallel_2_2, stateless_tests_amd_debug_parallel, stateless_tests_amd_tsan_parallel_1_2, stateless_tests_amd_tsan_parallel_2_2, stateless_tests_arm_binary_parallel]
+    if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhbWRfYXNhbiwgZGIgZGlzaywgZGlzdHJpYnV0ZWQgcGxhbiwgc2VxdWVudGlhbCk=') }}
+    name: "Stateless tests (amd_asan, db disk, distributed plan, sequential)"
     outputs:
       data: ${{ steps.run.outputs.DATA }}
       pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }}
@@ -1151,6 +1240,13 @@ jobs:
       with:
         ref: ${{ env.CHECKOUT_REF }}

+      - name: Setup
+        uses: ./.github/actions/runner_setup
+      - name: Docker setup
+        uses: ./.github/actions/docker_setup
+        with:
+          test_name: "Stateless tests (amd_asan, db disk, distributed plan, sequential)"
+
       - name: Prepare env script
         run: |
           rm -rf ./ci/tmp
@@ -1172,16 +1268,16 @@ jobs:
           . ./ci/tmp/praktika_setup_env.sh
           set -o pipefail
           if command -v ts &> /dev/null; then
-            python3 -m praktika run 'Build (loongarch64)' --workflow "PR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
+            python3 -m praktika run 'Stateless tests (amd_asan, db disk, distributed plan, sequential)' --workflow "PR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
           else
-            python3 -m praktika run 'Build (loongarch64)' --workflow "PR" --ci |& tee ./ci/tmp/job.log
+            python3 -m praktika run 'Stateless tests (amd_asan, db disk, distributed plan, sequential)' --workflow "PR" --ci |& tee ./ci/tmp/job.log
           fi

-  build_arm_fuzzers:
-    runs-on: [self-hosted, arm-large]
-    needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, style_check, fast_test, build_arm_tidy, build_amd_debug, build_amd_asan, build_amd_tsan, build_arm_binary, stateless_tests_amd_asan_distributed_plan_parallel_1_2, stateless_tests_amd_asan_distributed_plan_parallel_2_2, stateless_tests_amd_debug_parallel, stateless_tests_amd_tsan_parallel_1_2, stateless_tests_amd_tsan_parallel_2_2, stateless_tests_arm_binary_parallel]
-    if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'QnVpbGQgKGFybV9mdXp6ZXJzKQ==') }}
-    name: "Build (arm_fuzzers)"
+  stateless_tests_amd_binary_old_analyzer_s3_storage_databasereplicated_parallel:
+    runs-on: [self-hosted, altinity-on-demand, altinity-func-tester]
+    needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, fast_test, build_amd_debug, build_amd_asan, build_amd_tsan, build_amd_binary, build_arm_binary, stateless_tests_amd_asan_distributed_plan_parallel_1_2, stateless_tests_amd_asan_distributed_plan_parallel_2_2, stateless_tests_amd_debug_parallel, stateless_tests_amd_tsan_parallel_1_2, stateless_tests_amd_tsan_parallel_2_2, stateless_tests_arm_binary_parallel]
+    if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhbWRfYmluYXJ5LCBvbGQgYW5hbHl6ZXIsIHMzIHN0b3JhZ2UsIERhdGFiYXNlUmVwbGljYXRlZCwgcGFyYWxsZWwp') }}
+    name: "Stateless tests (amd_binary, old analyzer, s3 storage, DatabaseReplicated, parallel)"
     outputs:
       data: ${{ steps.run.outputs.DATA }}
       pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }}
@@ -1191,6 +1287,13 @@ jobs:
       with:
         ref: ${{ env.CHECKOUT_REF }}

+      - name: Setup
+        uses: ./.github/actions/runner_setup
+      - name: Docker setup
+        uses: ./.github/actions/docker_setup
+        with:
+          test_name: "Stateless tests (amd_binary, old analyzer, s3 storage, DatabaseReplicated, parallel)"
+
       - name: Prepare env script
         run: |
           rm -rf ./ci/tmp
@@ -1212,16 +1315,16 @@ jobs:
./ci/tmp/praktika_setup_env.sh set -o pipefail if command -v ts &> /dev/null; then - python3 -m praktika run 'Build (arm_fuzzers)' --workflow "PR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log + python3 -m praktika run 'Stateless tests (amd_binary, old analyzer, s3 storage, DatabaseReplicated, parallel)' --workflow "PR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log else - python3 -m praktika run 'Build (arm_fuzzers)' --workflow "PR" --ci |& tee ./ci/tmp/job.log + python3 -m praktika run 'Stateless tests (amd_binary, old analyzer, s3 storage, DatabaseReplicated, parallel)' --workflow "PR" --ci |& tee ./ci/tmp/job.log fi - quick_functional_tests: - runs-on: [self-hosted, amd-small] - needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, style_check, fast_test, build_arm_tidy, build_amd_debug] - if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'UXVpY2sgZnVuY3Rpb25hbCB0ZXN0cw==') }} - name: "Quick functional tests" + stateless_tests_amd_binary_old_analyzer_s3_storage_databasereplicated_sequential: + runs-on: [self-hosted, altinity-on-demand, altinity-func-tester] + needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, fast_test, build_amd_debug, build_amd_asan, build_amd_tsan, build_amd_binary, build_arm_binary, stateless_tests_amd_asan_distributed_plan_parallel_1_2, stateless_tests_amd_asan_distributed_plan_parallel_2_2, stateless_tests_amd_debug_parallel, stateless_tests_amd_tsan_parallel_1_2, stateless_tests_amd_tsan_parallel_2_2, stateless_tests_arm_binary_parallel] + if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhbWRfYmluYXJ5LCBvbGQgYW5hbHl6ZXIsIHMzIHN0b3JhZ2UsIERhdGFiYXNlUmVwbGljYXRlZCwgc2VxdWVudGlhbCk=') }} + name: "Stateless tests (amd_binary, old analyzer, s3 storage, DatabaseReplicated, sequential)" outputs: data: ${{ steps.run.outputs.DATA }} pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} @@ -1231,6 +1334,13 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Stateless tests (amd_binary, old analyzer, s3 storage, DatabaseReplicated, sequential)" + - name: Prepare env script run: | rm -rf ./ci/tmp @@ -1252,16 +1362,16 @@ jobs: . 
           . ./ci/tmp/praktika_setup_env.sh
           set -o pipefail
           if command -v ts &> /dev/null; then
-            python3 -m praktika run 'Quick functional tests' --workflow "PR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
+            python3 -m praktika run 'Stateless tests (amd_binary, old analyzer, s3 storage, DatabaseReplicated, sequential)' --workflow "PR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
           else
-            python3 -m praktika run 'Quick functional tests' --workflow "PR" --ci |& tee ./ci/tmp/job.log
+            python3 -m praktika run 'Stateless tests (amd_binary, old analyzer, s3 storage, DatabaseReplicated, sequential)' --workflow "PR" --ci |& tee ./ci/tmp/job.log
           fi
 
-  stateless_tests_arm_asan_targeted:
-    runs-on: [self-hosted, arm-medium]
-    needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, style_check, fast_test, build_arm_tidy, build_arm_asan]
-    if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhcm1fYXNhbiwgdGFyZ2V0ZWQp') }}
-    name: "Stateless tests (arm_asan, targeted)"
+  stateless_tests_amd_binary_parallelreplicas_s3_storage_parallel:
+    runs-on: [self-hosted, altinity-on-demand, altinity-func-tester]
+    needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, fast_test, build_amd_debug, build_amd_asan, build_amd_tsan, build_amd_binary, build_arm_binary, stateless_tests_amd_asan_distributed_plan_parallel_1_2, stateless_tests_amd_asan_distributed_plan_parallel_2_2, stateless_tests_amd_debug_parallel, stateless_tests_amd_tsan_parallel_1_2, stateless_tests_amd_tsan_parallel_2_2, stateless_tests_arm_binary_parallel]
+    if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhbWRfYmluYXJ5LCBQYXJhbGxlbFJlcGxpY2FzLCBzMyBzdG9yYWdlLCBwYXJhbGxlbCk=') }}
+    name: "Stateless tests (amd_binary, ParallelReplicas, s3 storage, parallel)"
     outputs:
       data: ${{ steps.run.outputs.DATA }}
       pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }}
@@ -1271,6 +1381,13 @@ jobs:
       with:
         ref: ${{ env.CHECKOUT_REF }}
 
+      - name: Setup
+        uses: ./.github/actions/runner_setup
+      - name: Docker setup
+        uses: ./.github/actions/docker_setup
+        with:
+          test_name: "Stateless tests (amd_binary, ParallelReplicas, s3 storage, parallel)"
+
       - name: Prepare env script
         run: |
           rm -rf ./ci/tmp
@@ -1292,16 +1409,16 @@ jobs:
           . ./ci/tmp/praktika_setup_env.sh
           set -o pipefail
           if command -v ts &> /dev/null; then
-            python3 -m praktika run 'Stateless tests (arm_asan, targeted)' --workflow "PR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
+            python3 -m praktika run 'Stateless tests (amd_binary, ParallelReplicas, s3 storage, parallel)' --workflow "PR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
           else
-            python3 -m praktika run 'Stateless tests (arm_asan, targeted)' --workflow "PR" --ci |& tee ./ci/tmp/job.log
+            python3 -m praktika run 'Stateless tests (amd_binary, ParallelReplicas, s3 storage, parallel)' --workflow "PR" --ci |& tee ./ci/tmp/job.log
           fi
 
-  integration_tests_amd_asan_targeted:
-    runs-on: [self-hosted, amd-medium]
-    needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, style_check, fast_test, build_arm_tidy, build_amd_asan]
-    if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKGFtZF9hc2FuLCB0YXJnZXRlZCk=') }}
-    name: "Integration tests (amd_asan, targeted)"
+  stateless_tests_amd_binary_parallelreplicas_s3_storage_sequential:
+    runs-on: [self-hosted, altinity-on-demand, altinity-func-tester]
+    needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, fast_test, build_amd_debug, build_amd_asan, build_amd_tsan, build_amd_binary, build_arm_binary, stateless_tests_amd_asan_distributed_plan_parallel_1_2, stateless_tests_amd_asan_distributed_plan_parallel_2_2, stateless_tests_amd_debug_parallel, stateless_tests_amd_tsan_parallel_1_2, stateless_tests_amd_tsan_parallel_2_2, stateless_tests_arm_binary_parallel]
+    if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhbWRfYmluYXJ5LCBQYXJhbGxlbFJlcGxpY2FzLCBzMyBzdG9yYWdlLCBzZXF1ZW50aWFsKQ==') }}
+    name: "Stateless tests (amd_binary, ParallelReplicas, s3 storage, sequential)"
     outputs:
       data: ${{ steps.run.outputs.DATA }}
       pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }}
@@ -1311,6 +1428,13 @@ jobs:
      with:
         ref: ${{ env.CHECKOUT_REF }}
 
+      - name: Setup
+        uses: ./.github/actions/runner_setup
+      - name: Docker setup
+        uses: ./.github/actions/docker_setup
+        with:
+          test_name: "Stateless tests (amd_binary, ParallelReplicas, s3 storage, sequential)"
+
       - name: Prepare env script
         run: |
           rm -rf ./ci/tmp
@@ -1332,1096 +1456,16 @@ jobs:
           . ./ci/tmp/praktika_setup_env.sh
           set -o pipefail
           if command -v ts &> /dev/null; then
-            python3 -m praktika run 'Integration tests (amd_asan, targeted)' --workflow "PR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
+            python3 -m praktika run 'Stateless tests (amd_binary, ParallelReplicas, s3 storage, sequential)' --workflow "PR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
           else
-            python3 -m praktika run 'Integration tests (amd_asan, targeted)' --workflow "PR" --ci |& tee ./ci/tmp/job.log
+            python3 -m praktika run 'Stateless tests (amd_binary, ParallelReplicas, s3 storage, sequential)' --workflow "PR" --ci |& tee ./ci/tmp/job.log
           fi
 
-  stateless_tests_amd_asan_flaky_check:
-    runs-on: [self-hosted, amd-medium]
-    needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, style_check, fast_test, build_arm_tidy, build_amd_asan]
-    if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhbWRfYXNhbiwgZmxha3kgY2hlY2sp') }}
-    name: "Stateless tests (amd_asan, flaky check)"
-    outputs:
-      data: ${{ steps.run.outputs.DATA }}
-      pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }}
-    steps:
-      - name: Checkout code
-        uses: actions/checkout@v4
-        with:
-          ref: ${{ env.CHECKOUT_REF }}
-
-      - name: Prepare env script
-        run: |
-          rm -rf ./ci/tmp
-          mkdir -p ./ci/tmp
-          cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF'
-          export PYTHONPATH=./ci:.:
-
-          cat > ./ci/tmp/workflow_job.json << 'EOF'
-          ${{ toJson(job) }}
-          EOF
-          cat > ./ci/tmp/workflow_status.json << 'EOF'
-          ${{ toJson(needs) }}
-          EOF
-          ENV_SETUP_SCRIPT_EOF
-
-      - name: Run
-        id: run
-        run: |
-          . ./ci/tmp/praktika_setup_env.sh
-          set -o pipefail
-          if command -v ts &> /dev/null; then
-            python3 -m praktika run 'Stateless tests (amd_asan, flaky check)' --workflow "PR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
-          else
-            python3 -m praktika run 'Stateless tests (amd_asan, flaky check)' --workflow "PR" --ci |& tee ./ci/tmp/job.log
-          fi
-
-  integration_tests_amd_asan_flaky:
-    runs-on: [self-hosted, amd-medium]
-    needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, style_check, fast_test, build_arm_tidy, build_amd_asan]
-    if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKGFtZF9hc2FuLCBmbGFreSk=') }}
-    name: "Integration tests (amd_asan, flaky)"
-    outputs:
-      data: ${{ steps.run.outputs.DATA }}
-      pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }}
-    steps:
-      - name: Checkout code
-        uses: actions/checkout@v4
-        with:
-          ref: ${{ env.CHECKOUT_REF }}
-
-      - name: Prepare env script
-        run: |
-          rm -rf ./ci/tmp
-          mkdir -p ./ci/tmp
-          cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF'
-          export PYTHONPATH=./ci:.:
-
-          cat > ./ci/tmp/workflow_job.json << 'EOF'
-          ${{ toJson(job) }}
-          EOF
-          cat > ./ci/tmp/workflow_status.json << 'EOF'
-          ${{ toJson(needs) }}
-          EOF
-          ENV_SETUP_SCRIPT_EOF
-
-      - name: Run
-        id: run
-        run: |
-          . ./ci/tmp/praktika_setup_env.sh
-          set -o pipefail
-          if command -v ts &> /dev/null; then
-            python3 -m praktika run 'Integration tests (amd_asan, flaky)' --workflow "PR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
-          else
-            python3 -m praktika run 'Integration tests (amd_asan, flaky)' --workflow "PR" --ci |& tee ./ci/tmp/job.log
-          fi
-
-  bugfix_validation_functional_tests:
-    runs-on: [self-hosted, arm-medium]
-    needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest]
-    if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'QnVnZml4IHZhbGlkYXRpb24gKGZ1bmN0aW9uYWwgdGVzdHMp') }}
-    name: "Bugfix validation (functional tests)"
-    outputs:
-      data: ${{ steps.run.outputs.DATA }}
-      pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }}
-    steps:
-      - name: Checkout code
-        uses: actions/checkout@v4
-        with:
-          ref: ${{ env.CHECKOUT_REF }}
-
-      - name: Prepare env script
-        run: |
-          rm -rf ./ci/tmp
-          mkdir -p ./ci/tmp
-          cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF'
-          export PYTHONPATH=./ci:.:
-
-          cat > ./ci/tmp/workflow_job.json << 'EOF'
-          ${{ toJson(job) }}
-          EOF
-          cat > ./ci/tmp/workflow_status.json << 'EOF'
-          ${{ toJson(needs) }}
-          EOF
-          ENV_SETUP_SCRIPT_EOF
-
-      - name: Run
-        id: run
-        run: |
-          . ./ci/tmp/praktika_setup_env.sh
-          set -o pipefail
-          if command -v ts &> /dev/null; then
-            python3 -m praktika run 'Bugfix validation (functional tests)' --workflow "PR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
-          else
-            python3 -m praktika run 'Bugfix validation (functional tests)' --workflow "PR" --ci |& tee ./ci/tmp/job.log
-          fi
-
-  bugfix_validation_integration_tests:
-    runs-on: [self-hosted, amd-small-mem]
-    needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest]
-    if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'QnVnZml4IHZhbGlkYXRpb24gKGludGVncmF0aW9uIHRlc3RzKQ==') }}
-    name: "Bugfix validation (integration tests)"
-    outputs:
-      data: ${{ steps.run.outputs.DATA }}
-      pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }}
-    steps:
-      - name: Checkout code
-        uses: actions/checkout@v4
-        with:
-          ref: ${{ env.CHECKOUT_REF }}
-
-      - name: Prepare env script
-        run: |
-          rm -rf ./ci/tmp
-          mkdir -p ./ci/tmp
-          cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF'
-          export PYTHONPATH=./ci:.:
-
-          cat > ./ci/tmp/workflow_job.json << 'EOF'
-          ${{ toJson(job) }}
-          EOF
-          cat > ./ci/tmp/workflow_status.json << 'EOF'
-          ${{ toJson(needs) }}
-          EOF
-          ENV_SETUP_SCRIPT_EOF
-
-      - name: Run
-        id: run
-        run: |
-          . ./ci/tmp/praktika_setup_env.sh
-          set -o pipefail
-          if command -v ts &> /dev/null; then
-            python3 -m praktika run 'Bugfix validation (integration tests)' --workflow "PR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
-          else
-            python3 -m praktika run 'Bugfix validation (integration tests)' --workflow "PR" --ci |& tee ./ci/tmp/job.log
-          fi
-
-  stateless_tests_amd_asan_distributed_plan_parallel_1_2:
-    runs-on: [self-hosted, amd-medium-cpu]
-    needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, style_check, fast_test, build_arm_tidy, build_amd_asan]
-    if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhbWRfYXNhbiwgZGlzdHJpYnV0ZWQgcGxhbiwgcGFyYWxsZWwsIDEvMik=') }}
-    name: "Stateless tests (amd_asan, distributed plan, parallel, 1/2)"
-    outputs:
-      data: ${{ steps.run.outputs.DATA }}
-      pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }}
-    steps:
-      - name: Checkout code
-        uses: actions/checkout@v4
-        with:
-          ref: ${{ env.CHECKOUT_REF }}
-
-      - name: Prepare env script
-        run: |
-          rm -rf ./ci/tmp
-          mkdir -p ./ci/tmp
-          cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF'
-          export PYTHONPATH=./ci:.:
-
-          cat > ./ci/tmp/workflow_job.json << 'EOF'
-          ${{ toJson(job) }}
-          EOF
-          cat > ./ci/tmp/workflow_status.json << 'EOF'
-          ${{ toJson(needs) }}
-          EOF
-          ENV_SETUP_SCRIPT_EOF
-
-      - name: Run
-        id: run
-        run: |
-          . ./ci/tmp/praktika_setup_env.sh
-          set -o pipefail
-          if command -v ts &> /dev/null; then
-            python3 -m praktika run 'Stateless tests (amd_asan, distributed plan, parallel, 1/2)' --workflow "PR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
-          else
-            python3 -m praktika run 'Stateless tests (amd_asan, distributed plan, parallel, 1/2)' --workflow "PR" --ci |& tee ./ci/tmp/job.log
-          fi
-
-  stateless_tests_amd_asan_distributed_plan_parallel_2_2:
-    runs-on: [self-hosted, amd-medium-cpu]
-    needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, style_check, fast_test, build_arm_tidy, build_amd_asan]
-    if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhbWRfYXNhbiwgZGlzdHJpYnV0ZWQgcGxhbiwgcGFyYWxsZWwsIDIvMik=') }}
-    name: "Stateless tests (amd_asan, distributed plan, parallel, 2/2)"
-    outputs:
-      data: ${{ steps.run.outputs.DATA }}
-      pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }}
-    steps:
-      - name: Checkout code
-        uses: actions/checkout@v4
-        with:
-          ref: ${{ env.CHECKOUT_REF }}
-
-      - name: Prepare env script
-        run: |
-          rm -rf ./ci/tmp
-          mkdir -p ./ci/tmp
-          cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF'
-          export PYTHONPATH=./ci:.:
-
-          cat > ./ci/tmp/workflow_job.json << 'EOF'
-          ${{ toJson(job) }}
-          EOF
-          cat > ./ci/tmp/workflow_status.json << 'EOF'
-          ${{ toJson(needs) }}
-          EOF
-          ENV_SETUP_SCRIPT_EOF
-
-      - name: Run
-        id: run
-        run: |
-          . ./ci/tmp/praktika_setup_env.sh
-          set -o pipefail
-          if command -v ts &> /dev/null; then
-            python3 -m praktika run 'Stateless tests (amd_asan, distributed plan, parallel, 2/2)' --workflow "PR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
-          else
-            python3 -m praktika run 'Stateless tests (amd_asan, distributed plan, parallel, 2/2)' --workflow "PR" --ci |& tee ./ci/tmp/job.log
-          fi
-
-  stateless_tests_amd_asan_db_disk_distributed_plan_sequential:
-    runs-on: [self-hosted, amd-small-mem]
-    needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, style_check, fast_test, build_arm_tidy, build_amd_debug, build_amd_asan, build_amd_tsan, build_arm_binary, stateless_tests_amd_asan_distributed_plan_parallel_1_2, stateless_tests_amd_asan_distributed_plan_parallel_2_2, stateless_tests_amd_debug_parallel, stateless_tests_amd_tsan_parallel_1_2, stateless_tests_amd_tsan_parallel_2_2, stateless_tests_arm_binary_parallel]
-    if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhbWRfYXNhbiwgZGIgZGlzaywgZGlzdHJpYnV0ZWQgcGxhbiwgc2VxdWVudGlhbCk=') }}
-    name: "Stateless tests (amd_asan, db disk, distributed plan, sequential)"
-    outputs:
-      data: ${{ steps.run.outputs.DATA }}
-      pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }}
-    steps:
-      - name: Checkout code
-        uses: actions/checkout@v4
-        with:
-          ref: ${{ env.CHECKOUT_REF }}
-
-      - name: Prepare env script
-        run: |
-          rm -rf ./ci/tmp
-          mkdir -p ./ci/tmp
-          cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF'
-          export PYTHONPATH=./ci:.:
-
-          cat > ./ci/tmp/workflow_job.json << 'EOF'
-          ${{ toJson(job) }}
-          EOF
-          cat > ./ci/tmp/workflow_status.json << 'EOF'
-          ${{ toJson(needs) }}
-          EOF
-          ENV_SETUP_SCRIPT_EOF
-
-      - name: Run
-        id: run
-        run: |
-          . ./ci/tmp/praktika_setup_env.sh
-          set -o pipefail
-          if command -v ts &> /dev/null; then
-            python3 -m praktika run 'Stateless tests (amd_asan, db disk, distributed plan, sequential)' --workflow "PR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
-          else
-            python3 -m praktika run 'Stateless tests (amd_asan, db disk, distributed plan, sequential)' --workflow "PR" --ci |& tee ./ci/tmp/job.log
-          fi
-
-  stateless_tests_amd_binary_old_analyzer_s3_storage_databasereplicated_parallel:
-    runs-on: [self-hosted, amd-medium]
-    needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, style_check, fast_test, build_arm_tidy, build_amd_debug, build_amd_asan, build_amd_tsan, build_amd_binary, build_arm_binary, stateless_tests_amd_asan_distributed_plan_parallel_1_2, stateless_tests_amd_asan_distributed_plan_parallel_2_2, stateless_tests_amd_debug_parallel, stateless_tests_amd_tsan_parallel_1_2, stateless_tests_amd_tsan_parallel_2_2, stateless_tests_arm_binary_parallel]
-    if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhbWRfYmluYXJ5LCBvbGQgYW5hbHl6ZXIsIHMzIHN0b3JhZ2UsIERhdGFiYXNlUmVwbGljYXRlZCwgcGFyYWxsZWwp') }}
-    name: "Stateless tests (amd_binary, old analyzer, s3 storage, DatabaseReplicated, parallel)"
-    outputs:
-      data: ${{ steps.run.outputs.DATA }}
-      pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }}
-    steps:
-      - name: Checkout code
-        uses: actions/checkout@v4
-        with:
-          ref: ${{ env.CHECKOUT_REF }}
-
-      - name: Prepare env script
-        run: |
-          rm -rf ./ci/tmp
-          mkdir -p ./ci/tmp
-          cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF'
-          export PYTHONPATH=./ci:.:
-
-          cat > ./ci/tmp/workflow_job.json << 'EOF'
-          ${{ toJson(job) }}
-          EOF
-          cat > ./ci/tmp/workflow_status.json << 'EOF'
-          ${{ toJson(needs) }}
-          EOF
-          ENV_SETUP_SCRIPT_EOF
-
-      - name: Run
-        id: run
-        run: |
-          . ./ci/tmp/praktika_setup_env.sh
-          set -o pipefail
-          if command -v ts &> /dev/null; then
-            python3 -m praktika run 'Stateless tests (amd_binary, old analyzer, s3 storage, DatabaseReplicated, parallel)' --workflow "PR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
-          else
-            python3 -m praktika run 'Stateless tests (amd_binary, old analyzer, s3 storage, DatabaseReplicated, parallel)' --workflow "PR" --ci |& tee ./ci/tmp/job.log
-          fi
-
-  stateless_tests_amd_binary_old_analyzer_s3_storage_databasereplicated_sequential:
-    runs-on: [self-hosted, amd-small]
-    needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, style_check, fast_test, build_arm_tidy, build_amd_debug, build_amd_asan, build_amd_tsan, build_amd_binary, build_arm_binary, stateless_tests_amd_asan_distributed_plan_parallel_1_2, stateless_tests_amd_asan_distributed_plan_parallel_2_2, stateless_tests_amd_debug_parallel, stateless_tests_amd_tsan_parallel_1_2, stateless_tests_amd_tsan_parallel_2_2, stateless_tests_arm_binary_parallel]
-    if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhbWRfYmluYXJ5LCBvbGQgYW5hbHl6ZXIsIHMzIHN0b3JhZ2UsIERhdGFiYXNlUmVwbGljYXRlZCwgc2VxdWVudGlhbCk=') }}
-    name: "Stateless tests (amd_binary, old analyzer, s3 storage, DatabaseReplicated, sequential)"
-    outputs:
-      data: ${{ steps.run.outputs.DATA }}
-      pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }}
-    steps:
-      - name: Checkout code
-        uses: actions/checkout@v4
-        with:
-          ref: ${{ env.CHECKOUT_REF }}
-
-      - name: Prepare env script
-        run: |
-          rm -rf ./ci/tmp
-          mkdir -p ./ci/tmp
-          cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF'
-          export PYTHONPATH=./ci:.:
-
-          cat > ./ci/tmp/workflow_job.json << 'EOF'
-          ${{ toJson(job) }}
-          EOF
-          cat > ./ci/tmp/workflow_status.json << 'EOF'
-          ${{ toJson(needs) }}
-          EOF
-          ENV_SETUP_SCRIPT_EOF
-
-      - name: Run
-        id: run
-        run: |
-          . ./ci/tmp/praktika_setup_env.sh
-          set -o pipefail
-          if command -v ts &> /dev/null; then
-            python3 -m praktika run 'Stateless tests (amd_binary, old analyzer, s3 storage, DatabaseReplicated, sequential)' --workflow "PR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
-          else
-            python3 -m praktika run 'Stateless tests (amd_binary, old analyzer, s3 storage, DatabaseReplicated, sequential)' --workflow "PR" --ci |& tee ./ci/tmp/job.log
-          fi
-
-  stateless_tests_amd_binary_parallelreplicas_s3_storage_parallel:
-    runs-on: [self-hosted, amd-medium]
-    needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, style_check, fast_test, build_arm_tidy, build_amd_debug, build_amd_asan, build_amd_tsan, build_amd_binary, build_arm_binary, stateless_tests_amd_asan_distributed_plan_parallel_1_2, stateless_tests_amd_asan_distributed_plan_parallel_2_2, stateless_tests_amd_debug_parallel, stateless_tests_amd_tsan_parallel_1_2, stateless_tests_amd_tsan_parallel_2_2, stateless_tests_arm_binary_parallel]
-    if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhbWRfYmluYXJ5LCBQYXJhbGxlbFJlcGxpY2FzLCBzMyBzdG9yYWdlLCBwYXJhbGxlbCk=') }}
-    name: "Stateless tests (amd_binary, ParallelReplicas, s3 storage, parallel)"
-    outputs:
-      data: ${{ steps.run.outputs.DATA }}
-      pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }}
-    steps:
-      - name: Checkout code
-        uses: actions/checkout@v4
-        with:
-          ref: ${{ env.CHECKOUT_REF }}
-
-      - name: Prepare env script
-        run: |
-          rm -rf ./ci/tmp
-          mkdir -p ./ci/tmp
-          cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF'
-          export PYTHONPATH=./ci:.:
-
-          cat > ./ci/tmp/workflow_job.json << 'EOF'
-          ${{ toJson(job) }}
-          EOF
-          cat > ./ci/tmp/workflow_status.json << 'EOF'
-          ${{ toJson(needs) }}
-          EOF
-          ENV_SETUP_SCRIPT_EOF
-
-      - name: Run
-        id: run
-        run: |
-          . ./ci/tmp/praktika_setup_env.sh
-          set -o pipefail
-          if command -v ts &> /dev/null; then
-            python3 -m praktika run 'Stateless tests (amd_binary, ParallelReplicas, s3 storage, parallel)' --workflow "PR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
-          else
-            python3 -m praktika run 'Stateless tests (amd_binary, ParallelReplicas, s3 storage, parallel)' --workflow "PR" --ci |& tee ./ci/tmp/job.log
-          fi
-
-  stateless_tests_amd_binary_parallelreplicas_s3_storage_sequential:
-    runs-on: [self-hosted, amd-small]
-    needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, style_check, fast_test, build_arm_tidy, build_amd_debug, build_amd_asan, build_amd_tsan, build_amd_binary, build_arm_binary, stateless_tests_amd_asan_distributed_plan_parallel_1_2, stateless_tests_amd_asan_distributed_plan_parallel_2_2, stateless_tests_amd_debug_parallel, stateless_tests_amd_tsan_parallel_1_2, stateless_tests_amd_tsan_parallel_2_2, stateless_tests_arm_binary_parallel]
-    if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhbWRfYmluYXJ5LCBQYXJhbGxlbFJlcGxpY2FzLCBzMyBzdG9yYWdlLCBzZXF1ZW50aWFsKQ==') }}
-    name: "Stateless tests (amd_binary, ParallelReplicas, s3 storage, sequential)"
-    outputs:
-      data: ${{ steps.run.outputs.DATA }}
-      pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }}
-    steps:
-      - name: Checkout code
-        uses: actions/checkout@v4
-        with:
-          ref: ${{ env.CHECKOUT_REF }}
-
-      - name: Prepare env script
-        run: |
-          rm -rf ./ci/tmp
-          mkdir -p ./ci/tmp
-          cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF'
-          export PYTHONPATH=./ci:.:
-
-          cat > ./ci/tmp/workflow_job.json << 'EOF'
-          ${{ toJson(job) }}
-          EOF
-          cat > ./ci/tmp/workflow_status.json << 'EOF'
-          ${{ toJson(needs) }}
-          EOF
-          ENV_SETUP_SCRIPT_EOF
-
-      - name: Run
-        id: run
-        run: |
-          . ./ci/tmp/praktika_setup_env.sh
-          set -o pipefail
-          if command -v ts &> /dev/null; then
-            python3 -m praktika run 'Stateless tests (amd_binary, ParallelReplicas, s3 storage, sequential)' --workflow "PR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
-          else
-            python3 -m praktika run 'Stateless tests (amd_binary, ParallelReplicas, s3 storage, sequential)' --workflow "PR" --ci |& tee ./ci/tmp/job.log
-          fi
-
-  stateless_tests_amd_debug_asyncinsert_s3_storage_parallel:
-    runs-on: [self-hosted, amd-medium]
-    needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, style_check, fast_test, build_arm_tidy, build_amd_debug, build_amd_asan, build_amd_tsan, build_arm_binary, stateless_tests_amd_asan_distributed_plan_parallel_1_2, stateless_tests_amd_asan_distributed_plan_parallel_2_2, stateless_tests_amd_debug_parallel, stateless_tests_amd_tsan_parallel_1_2, stateless_tests_amd_tsan_parallel_2_2, stateless_tests_arm_binary_parallel]
-    if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhbWRfZGVidWcsIEFzeW5jSW5zZXJ0LCBzMyBzdG9yYWdlLCBwYXJhbGxlbCk=') }}
-    name: "Stateless tests (amd_debug, AsyncInsert, s3 storage, parallel)"
-    outputs:
-      data: ${{ steps.run.outputs.DATA }}
-      pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }}
-    steps:
-      - name: Checkout code
-        uses: actions/checkout@v4
-        with:
-          ref: ${{ env.CHECKOUT_REF }}
-
-      - name: Prepare env script
-        run: |
-          rm -rf ./ci/tmp
-          mkdir -p ./ci/tmp
-          cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF'
-          export PYTHONPATH=./ci:.:
-
-          cat > ./ci/tmp/workflow_job.json << 'EOF'
-          ${{ toJson(job) }}
-          EOF
-          cat > ./ci/tmp/workflow_status.json << 'EOF'
-          ${{ toJson(needs) }}
-          EOF
-          ENV_SETUP_SCRIPT_EOF
-
-      - name: Run
-        id: run
-        run: |
-          . ./ci/tmp/praktika_setup_env.sh
-          set -o pipefail
-          if command -v ts &> /dev/null; then
-            python3 -m praktika run 'Stateless tests (amd_debug, AsyncInsert, s3 storage, parallel)' --workflow "PR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
-          else
-            python3 -m praktika run 'Stateless tests (amd_debug, AsyncInsert, s3 storage, parallel)' --workflow "PR" --ci |& tee ./ci/tmp/job.log
-          fi
-
-  stateless_tests_amd_debug_asyncinsert_s3_storage_sequential:
-    runs-on: [self-hosted, amd-small]
-    needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, style_check, fast_test, build_arm_tidy, build_amd_debug, build_amd_asan, build_amd_tsan, build_arm_binary, stateless_tests_amd_asan_distributed_plan_parallel_1_2, stateless_tests_amd_asan_distributed_plan_parallel_2_2, stateless_tests_amd_debug_parallel, stateless_tests_amd_tsan_parallel_1_2, stateless_tests_amd_tsan_parallel_2_2, stateless_tests_arm_binary_parallel]
-    if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhbWRfZGVidWcsIEFzeW5jSW5zZXJ0LCBzMyBzdG9yYWdlLCBzZXF1ZW50aWFsKQ==') }}
-    name: "Stateless tests (amd_debug, AsyncInsert, s3 storage, sequential)"
-    outputs:
-      data: ${{ steps.run.outputs.DATA }}
-      pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }}
-    steps:
-      - name: Checkout code
-        uses: actions/checkout@v4
-        with:
-          ref: ${{ env.CHECKOUT_REF }}
-
-      - name: Prepare env script
-        run: |
-          rm -rf ./ci/tmp
-          mkdir -p ./ci/tmp
-          cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF'
-          export PYTHONPATH=./ci:.:
-
-          cat > ./ci/tmp/workflow_job.json << 'EOF'
-          ${{ toJson(job) }}
-          EOF
-          cat > ./ci/tmp/workflow_status.json << 'EOF'
-          ${{ toJson(needs) }}
-          EOF
-          ENV_SETUP_SCRIPT_EOF
-
-      - name: Run
-        id: run
-        run: |
-          . ./ci/tmp/praktika_setup_env.sh
-          set -o pipefail
-          if command -v ts &> /dev/null; then
-            python3 -m praktika run 'Stateless tests (amd_debug, AsyncInsert, s3 storage, sequential)' --workflow "PR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
-          else
-            python3 -m praktika run 'Stateless tests (amd_debug, AsyncInsert, s3 storage, sequential)' --workflow "PR" --ci |& tee ./ci/tmp/job.log
-          fi
-
-  stateless_tests_amd_debug_parallel:
-    runs-on: [self-hosted, amd-medium-cpu]
-    needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, style_check, fast_test, build_arm_tidy, build_amd_debug]
-    if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhbWRfZGVidWcsIHBhcmFsbGVsKQ==') }}
-    name: "Stateless tests (amd_debug, parallel)"
-    outputs:
-      data: ${{ steps.run.outputs.DATA }}
-      pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }}
-    steps:
-      - name: Checkout code
-        uses: actions/checkout@v4
-        with:
-          ref: ${{ env.CHECKOUT_REF }}
-
-      - name: Prepare env script
-        run: |
-          rm -rf ./ci/tmp
-          mkdir -p ./ci/tmp
-          cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF'
-          export PYTHONPATH=./ci:.:
-
-          cat > ./ci/tmp/workflow_job.json << 'EOF'
-          ${{ toJson(job) }}
-          EOF
-          cat > ./ci/tmp/workflow_status.json << 'EOF'
-          ${{ toJson(needs) }}
-          EOF
-          ENV_SETUP_SCRIPT_EOF
-
-      - name: Run
-        id: run
-        run: |
-          . ./ci/tmp/praktika_setup_env.sh
-          set -o pipefail
-          if command -v ts &> /dev/null; then
-            python3 -m praktika run 'Stateless tests (amd_debug, parallel)' --workflow "PR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
-          else
-            python3 -m praktika run 'Stateless tests (amd_debug, parallel)' --workflow "PR" --ci |& tee ./ci/tmp/job.log
-          fi
-
-  stateless_tests_amd_debug_sequential:
-    runs-on: [self-hosted, amd-small]
-    needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, style_check, fast_test, build_arm_tidy, build_amd_debug, build_amd_asan, build_amd_tsan, build_arm_binary, stateless_tests_amd_asan_distributed_plan_parallel_1_2, stateless_tests_amd_asan_distributed_plan_parallel_2_2, stateless_tests_amd_debug_parallel, stateless_tests_amd_tsan_parallel_1_2, stateless_tests_amd_tsan_parallel_2_2, stateless_tests_arm_binary_parallel]
-    if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhbWRfZGVidWcsIHNlcXVlbnRpYWwp') }}
-    name: "Stateless tests (amd_debug, sequential)"
-    outputs:
-      data: ${{ steps.run.outputs.DATA }}
-      pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }}
-    steps:
-      - name: Checkout code
-        uses: actions/checkout@v4
-        with:
-          ref: ${{ env.CHECKOUT_REF }}
-
-      - name: Prepare env script
-        run: |
-          rm -rf ./ci/tmp
-          mkdir -p ./ci/tmp
-          cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF'
-          export PYTHONPATH=./ci:.:
-
-          cat > ./ci/tmp/workflow_job.json << 'EOF'
-          ${{ toJson(job) }}
-          EOF
-          cat > ./ci/tmp/workflow_status.json << 'EOF'
-          ${{ toJson(needs) }}
-          EOF
-          ENV_SETUP_SCRIPT_EOF
-
-      - name: Run
-        id: run
-        run: |
-          . ./ci/tmp/praktika_setup_env.sh
-          set -o pipefail
-          if command -v ts &> /dev/null; then
-            python3 -m praktika run 'Stateless tests (amd_debug, sequential)' --workflow "PR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
-          else
-            python3 -m praktika run 'Stateless tests (amd_debug, sequential)' --workflow "PR" --ci |& tee ./ci/tmp/job.log
-          fi
-
-  stateless_tests_amd_tsan_parallel_1_2:
-    runs-on: [self-hosted, amd-large]
-    needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, style_check, fast_test, build_arm_tidy, build_amd_tsan]
-    if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhbWRfdHNhbiwgcGFyYWxsZWwsIDEvMik=') }}
-    name: "Stateless tests (amd_tsan, parallel, 1/2)"
-    outputs:
-      data: ${{ steps.run.outputs.DATA }}
-      pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }}
-    steps:
-      - name: Checkout code
-        uses: actions/checkout@v4
-        with:
-          ref: ${{ env.CHECKOUT_REF }}
-
-      - name: Prepare env script
-        run: |
-          rm -rf ./ci/tmp
-          mkdir -p ./ci/tmp
-          cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF'
-          export PYTHONPATH=./ci:.:
-
-          cat > ./ci/tmp/workflow_job.json << 'EOF'
-          ${{ toJson(job) }}
-          EOF
-          cat > ./ci/tmp/workflow_status.json << 'EOF'
-          ${{ toJson(needs) }}
-          EOF
-          ENV_SETUP_SCRIPT_EOF
-
-      - name: Run
-        id: run
-        run: |
-          . ./ci/tmp/praktika_setup_env.sh
-          set -o pipefail
-          if command -v ts &> /dev/null; then
-            python3 -m praktika run 'Stateless tests (amd_tsan, parallel, 1/2)' --workflow "PR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
-          else
-            python3 -m praktika run 'Stateless tests (amd_tsan, parallel, 1/2)' --workflow "PR" --ci |& tee ./ci/tmp/job.log
-          fi
-
-  stateless_tests_amd_tsan_parallel_2_2:
-    runs-on: [self-hosted, amd-large]
-    needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, style_check, fast_test, build_arm_tidy, build_amd_tsan]
-    if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhbWRfdHNhbiwgcGFyYWxsZWwsIDIvMik=') }}
-    name: "Stateless tests (amd_tsan, parallel, 2/2)"
-    outputs:
-      data: ${{ steps.run.outputs.DATA }}
-      pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }}
-    steps:
-      - name: Checkout code
-        uses: actions/checkout@v4
-        with:
-          ref: ${{ env.CHECKOUT_REF }}
-
-      - name: Prepare env script
-        run: |
-          rm -rf ./ci/tmp
-          mkdir -p ./ci/tmp
-          cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF'
-          export PYTHONPATH=./ci:.:
-
-          cat > ./ci/tmp/workflow_job.json << 'EOF'
-          ${{ toJson(job) }}
-          EOF
-          cat > ./ci/tmp/workflow_status.json << 'EOF'
-          ${{ toJson(needs) }}
-          EOF
-          ENV_SETUP_SCRIPT_EOF
-
-      - name: Run
-        id: run
-        run: |
-          . ./ci/tmp/praktika_setup_env.sh
-          set -o pipefail
-          if command -v ts &> /dev/null; then
-            python3 -m praktika run 'Stateless tests (amd_tsan, parallel, 2/2)' --workflow "PR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
-          else
-            python3 -m praktika run 'Stateless tests (amd_tsan, parallel, 2/2)' --workflow "PR" --ci |& tee ./ci/tmp/job.log
-          fi
-
-  stateless_tests_amd_tsan_sequential_1_2:
-    runs-on: [self-hosted, amd-small]
-    needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, style_check, fast_test, build_arm_tidy, build_amd_debug, build_amd_asan, build_amd_tsan, build_arm_binary, stateless_tests_amd_asan_distributed_plan_parallel_1_2, stateless_tests_amd_asan_distributed_plan_parallel_2_2, stateless_tests_amd_debug_parallel, stateless_tests_amd_tsan_parallel_1_2, stateless_tests_amd_tsan_parallel_2_2, stateless_tests_arm_binary_parallel]
-    if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhbWRfdHNhbiwgc2VxdWVudGlhbCwgMS8yKQ==') }}
-    name: "Stateless tests (amd_tsan, sequential, 1/2)"
-    outputs:
-      data: ${{ steps.run.outputs.DATA }}
-      pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }}
-    steps:
-      - name: Checkout code
-        uses: actions/checkout@v4
-        with:
-          ref: ${{ env.CHECKOUT_REF }}
-
-      - name: Prepare env script
-        run: |
-          rm -rf ./ci/tmp
-          mkdir -p ./ci/tmp
-          cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF'
-          export PYTHONPATH=./ci:.:
-
-          cat > ./ci/tmp/workflow_job.json << 'EOF'
-          ${{ toJson(job) }}
-          EOF
-          cat > ./ci/tmp/workflow_status.json << 'EOF'
-          ${{ toJson(needs) }}
-          EOF
-          ENV_SETUP_SCRIPT_EOF
-
-      - name: Run
-        id: run
-        run: |
-          . ./ci/tmp/praktika_setup_env.sh
-          set -o pipefail
-          if command -v ts &> /dev/null; then
-            python3 -m praktika run 'Stateless tests (amd_tsan, sequential, 1/2)' --workflow "PR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
-          else
-            python3 -m praktika run 'Stateless tests (amd_tsan, sequential, 1/2)' --workflow "PR" --ci |& tee ./ci/tmp/job.log
-          fi
-
-  stateless_tests_amd_tsan_sequential_2_2:
-    runs-on: [self-hosted, amd-small]
-    needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, style_check, fast_test, build_arm_tidy, build_amd_debug, build_amd_asan, build_amd_tsan, build_arm_binary, stateless_tests_amd_asan_distributed_plan_parallel_1_2, stateless_tests_amd_asan_distributed_plan_parallel_2_2, stateless_tests_amd_debug_parallel, stateless_tests_amd_tsan_parallel_1_2, stateless_tests_amd_tsan_parallel_2_2, stateless_tests_arm_binary_parallel]
-    if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhbWRfdHNhbiwgc2VxdWVudGlhbCwgMi8yKQ==') }}
-    name: "Stateless tests (amd_tsan, sequential, 2/2)"
-    outputs:
-      data: ${{ steps.run.outputs.DATA }}
-      pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }}
-    steps:
-      - name: Checkout code
-        uses: actions/checkout@v4
-        with:
-          ref: ${{ env.CHECKOUT_REF }}
-
-      - name: Prepare env script
-        run: |
-          rm -rf ./ci/tmp
-          mkdir -p ./ci/tmp
-          cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF'
-          export PYTHONPATH=./ci:.:
-
-          cat > ./ci/tmp/workflow_job.json << 'EOF'
-          ${{ toJson(job) }}
-          EOF
-          cat > ./ci/tmp/workflow_status.json << 'EOF'
-          ${{ toJson(needs) }}
-          EOF
-          ENV_SETUP_SCRIPT_EOF
-
-      - name: Run
-        id: run
-        run: |
-          . ./ci/tmp/praktika_setup_env.sh
-          set -o pipefail
-          if command -v ts &> /dev/null; then
-            python3 -m praktika run 'Stateless tests (amd_tsan, sequential, 2/2)' --workflow "PR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
-          else
-            python3 -m praktika run 'Stateless tests (amd_tsan, sequential, 2/2)' --workflow "PR" --ci |& tee ./ci/tmp/job.log
-          fi
-
-  stateless_tests_amd_msan_parallel_1_2:
-    runs-on: [self-hosted, amd-large]
-    needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, style_check, fast_test, build_arm_tidy, build_amd_debug, build_amd_asan, build_amd_tsan, build_amd_msan, build_arm_binary, stateless_tests_amd_asan_distributed_plan_parallel_1_2, stateless_tests_amd_asan_distributed_plan_parallel_2_2, stateless_tests_amd_debug_parallel, stateless_tests_amd_tsan_parallel_1_2, stateless_tests_amd_tsan_parallel_2_2, stateless_tests_arm_binary_parallel]
-    if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhbWRfbXNhbiwgcGFyYWxsZWwsIDEvMik=') }}
-    name: "Stateless tests (amd_msan, parallel, 1/2)"
-    outputs:
-      data: ${{ steps.run.outputs.DATA }}
-      pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }}
-    steps:
-      - name: Checkout code
-        uses: actions/checkout@v4
-        with:
-          ref: ${{ env.CHECKOUT_REF }}
-
-      - name: Prepare env script
-        run: |
-          rm -rf ./ci/tmp
-          mkdir -p ./ci/tmp
-          cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF'
-          export PYTHONPATH=./ci:.:
-
-          cat > ./ci/tmp/workflow_job.json << 'EOF'
-          ${{ toJson(job) }}
-          EOF
-          cat > ./ci/tmp/workflow_status.json << 'EOF'
-          ${{ toJson(needs) }}
-          EOF
-          ENV_SETUP_SCRIPT_EOF
-
-      - name: Run
-        id: run
-        run: |
-          . ./ci/tmp/praktika_setup_env.sh
-          set -o pipefail
-          if command -v ts &> /dev/null; then
-            python3 -m praktika run 'Stateless tests (amd_msan, parallel, 1/2)' --workflow "PR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
-          else
-            python3 -m praktika run 'Stateless tests (amd_msan, parallel, 1/2)' --workflow "PR" --ci |& tee ./ci/tmp/job.log
-          fi
-
-  stateless_tests_amd_msan_parallel_2_2:
-    runs-on: [self-hosted, amd-large]
-    needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, style_check, fast_test, build_arm_tidy, build_amd_debug, build_amd_asan, build_amd_tsan, build_amd_msan, build_arm_binary, stateless_tests_amd_asan_distributed_plan_parallel_1_2, stateless_tests_amd_asan_distributed_plan_parallel_2_2, stateless_tests_amd_debug_parallel, stateless_tests_amd_tsan_parallel_1_2, stateless_tests_amd_tsan_parallel_2_2, stateless_tests_arm_binary_parallel]
-    if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhbWRfbXNhbiwgcGFyYWxsZWwsIDIvMik=') }}
-    name: "Stateless tests (amd_msan, parallel, 2/2)"
-    outputs:
-      data: ${{ steps.run.outputs.DATA }}
-      pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }}
-    steps:
-      - name: Checkout code
-        uses: actions/checkout@v4
-        with:
-          ref: ${{ env.CHECKOUT_REF }}
-
-      - name: Prepare env script
-        run: |
-          rm -rf ./ci/tmp
-          mkdir -p ./ci/tmp
-          cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF'
-          export PYTHONPATH=./ci:.:
-
-          cat > ./ci/tmp/workflow_job.json << 'EOF'
-          ${{ toJson(job) }}
-          EOF
-          cat > ./ci/tmp/workflow_status.json << 'EOF'
-          ${{ toJson(needs) }}
-          EOF
-          ENV_SETUP_SCRIPT_EOF
-
-      - name: Run
-        id: run
-        run: |
-          . ./ci/tmp/praktika_setup_env.sh
-          set -o pipefail
-          if command -v ts &> /dev/null; then
-            python3 -m praktika run 'Stateless tests (amd_msan, parallel, 2/2)' --workflow "PR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
-          else
-            python3 -m praktika run 'Stateless tests (amd_msan, parallel, 2/2)' --workflow "PR" --ci |& tee ./ci/tmp/job.log
-          fi
-
-  stateless_tests_amd_msan_sequential_1_2:
-    runs-on: [self-hosted, amd-small-mem]
-    needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, style_check, fast_test, build_arm_tidy, build_amd_debug, build_amd_asan, build_amd_tsan, build_amd_msan, build_arm_binary, stateless_tests_amd_asan_distributed_plan_parallel_1_2, stateless_tests_amd_asan_distributed_plan_parallel_2_2, stateless_tests_amd_debug_parallel, stateless_tests_amd_tsan_parallel_1_2, stateless_tests_amd_tsan_parallel_2_2, stateless_tests_arm_binary_parallel]
-    if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhbWRfbXNhbiwgc2VxdWVudGlhbCwgMS8yKQ==') }}
-    name: "Stateless tests (amd_msan, sequential, 1/2)"
-    outputs:
-      data: ${{ steps.run.outputs.DATA }}
-      pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }}
-    steps:
-      - name: Checkout code
-        uses: actions/checkout@v4
-        with:
-          ref: ${{ env.CHECKOUT_REF }}
-
-      - name: Prepare env script
-        run: |
-          rm -rf ./ci/tmp
-          mkdir -p ./ci/tmp
-          cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF'
-          export PYTHONPATH=./ci:.:
-
-          cat > ./ci/tmp/workflow_job.json << 'EOF'
-          ${{ toJson(job) }}
-          EOF
-          cat > ./ci/tmp/workflow_status.json << 'EOF'
-          ${{ toJson(needs) }}
-          EOF
-          ENV_SETUP_SCRIPT_EOF
-
-      - name: Run
-        id: run
-        run: |
-          . ./ci/tmp/praktika_setup_env.sh
-          set -o pipefail
-          if command -v ts &> /dev/null; then
-            python3 -m praktika run 'Stateless tests (amd_msan, sequential, 1/2)' --workflow "PR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
-          else
-            python3 -m praktika run 'Stateless tests (amd_msan, sequential, 1/2)' --workflow "PR" --ci |& tee ./ci/tmp/job.log
-          fi
-
-  stateless_tests_amd_msan_sequential_2_2:
-    runs-on: [self-hosted, amd-small-mem]
-    needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, style_check, fast_test, build_arm_tidy, build_amd_debug, build_amd_asan, build_amd_tsan, build_amd_msan, build_arm_binary, stateless_tests_amd_asan_distributed_plan_parallel_1_2, stateless_tests_amd_asan_distributed_plan_parallel_2_2, stateless_tests_amd_debug_parallel, stateless_tests_amd_tsan_parallel_1_2, stateless_tests_amd_tsan_parallel_2_2, stateless_tests_arm_binary_parallel]
-    if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhbWRfbXNhbiwgc2VxdWVudGlhbCwgMi8yKQ==') }}
-    name: "Stateless tests (amd_msan, sequential, 2/2)"
-    outputs:
-      data: ${{ steps.run.outputs.DATA }}
-      pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }}
-    steps:
-      - name: Checkout code
-        uses: actions/checkout@v4
-        with:
-          ref: ${{ env.CHECKOUT_REF }}
-
-      - name: Prepare env script
-        run: |
-          rm -rf ./ci/tmp
-          mkdir -p ./ci/tmp
-          cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF'
-          export PYTHONPATH=./ci:.:
-
-          cat > ./ci/tmp/workflow_job.json << 'EOF'
-          ${{ toJson(job) }}
-          EOF
-          cat > ./ci/tmp/workflow_status.json << 'EOF'
-          ${{ toJson(needs) }}
-          EOF
-          ENV_SETUP_SCRIPT_EOF
-
-      - name: Run
-        id: run
-        run: |
-          . ./ci/tmp/praktika_setup_env.sh
-          set -o pipefail
-          if command -v ts &> /dev/null; then
-            python3 -m praktika run 'Stateless tests (amd_msan, sequential, 2/2)' --workflow "PR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
-          else
-            python3 -m praktika run 'Stateless tests (amd_msan, sequential, 2/2)' --workflow "PR" --ci |& tee ./ci/tmp/job.log
-          fi
-
-  stateless_tests_amd_ubsan_parallel:
-    runs-on: [self-hosted, amd-small-mem]
-    needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, style_check, fast_test, build_arm_tidy, build_amd_debug, build_amd_asan, build_amd_tsan, build_amd_ubsan, build_arm_binary, stateless_tests_amd_asan_distributed_plan_parallel_1_2, stateless_tests_amd_asan_distributed_plan_parallel_2_2, stateless_tests_amd_debug_parallel, stateless_tests_amd_tsan_parallel_1_2, stateless_tests_amd_tsan_parallel_2_2, stateless_tests_arm_binary_parallel]
-    if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhbWRfdWJzYW4sIHBhcmFsbGVsKQ==') }}
-    name: "Stateless tests (amd_ubsan, parallel)"
-    outputs:
-      data: ${{ steps.run.outputs.DATA }}
-      pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }}
-    steps:
-      - name: Checkout code
-        uses: actions/checkout@v4
-        with:
-          ref: ${{ env.CHECKOUT_REF }}
-
-      - name: Prepare env script
-        run: |
-          rm -rf ./ci/tmp
-          mkdir -p ./ci/tmp
-          cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF'
-          export PYTHONPATH=./ci:.:
-
-          cat > ./ci/tmp/workflow_job.json << 'EOF'
-          ${{ toJson(job) }}
-          EOF
-          cat > ./ci/tmp/workflow_status.json << 'EOF'
-          ${{ toJson(needs) }}
-          EOF
-          ENV_SETUP_SCRIPT_EOF
-
-      - name: Run
-        id: run
-        run: |
-          . ./ci/tmp/praktika_setup_env.sh
-          set -o pipefail
-          if command -v ts &> /dev/null; then
-            python3 -m praktika run 'Stateless tests (amd_ubsan, parallel)' --workflow "PR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
-          else
-            python3 -m praktika run 'Stateless tests (amd_ubsan, parallel)' --workflow "PR" --ci |& tee ./ci/tmp/job.log
-          fi
-
-  stateless_tests_amd_ubsan_sequential:
-    runs-on: [self-hosted, amd-small-mem]
-    needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, style_check, fast_test, build_arm_tidy, build_amd_debug, build_amd_asan, build_amd_tsan, build_amd_ubsan, build_arm_binary, stateless_tests_amd_asan_distributed_plan_parallel_1_2, stateless_tests_amd_asan_distributed_plan_parallel_2_2, stateless_tests_amd_debug_parallel, stateless_tests_amd_tsan_parallel_1_2, stateless_tests_amd_tsan_parallel_2_2, stateless_tests_arm_binary_parallel]
-    if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhbWRfdWJzYW4sIHNlcXVlbnRpYWwp') }}
-    name: "Stateless tests (amd_ubsan, sequential)"
-    outputs:
-      data: ${{ steps.run.outputs.DATA }}
-      pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }}
-    steps:
-      - name: Checkout code
-        uses: actions/checkout@v4
-        with:
-          ref: ${{ env.CHECKOUT_REF }}
-
-      - name: Prepare env script
-        run: |
-          rm -rf ./ci/tmp
-          mkdir -p ./ci/tmp
-          cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF'
-          export PYTHONPATH=./ci:.:
-
-          cat > ./ci/tmp/workflow_job.json << 'EOF'
-          ${{ toJson(job) }}
-          EOF
-          cat > ./ci/tmp/workflow_status.json << 'EOF'
-          ${{ toJson(needs) }}
-          EOF
-          ENV_SETUP_SCRIPT_EOF
-
-      - name: Run
-        id: run
-        run: |
-          . ./ci/tmp/praktika_setup_env.sh
-          set -o pipefail
-          if command -v ts &> /dev/null; then
-            python3 -m praktika run 'Stateless tests (amd_ubsan, sequential)' --workflow "PR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
-          else
-            python3 -m praktika run 'Stateless tests (amd_ubsan, sequential)' --workflow "PR" --ci |& tee ./ci/tmp/job.log
-          fi
-
-  stateless_tests_amd_debug_distributed_plan_s3_storage_parallel:
-    runs-on: [self-hosted, amd-medium]
-    needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, style_check, fast_test, build_arm_tidy, build_amd_debug, build_amd_asan, build_amd_tsan, build_arm_binary, stateless_tests_amd_asan_distributed_plan_parallel_1_2, stateless_tests_amd_asan_distributed_plan_parallel_2_2, stateless_tests_amd_debug_parallel, stateless_tests_amd_tsan_parallel_1_2, stateless_tests_amd_tsan_parallel_2_2, stateless_tests_arm_binary_parallel]
-    if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhbWRfZGVidWcsIGRpc3RyaWJ1dGVkIHBsYW4sIHMzIHN0b3JhZ2UsIHBhcmFsbGVsKQ==') }}
-    name: "Stateless tests (amd_debug, distributed plan, s3 storage, parallel)"
-    outputs:
-      data: ${{ steps.run.outputs.DATA }}
-      pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }}
-    steps:
-      - name: Checkout code
-        uses: actions/checkout@v4
-        with:
-          ref: ${{ env.CHECKOUT_REF }}
-
-      - name: Prepare env script
-        run: |
-          rm -rf ./ci/tmp
-          mkdir -p ./ci/tmp
-          cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF'
-          export PYTHONPATH=./ci:.:
-
-          cat > ./ci/tmp/workflow_job.json << 'EOF'
-          ${{ toJson(job) }}
-          EOF
-          cat > ./ci/tmp/workflow_status.json << 'EOF'
-          ${{ toJson(needs) }}
-          EOF
-          ENV_SETUP_SCRIPT_EOF
-
-      - name: Run
-        id: run
-        run: |
-          . ./ci/tmp/praktika_setup_env.sh
-          set -o pipefail
-          if command -v ts &> /dev/null; then
-            python3 -m praktika run 'Stateless tests (amd_debug, distributed plan, s3 storage, parallel)' --workflow "PR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
-          else
-            python3 -m praktika run 'Stateless tests (amd_debug, distributed plan, s3 storage, parallel)' --workflow "PR" --ci |& tee ./ci/tmp/job.log
-          fi
-
-  stateless_tests_amd_debug_distributed_plan_s3_storage_sequential:
-    runs-on: [self-hosted, amd-small]
-    needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, style_check, fast_test, build_arm_tidy, build_amd_debug, build_amd_asan, build_amd_tsan, build_arm_binary, stateless_tests_amd_asan_distributed_plan_parallel_1_2, stateless_tests_amd_asan_distributed_plan_parallel_2_2, stateless_tests_amd_debug_parallel, stateless_tests_amd_tsan_parallel_1_2, stateless_tests_amd_tsan_parallel_2_2, stateless_tests_arm_binary_parallel]
-    if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhbWRfZGVidWcsIGRpc3RyaWJ1dGVkIHBsYW4sIHMzIHN0b3JhZ2UsIHNlcXVlbnRpYWwp') }}
-    name: "Stateless tests (amd_debug, distributed plan, s3 storage, sequential)"
-    outputs:
-      data: ${{ steps.run.outputs.DATA }}
-      pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }}
-    steps:
-      - name: Checkout code
-        uses: actions/checkout@v4
-        with:
-          ref: ${{ env.CHECKOUT_REF }}
-
-      - name: Prepare env script
-        run: |
-          rm -rf ./ci/tmp
-          mkdir -p ./ci/tmp
-          cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF'
-          export PYTHONPATH=./ci:.:
-
-          cat > ./ci/tmp/workflow_job.json << 'EOF'
-          ${{ toJson(job) }}
-          EOF
-          cat > ./ci/tmp/workflow_status.json << 'EOF'
-          ${{ toJson(needs) }}
-          EOF
-          ENV_SETUP_SCRIPT_EOF
-
-      - name: Run
-        id: run
-        run: |
-          . ./ci/tmp/praktika_setup_env.sh
-          set -o pipefail
-          if command -v ts &> /dev/null; then
-            python3 -m praktika run 'Stateless tests (amd_debug, distributed plan, s3 storage, sequential)' --workflow "PR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
-          else
-            python3 -m praktika run 'Stateless tests (amd_debug, distributed plan, s3 storage, sequential)' --workflow "PR" --ci |& tee ./ci/tmp/job.log
-          fi
-
-  stateless_tests_amd_tsan_s3_storage_parallel_1_2:
-    runs-on: [self-hosted, amd-medium]
-    needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, style_check, fast_test, build_arm_tidy, build_amd_debug, build_amd_asan, build_amd_tsan, build_arm_binary, stateless_tests_amd_asan_distributed_plan_parallel_1_2, stateless_tests_amd_asan_distributed_plan_parallel_2_2, stateless_tests_amd_debug_parallel, stateless_tests_amd_tsan_parallel_1_2, stateless_tests_amd_tsan_parallel_2_2, stateless_tests_arm_binary_parallel]
-    if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhbWRfdHNhbiwgczMgc3RvcmFnZSwgcGFyYWxsZWwsIDEvMik=') }}
-    name: "Stateless tests (amd_tsan, s3 storage, parallel, 1/2)"
+  stateless_tests_amd_debug_asyncinsert_s3_storage_parallel:
+    runs-on: [self-hosted, altinity-on-demand, altinity-func-tester]
+    needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, fast_test, build_amd_debug, build_amd_asan, build_amd_tsan, build_arm_binary, stateless_tests_amd_asan_distributed_plan_parallel_1_2, stateless_tests_amd_asan_distributed_plan_parallel_2_2, stateless_tests_amd_debug_parallel, stateless_tests_amd_tsan_parallel_1_2, stateless_tests_amd_tsan_parallel_2_2, stateless_tests_arm_binary_parallel]
+    if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhbWRfZGVidWcsIEFzeW5jSW5zZXJ0LCBzMyBzdG9yYWdlLCBwYXJhbGxlbCk=') }}
+    name: "Stateless tests (amd_debug, AsyncInsert, s3 storage, parallel)"
     outputs:
       data: ${{ steps.run.outputs.DATA }}
       pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }}
@@ -2431,6 +1475,13 @@ jobs:
       with:
         ref: ${{ env.CHECKOUT_REF }}
 
+      - name: Setup
+        uses: ./.github/actions/runner_setup
+      - name: Docker setup
+        uses: ./.github/actions/docker_setup
+        with:
+          test_name: "Stateless tests (amd_debug, AsyncInsert, s3 storage, parallel)"
+
       - name: Prepare env script
         run: |
           rm -rf ./ci/tmp
@@ -2452,16 +1503,16 @@ jobs:
./ci/tmp/praktika_setup_env.sh set -o pipefail if command -v ts &> /dev/null; then - python3 -m praktika run 'Stateless tests (amd_tsan, s3 storage, parallel, 1/2)' --workflow "PR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log + python3 -m praktika run 'Stateless tests (amd_debug, AsyncInsert, s3 storage, parallel)' --workflow "PR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log else - python3 -m praktika run 'Stateless tests (amd_tsan, s3 storage, parallel, 1/2)' --workflow "PR" --ci |& tee ./ci/tmp/job.log + python3 -m praktika run 'Stateless tests (amd_debug, AsyncInsert, s3 storage, parallel)' --workflow "PR" --ci |& tee ./ci/tmp/job.log fi - stateless_tests_amd_tsan_s3_storage_parallel_2_2: - runs-on: [self-hosted, amd-medium] - needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, style_check, fast_test, build_arm_tidy, build_amd_debug, build_amd_asan, build_amd_tsan, build_arm_binary, stateless_tests_amd_asan_distributed_plan_parallel_1_2, stateless_tests_amd_asan_distributed_plan_parallel_2_2, stateless_tests_amd_debug_parallel, stateless_tests_amd_tsan_parallel_1_2, stateless_tests_amd_tsan_parallel_2_2, stateless_tests_arm_binary_parallel] - if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhbWRfdHNhbiwgczMgc3RvcmFnZSwgcGFyYWxsZWwsIDIvMik=') }} - name: "Stateless tests (amd_tsan, s3 storage, parallel, 2/2)" + stateless_tests_amd_debug_asyncinsert_s3_storage_sequential: + runs-on: [self-hosted, altinity-on-demand, altinity-func-tester] + needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, fast_test, build_amd_debug, build_amd_asan, build_amd_tsan, build_arm_binary, stateless_tests_amd_asan_distributed_plan_parallel_1_2, stateless_tests_amd_asan_distributed_plan_parallel_2_2, stateless_tests_amd_debug_parallel, stateless_tests_amd_tsan_parallel_1_2, stateless_tests_amd_tsan_parallel_2_2, stateless_tests_arm_binary_parallel] + if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhbWRfZGVidWcsIEFzeW5jSW5zZXJ0LCBzMyBzdG9yYWdlLCBzZXF1ZW50aWFsKQ==') }} + name: "Stateless tests (amd_debug, AsyncInsert, s3 storage, sequential)" outputs: data: ${{ steps.run.outputs.DATA }} pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} @@ -2471,6 +1522,13 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Stateless tests (amd_debug, AsyncInsert, s3 storage, sequential)" + - name: Prepare env script run: | rm -rf ./ci/tmp @@ -2492,16 +1550,16 @@ jobs: . 
./ci/tmp/praktika_setup_env.sh set -o pipefail if command -v ts &> /dev/null; then - python3 -m praktika run 'Stateless tests (amd_tsan, s3 storage, parallel, 2/2)' --workflow "PR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log + python3 -m praktika run 'Stateless tests (amd_debug, AsyncInsert, s3 storage, sequential)' --workflow "PR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log else - python3 -m praktika run 'Stateless tests (amd_tsan, s3 storage, parallel, 2/2)' --workflow "PR" --ci |& tee ./ci/tmp/job.log + python3 -m praktika run 'Stateless tests (amd_debug, AsyncInsert, s3 storage, sequential)' --workflow "PR" --ci |& tee ./ci/tmp/job.log fi - stateless_tests_amd_tsan_s3_storage_sequential_1_2: - runs-on: [self-hosted, amd-small-mem] - needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, style_check, fast_test, build_arm_tidy, build_amd_debug, build_amd_asan, build_amd_tsan, build_arm_binary, stateless_tests_amd_asan_distributed_plan_parallel_1_2, stateless_tests_amd_asan_distributed_plan_parallel_2_2, stateless_tests_amd_debug_parallel, stateless_tests_amd_tsan_parallel_1_2, stateless_tests_amd_tsan_parallel_2_2, stateless_tests_arm_binary_parallel] - if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhbWRfdHNhbiwgczMgc3RvcmFnZSwgc2VxdWVudGlhbCwgMS8yKQ==') }} - name: "Stateless tests (amd_tsan, s3 storage, sequential, 1/2)" + stateless_tests_amd_debug_parallel: + runs-on: [self-hosted, altinity-on-demand, altinity-func-tester] + needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, fast_test, build_amd_debug] + if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhbWRfZGVidWcsIHBhcmFsbGVsKQ==') }} + name: "Stateless tests (amd_debug, parallel)" outputs: data: ${{ steps.run.outputs.DATA }} pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} @@ -2511,6 +1569,13 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Stateless tests (amd_debug, parallel)" + - name: Prepare env script run: | rm -rf ./ci/tmp @@ -2532,16 +1597,16 @@ jobs: . 
./ci/tmp/praktika_setup_env.sh set -o pipefail if command -v ts &> /dev/null; then - python3 -m praktika run 'Stateless tests (amd_tsan, s3 storage, sequential, 1/2)' --workflow "PR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log + python3 -m praktika run 'Stateless tests (amd_debug, parallel)' --workflow "PR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log else - python3 -m praktika run 'Stateless tests (amd_tsan, s3 storage, sequential, 1/2)' --workflow "PR" --ci |& tee ./ci/tmp/job.log + python3 -m praktika run 'Stateless tests (amd_debug, parallel)' --workflow "PR" --ci |& tee ./ci/tmp/job.log fi - stateless_tests_amd_tsan_s3_storage_sequential_2_2: - runs-on: [self-hosted, amd-small-mem] - needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, style_check, fast_test, build_arm_tidy, build_amd_debug, build_amd_asan, build_amd_tsan, build_arm_binary, stateless_tests_amd_asan_distributed_plan_parallel_1_2, stateless_tests_amd_asan_distributed_plan_parallel_2_2, stateless_tests_amd_debug_parallel, stateless_tests_amd_tsan_parallel_1_2, stateless_tests_amd_tsan_parallel_2_2, stateless_tests_arm_binary_parallel] - if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhbWRfdHNhbiwgczMgc3RvcmFnZSwgc2VxdWVudGlhbCwgMi8yKQ==') }} - name: "Stateless tests (amd_tsan, s3 storage, sequential, 2/2)" + stateless_tests_amd_debug_sequential: + runs-on: [self-hosted, altinity-on-demand, altinity-func-tester] + needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, fast_test, build_amd_debug, build_amd_asan, build_amd_tsan, build_arm_binary, stateless_tests_amd_asan_distributed_plan_parallel_1_2, stateless_tests_amd_asan_distributed_plan_parallel_2_2, stateless_tests_amd_debug_parallel, stateless_tests_amd_tsan_parallel_1_2, stateless_tests_amd_tsan_parallel_2_2, stateless_tests_arm_binary_parallel] + if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhbWRfZGVidWcsIHNlcXVlbnRpYWwp') }} + name: "Stateless tests (amd_debug, sequential)" outputs: data: ${{ steps.run.outputs.DATA }} pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} @@ -2551,6 +1616,13 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Stateless tests (amd_debug, sequential)" + - name: Prepare env script run: | rm -rf ./ci/tmp @@ -2572,16 +1644,16 @@ jobs: . 
./ci/tmp/praktika_setup_env.sh set -o pipefail if command -v ts &> /dev/null; then - python3 -m praktika run 'Stateless tests (amd_tsan, s3 storage, sequential, 2/2)' --workflow "PR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log + python3 -m praktika run 'Stateless tests (amd_debug, sequential)' --workflow "PR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log else - python3 -m praktika run 'Stateless tests (amd_tsan, s3 storage, sequential, 2/2)' --workflow "PR" --ci |& tee ./ci/tmp/job.log + python3 -m praktika run 'Stateless tests (amd_debug, sequential)' --workflow "PR" --ci |& tee ./ci/tmp/job.log fi - stateless_tests_arm_binary_parallel: - runs-on: [self-hosted, arm-medium-cpu] - needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, style_check, fast_test, build_arm_tidy, build_arm_binary] - if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhcm1fYmluYXJ5LCBwYXJhbGxlbCk=') }} - name: "Stateless tests (arm_binary, parallel)" + stateless_tests_amd_tsan_parallel_1_2: + runs-on: [self-hosted, altinity-on-demand, altinity-func-tester] + needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, fast_test, build_amd_tsan] + if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhbWRfdHNhbiwgcGFyYWxsZWwsIDEvMik=') }} + name: "Stateless tests (amd_tsan, parallel, 1/2)" outputs: data: ${{ steps.run.outputs.DATA }} pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} @@ -2591,6 +1663,13 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Stateless tests (amd_tsan, parallel, 1/2)" + - name: Prepare env script run: | rm -rf ./ci/tmp @@ -2612,16 +1691,16 @@ jobs: . 
./ci/tmp/praktika_setup_env.sh set -o pipefail if command -v ts &> /dev/null; then - python3 -m praktika run 'Stateless tests (arm_binary, parallel)' --workflow "PR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log + python3 -m praktika run 'Stateless tests (amd_tsan, parallel, 1/2)' --workflow "PR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log else - python3 -m praktika run 'Stateless tests (arm_binary, parallel)' --workflow "PR" --ci |& tee ./ci/tmp/job.log + python3 -m praktika run 'Stateless tests (amd_tsan, parallel, 1/2)' --workflow "PR" --ci |& tee ./ci/tmp/job.log fi - stateless_tests_arm_binary_sequential: - runs-on: [self-hosted, arm-small] - needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, style_check, fast_test, build_arm_tidy, build_amd_debug, build_amd_asan, build_amd_tsan, build_arm_binary, stateless_tests_amd_asan_distributed_plan_parallel_1_2, stateless_tests_amd_asan_distributed_plan_parallel_2_2, stateless_tests_amd_debug_parallel, stateless_tests_amd_tsan_parallel_1_2, stateless_tests_amd_tsan_parallel_2_2, stateless_tests_arm_binary_parallel] - if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhcm1fYmluYXJ5LCBzZXF1ZW50aWFsKQ==') }} - name: "Stateless tests (arm_binary, sequential)" + stateless_tests_amd_tsan_parallel_2_2: + runs-on: [self-hosted, altinity-on-demand, altinity-func-tester] + needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, fast_test, build_amd_tsan] + if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhbWRfdHNhbiwgcGFyYWxsZWwsIDIvMik=') }} + name: "Stateless tests (amd_tsan, parallel, 2/2)" outputs: data: ${{ steps.run.outputs.DATA }} pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} @@ -2631,6 +1710,13 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Stateless tests (amd_tsan, parallel, 2/2)" + - name: Prepare env script run: | rm -rf ./ci/tmp @@ -2652,16 +1738,16 @@ jobs: . 
./ci/tmp/praktika_setup_env.sh set -o pipefail if command -v ts &> /dev/null; then - python3 -m praktika run 'Stateless tests (arm_binary, sequential)' --workflow "PR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log + python3 -m praktika run 'Stateless tests (amd_tsan, parallel, 2/2)' --workflow "PR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log else - python3 -m praktika run 'Stateless tests (arm_binary, sequential)' --workflow "PR" --ci |& tee ./ci/tmp/job.log + python3 -m praktika run 'Stateless tests (amd_tsan, parallel, 2/2)' --workflow "PR" --ci |& tee ./ci/tmp/job.log fi - integration_tests_amd_asan_db_disk_old_analyzer_1_6: - runs-on: [self-hosted, amd-medium] - needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, style_check, fast_test, build_arm_tidy, build_amd_debug, build_amd_asan, build_amd_tsan, build_arm_binary, stateless_tests_amd_asan_distributed_plan_parallel_1_2, stateless_tests_amd_asan_distributed_plan_parallel_2_2, stateless_tests_amd_debug_parallel, stateless_tests_amd_tsan_parallel_1_2, stateless_tests_amd_tsan_parallel_2_2, stateless_tests_arm_binary_parallel] - if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKGFtZF9hc2FuLCBkYiBkaXNrLCBvbGQgYW5hbHl6ZXIsIDEvNik=') }} - name: "Integration tests (amd_asan, db disk, old analyzer, 1/6)" + stateless_tests_amd_tsan_sequential_1_2: + runs-on: [self-hosted, altinity-on-demand, altinity-func-tester] + needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, fast_test, build_amd_debug, build_amd_asan, build_amd_tsan, build_arm_binary, stateless_tests_amd_asan_distributed_plan_parallel_1_2, stateless_tests_amd_asan_distributed_plan_parallel_2_2, stateless_tests_amd_debug_parallel, stateless_tests_amd_tsan_parallel_1_2, stateless_tests_amd_tsan_parallel_2_2, stateless_tests_arm_binary_parallel] + if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhbWRfdHNhbiwgc2VxdWVudGlhbCwgMS8yKQ==') }} + name: "Stateless tests (amd_tsan, sequential, 1/2)" outputs: data: ${{ steps.run.outputs.DATA }} pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} @@ -2671,6 +1757,13 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Stateless tests (amd_tsan, sequential, 1/2)" + - name: Prepare env script run: | rm -rf ./ci/tmp @@ -2692,16 +1785,16 @@ jobs: . 
./ci/tmp/praktika_setup_env.sh set -o pipefail if command -v ts &> /dev/null; then - python3 -m praktika run 'Integration tests (amd_asan, db disk, old analyzer, 1/6)' --workflow "PR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log + python3 -m praktika run 'Stateless tests (amd_tsan, sequential, 1/2)' --workflow "PR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log else - python3 -m praktika run 'Integration tests (amd_asan, db disk, old analyzer, 1/6)' --workflow "PR" --ci |& tee ./ci/tmp/job.log + python3 -m praktika run 'Stateless tests (amd_tsan, sequential, 1/2)' --workflow "PR" --ci |& tee ./ci/tmp/job.log fi - integration_tests_amd_asan_db_disk_old_analyzer_2_6: - runs-on: [self-hosted, amd-medium] - needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, style_check, fast_test, build_arm_tidy, build_amd_debug, build_amd_asan, build_amd_tsan, build_arm_binary, stateless_tests_amd_asan_distributed_plan_parallel_1_2, stateless_tests_amd_asan_distributed_plan_parallel_2_2, stateless_tests_amd_debug_parallel, stateless_tests_amd_tsan_parallel_1_2, stateless_tests_amd_tsan_parallel_2_2, stateless_tests_arm_binary_parallel] - if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKGFtZF9hc2FuLCBkYiBkaXNrLCBvbGQgYW5hbHl6ZXIsIDIvNik=') }} - name: "Integration tests (amd_asan, db disk, old analyzer, 2/6)" + stateless_tests_amd_tsan_sequential_2_2: + runs-on: [self-hosted, altinity-on-demand, altinity-func-tester] + needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, fast_test, build_amd_debug, build_amd_asan, build_amd_tsan, build_arm_binary, stateless_tests_amd_asan_distributed_plan_parallel_1_2, stateless_tests_amd_asan_distributed_plan_parallel_2_2, stateless_tests_amd_debug_parallel, stateless_tests_amd_tsan_parallel_1_2, stateless_tests_amd_tsan_parallel_2_2, stateless_tests_arm_binary_parallel] + if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhbWRfdHNhbiwgc2VxdWVudGlhbCwgMi8yKQ==') }} + name: "Stateless tests (amd_tsan, sequential, 2/2)" outputs: data: ${{ steps.run.outputs.DATA }} pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} @@ -2711,6 +1804,13 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Stateless tests (amd_tsan, sequential, 2/2)" + - name: Prepare env script run: | rm -rf ./ci/tmp @@ -2732,16 +1832,16 @@ jobs: . 
./ci/tmp/praktika_setup_env.sh set -o pipefail if command -v ts &> /dev/null; then - python3 -m praktika run 'Integration tests (amd_asan, db disk, old analyzer, 2/6)' --workflow "PR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log + python3 -m praktika run 'Stateless tests (amd_tsan, sequential, 2/2)' --workflow "PR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log else - python3 -m praktika run 'Integration tests (amd_asan, db disk, old analyzer, 2/6)' --workflow "PR" --ci |& tee ./ci/tmp/job.log + python3 -m praktika run 'Stateless tests (amd_tsan, sequential, 2/2)' --workflow "PR" --ci |& tee ./ci/tmp/job.log fi - integration_tests_amd_asan_db_disk_old_analyzer_3_6: - runs-on: [self-hosted, amd-medium] - needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, style_check, fast_test, build_arm_tidy, build_amd_debug, build_amd_asan, build_amd_tsan, build_arm_binary, stateless_tests_amd_asan_distributed_plan_parallel_1_2, stateless_tests_amd_asan_distributed_plan_parallel_2_2, stateless_tests_amd_debug_parallel, stateless_tests_amd_tsan_parallel_1_2, stateless_tests_amd_tsan_parallel_2_2, stateless_tests_arm_binary_parallel] - if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKGFtZF9hc2FuLCBkYiBkaXNrLCBvbGQgYW5hbHl6ZXIsIDMvNik=') }} - name: "Integration tests (amd_asan, db disk, old analyzer, 3/6)" + stateless_tests_amd_msan_parallel_1_2: + runs-on: [self-hosted, altinity-on-demand, altinity-func-tester] + needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, fast_test, build_amd_debug, build_amd_asan, build_amd_tsan, build_amd_msan, build_arm_binary, stateless_tests_amd_asan_distributed_plan_parallel_1_2, stateless_tests_amd_asan_distributed_plan_parallel_2_2, stateless_tests_amd_debug_parallel, stateless_tests_amd_tsan_parallel_1_2, stateless_tests_amd_tsan_parallel_2_2, stateless_tests_arm_binary_parallel] + if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhbWRfbXNhbiwgcGFyYWxsZWwsIDEvMik=') }} + name: "Stateless tests (amd_msan, parallel, 1/2)" outputs: data: ${{ steps.run.outputs.DATA }} pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} @@ -2751,6 +1851,13 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Stateless tests (amd_msan, parallel, 1/2)" + - name: Prepare env script run: | rm -rf ./ci/tmp @@ -2772,16 +1879,16 @@ jobs: . 
./ci/tmp/praktika_setup_env.sh set -o pipefail if command -v ts &> /dev/null; then - python3 -m praktika run 'Integration tests (amd_asan, db disk, old analyzer, 3/6)' --workflow "PR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log + python3 -m praktika run 'Stateless tests (amd_msan, parallel, 1/2)' --workflow "PR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log else - python3 -m praktika run 'Integration tests (amd_asan, db disk, old analyzer, 3/6)' --workflow "PR" --ci |& tee ./ci/tmp/job.log + python3 -m praktika run 'Stateless tests (amd_msan, parallel, 1/2)' --workflow "PR" --ci |& tee ./ci/tmp/job.log fi - integration_tests_amd_asan_db_disk_old_analyzer_4_6: - runs-on: [self-hosted, amd-medium] - needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, style_check, fast_test, build_arm_tidy, build_amd_debug, build_amd_asan, build_amd_tsan, build_arm_binary, stateless_tests_amd_asan_distributed_plan_parallel_1_2, stateless_tests_amd_asan_distributed_plan_parallel_2_2, stateless_tests_amd_debug_parallel, stateless_tests_amd_tsan_parallel_1_2, stateless_tests_amd_tsan_parallel_2_2, stateless_tests_arm_binary_parallel] - if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKGFtZF9hc2FuLCBkYiBkaXNrLCBvbGQgYW5hbHl6ZXIsIDQvNik=') }} - name: "Integration tests (amd_asan, db disk, old analyzer, 4/6)" + stateless_tests_amd_msan_parallel_2_2: + runs-on: [self-hosted, altinity-on-demand, altinity-func-tester] + needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, fast_test, build_amd_debug, build_amd_asan, build_amd_tsan, build_amd_msan, build_arm_binary, stateless_tests_amd_asan_distributed_plan_parallel_1_2, stateless_tests_amd_asan_distributed_plan_parallel_2_2, stateless_tests_amd_debug_parallel, stateless_tests_amd_tsan_parallel_1_2, stateless_tests_amd_tsan_parallel_2_2, stateless_tests_arm_binary_parallel] + if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhbWRfbXNhbiwgcGFyYWxsZWwsIDIvMik=') }} + name: "Stateless tests (amd_msan, parallel, 2/2)" outputs: data: ${{ steps.run.outputs.DATA }} pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} @@ -2791,6 +1898,13 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Stateless tests (amd_msan, parallel, 2/2)" + - name: Prepare env script run: | rm -rf ./ci/tmp @@ -2812,16 +1926,16 @@ jobs: . 
./ci/tmp/praktika_setup_env.sh set -o pipefail if command -v ts &> /dev/null; then - python3 -m praktika run 'Integration tests (amd_asan, db disk, old analyzer, 4/6)' --workflow "PR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log + python3 -m praktika run 'Stateless tests (amd_msan, parallel, 2/2)' --workflow "PR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log else - python3 -m praktika run 'Integration tests (amd_asan, db disk, old analyzer, 4/6)' --workflow "PR" --ci |& tee ./ci/tmp/job.log + python3 -m praktika run 'Stateless tests (amd_msan, parallel, 2/2)' --workflow "PR" --ci |& tee ./ci/tmp/job.log fi - integration_tests_amd_asan_db_disk_old_analyzer_5_6: - runs-on: [self-hosted, amd-medium] - needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, style_check, fast_test, build_arm_tidy, build_amd_debug, build_amd_asan, build_amd_tsan, build_arm_binary, stateless_tests_amd_asan_distributed_plan_parallel_1_2, stateless_tests_amd_asan_distributed_plan_parallel_2_2, stateless_tests_amd_debug_parallel, stateless_tests_amd_tsan_parallel_1_2, stateless_tests_amd_tsan_parallel_2_2, stateless_tests_arm_binary_parallel] - if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKGFtZF9hc2FuLCBkYiBkaXNrLCBvbGQgYW5hbHl6ZXIsIDUvNik=') }} - name: "Integration tests (amd_asan, db disk, old analyzer, 5/6)" + stateless_tests_amd_msan_sequential_1_2: + runs-on: [self-hosted, altinity-on-demand, altinity-func-tester] + needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, fast_test, build_amd_debug, build_amd_asan, build_amd_tsan, build_amd_msan, build_arm_binary, stateless_tests_amd_asan_distributed_plan_parallel_1_2, stateless_tests_amd_asan_distributed_plan_parallel_2_2, stateless_tests_amd_debug_parallel, stateless_tests_amd_tsan_parallel_1_2, stateless_tests_amd_tsan_parallel_2_2, stateless_tests_arm_binary_parallel] + if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhbWRfbXNhbiwgc2VxdWVudGlhbCwgMS8yKQ==') }} + name: "Stateless tests (amd_msan, sequential, 1/2)" outputs: data: ${{ steps.run.outputs.DATA }} pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} @@ -2831,6 +1945,13 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Stateless tests (amd_msan, sequential, 1/2)" + - name: Prepare env script run: | rm -rf ./ci/tmp @@ -2852,16 +1973,16 @@ jobs: . 
./ci/tmp/praktika_setup_env.sh set -o pipefail if command -v ts &> /dev/null; then - python3 -m praktika run 'Integration tests (amd_asan, db disk, old analyzer, 5/6)' --workflow "PR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log + python3 -m praktika run 'Stateless tests (amd_msan, sequential, 1/2)' --workflow "PR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log else - python3 -m praktika run 'Integration tests (amd_asan, db disk, old analyzer, 5/6)' --workflow "PR" --ci |& tee ./ci/tmp/job.log + python3 -m praktika run 'Stateless tests (amd_msan, sequential, 1/2)' --workflow "PR" --ci |& tee ./ci/tmp/job.log fi - integration_tests_amd_asan_db_disk_old_analyzer_6_6: - runs-on: [self-hosted, amd-medium] - needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, style_check, fast_test, build_arm_tidy, build_amd_debug, build_amd_asan, build_amd_tsan, build_arm_binary, stateless_tests_amd_asan_distributed_plan_parallel_1_2, stateless_tests_amd_asan_distributed_plan_parallel_2_2, stateless_tests_amd_debug_parallel, stateless_tests_amd_tsan_parallel_1_2, stateless_tests_amd_tsan_parallel_2_2, stateless_tests_arm_binary_parallel] - if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKGFtZF9hc2FuLCBkYiBkaXNrLCBvbGQgYW5hbHl6ZXIsIDYvNik=') }} - name: "Integration tests (amd_asan, db disk, old analyzer, 6/6)" + stateless_tests_amd_msan_sequential_2_2: + runs-on: [self-hosted, altinity-on-demand, altinity-func-tester] + needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, fast_test, build_amd_debug, build_amd_asan, build_amd_tsan, build_amd_msan, build_arm_binary, stateless_tests_amd_asan_distributed_plan_parallel_1_2, stateless_tests_amd_asan_distributed_plan_parallel_2_2, stateless_tests_amd_debug_parallel, stateless_tests_amd_tsan_parallel_1_2, stateless_tests_amd_tsan_parallel_2_2, stateless_tests_arm_binary_parallel] + if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhbWRfbXNhbiwgc2VxdWVudGlhbCwgMi8yKQ==') }} + name: "Stateless tests (amd_msan, sequential, 2/2)" outputs: data: ${{ steps.run.outputs.DATA }} pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} @@ -2871,6 +1992,13 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Stateless tests (amd_msan, sequential, 2/2)" + - name: Prepare env script run: | rm -rf ./ci/tmp @@ -2892,16 +2020,16 @@ jobs: . 
./ci/tmp/praktika_setup_env.sh set -o pipefail if command -v ts &> /dev/null; then - python3 -m praktika run 'Integration tests (amd_asan, db disk, old analyzer, 6/6)' --workflow "PR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log + python3 -m praktika run 'Stateless tests (amd_msan, sequential, 2/2)' --workflow "PR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log else - python3 -m praktika run 'Integration tests (amd_asan, db disk, old analyzer, 6/6)' --workflow "PR" --ci |& tee ./ci/tmp/job.log + python3 -m praktika run 'Stateless tests (amd_msan, sequential, 2/2)' --workflow "PR" --ci |& tee ./ci/tmp/job.log fi - integration_tests_amd_binary_1_5: - runs-on: [self-hosted, amd-medium] - needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, style_check, fast_test, build_arm_tidy, build_amd_debug, build_amd_asan, build_amd_tsan, build_amd_binary, build_arm_binary, stateless_tests_amd_asan_distributed_plan_parallel_1_2, stateless_tests_amd_asan_distributed_plan_parallel_2_2, stateless_tests_amd_debug_parallel, stateless_tests_amd_tsan_parallel_1_2, stateless_tests_amd_tsan_parallel_2_2, stateless_tests_arm_binary_parallel] - if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKGFtZF9iaW5hcnksIDEvNSk=') }} - name: "Integration tests (amd_binary, 1/5)" + stateless_tests_amd_ubsan_parallel: + runs-on: [self-hosted, altinity-on-demand, altinity-func-tester] + needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, fast_test, build_amd_debug, build_amd_asan, build_amd_tsan, build_amd_ubsan, build_arm_binary, stateless_tests_amd_asan_distributed_plan_parallel_1_2, stateless_tests_amd_asan_distributed_plan_parallel_2_2, stateless_tests_amd_debug_parallel, stateless_tests_amd_tsan_parallel_1_2, stateless_tests_amd_tsan_parallel_2_2, stateless_tests_arm_binary_parallel] + if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhbWRfdWJzYW4sIHBhcmFsbGVsKQ==') }} + name: "Stateless tests (amd_ubsan, parallel)" outputs: data: ${{ steps.run.outputs.DATA }} pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} @@ -2911,6 +2039,13 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Stateless tests (amd_ubsan, parallel)" + - name: Prepare env script run: | rm -rf ./ci/tmp @@ -2932,16 +2067,16 @@ jobs: . 
./ci/tmp/praktika_setup_env.sh set -o pipefail if command -v ts &> /dev/null; then - python3 -m praktika run 'Integration tests (amd_binary, 1/5)' --workflow "PR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log + python3 -m praktika run 'Stateless tests (amd_ubsan, parallel)' --workflow "PR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log else - python3 -m praktika run 'Integration tests (amd_binary, 1/5)' --workflow "PR" --ci |& tee ./ci/tmp/job.log + python3 -m praktika run 'Stateless tests (amd_ubsan, parallel)' --workflow "PR" --ci |& tee ./ci/tmp/job.log fi - integration_tests_amd_binary_2_5: - runs-on: [self-hosted, amd-medium] - needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, style_check, fast_test, build_arm_tidy, build_amd_debug, build_amd_asan, build_amd_tsan, build_amd_binary, build_arm_binary, stateless_tests_amd_asan_distributed_plan_parallel_1_2, stateless_tests_amd_asan_distributed_plan_parallel_2_2, stateless_tests_amd_debug_parallel, stateless_tests_amd_tsan_parallel_1_2, stateless_tests_amd_tsan_parallel_2_2, stateless_tests_arm_binary_parallel] - if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKGFtZF9iaW5hcnksIDIvNSk=') }} - name: "Integration tests (amd_binary, 2/5)" + stateless_tests_amd_ubsan_sequential: + runs-on: [self-hosted, altinity-on-demand, altinity-func-tester] + needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, fast_test, build_amd_debug, build_amd_asan, build_amd_tsan, build_amd_ubsan, build_arm_binary, stateless_tests_amd_asan_distributed_plan_parallel_1_2, stateless_tests_amd_asan_distributed_plan_parallel_2_2, stateless_tests_amd_debug_parallel, stateless_tests_amd_tsan_parallel_1_2, stateless_tests_amd_tsan_parallel_2_2, stateless_tests_arm_binary_parallel] + if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhbWRfdWJzYW4sIHNlcXVlbnRpYWwp') }} + name: "Stateless tests (amd_ubsan, sequential)" outputs: data: ${{ steps.run.outputs.DATA }} pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} @@ -2951,6 +2086,13 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Stateless tests (amd_ubsan, sequential)" + - name: Prepare env script run: | rm -rf ./ci/tmp @@ -2972,16 +2114,16 @@ jobs: . 
./ci/tmp/praktika_setup_env.sh set -o pipefail if command -v ts &> /dev/null; then - python3 -m praktika run 'Integration tests (amd_binary, 2/5)' --workflow "PR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log + python3 -m praktika run 'Stateless tests (amd_ubsan, sequential)' --workflow "PR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log else - python3 -m praktika run 'Integration tests (amd_binary, 2/5)' --workflow "PR" --ci |& tee ./ci/tmp/job.log + python3 -m praktika run 'Stateless tests (amd_ubsan, sequential)' --workflow "PR" --ci |& tee ./ci/tmp/job.log fi - integration_tests_amd_binary_3_5: - runs-on: [self-hosted, amd-medium] - needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, style_check, fast_test, build_arm_tidy, build_amd_debug, build_amd_asan, build_amd_tsan, build_amd_binary, build_arm_binary, stateless_tests_amd_asan_distributed_plan_parallel_1_2, stateless_tests_amd_asan_distributed_plan_parallel_2_2, stateless_tests_amd_debug_parallel, stateless_tests_amd_tsan_parallel_1_2, stateless_tests_amd_tsan_parallel_2_2, stateless_tests_arm_binary_parallel] - if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKGFtZF9iaW5hcnksIDMvNSk=') }} - name: "Integration tests (amd_binary, 3/5)" + stateless_tests_amd_debug_distributed_plan_s3_storage_parallel: + runs-on: [self-hosted, altinity-on-demand, altinity-func-tester] + needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, fast_test, build_amd_debug, build_amd_asan, build_amd_tsan, build_arm_binary, stateless_tests_amd_asan_distributed_plan_parallel_1_2, stateless_tests_amd_asan_distributed_plan_parallel_2_2, stateless_tests_amd_debug_parallel, stateless_tests_amd_tsan_parallel_1_2, stateless_tests_amd_tsan_parallel_2_2, stateless_tests_arm_binary_parallel] + if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhbWRfZGVidWcsIGRpc3RyaWJ1dGVkIHBsYW4sIHMzIHN0b3JhZ2UsIHBhcmFsbGVsKQ==') }} + name: "Stateless tests (amd_debug, distributed plan, s3 storage, parallel)" outputs: data: ${{ steps.run.outputs.DATA }} pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} @@ -2991,6 +2133,13 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Stateless tests (amd_debug, distributed plan, s3 storage, parallel)" + - name: Prepare env script run: | rm -rf ./ci/tmp @@ -3012,16 +2161,16 @@ jobs: . 
./ci/tmp/praktika_setup_env.sh set -o pipefail if command -v ts &> /dev/null; then - python3 -m praktika run 'Integration tests (amd_binary, 3/5)' --workflow "PR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log + python3 -m praktika run 'Stateless tests (amd_debug, distributed plan, s3 storage, parallel)' --workflow "PR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log else - python3 -m praktika run 'Integration tests (amd_binary, 3/5)' --workflow "PR" --ci |& tee ./ci/tmp/job.log + python3 -m praktika run 'Stateless tests (amd_debug, distributed plan, s3 storage, parallel)' --workflow "PR" --ci |& tee ./ci/tmp/job.log fi - integration_tests_amd_binary_4_5: - runs-on: [self-hosted, amd-medium] - needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, style_check, fast_test, build_arm_tidy, build_amd_debug, build_amd_asan, build_amd_tsan, build_amd_binary, build_arm_binary, stateless_tests_amd_asan_distributed_plan_parallel_1_2, stateless_tests_amd_asan_distributed_plan_parallel_2_2, stateless_tests_amd_debug_parallel, stateless_tests_amd_tsan_parallel_1_2, stateless_tests_amd_tsan_parallel_2_2, stateless_tests_arm_binary_parallel] - if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKGFtZF9iaW5hcnksIDQvNSk=') }} - name: "Integration tests (amd_binary, 4/5)" + stateless_tests_amd_debug_distributed_plan_s3_storage_sequential: + runs-on: [self-hosted, altinity-on-demand, altinity-func-tester] + needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, fast_test, build_amd_debug, build_amd_asan, build_amd_tsan, build_arm_binary, stateless_tests_amd_asan_distributed_plan_parallel_1_2, stateless_tests_amd_asan_distributed_plan_parallel_2_2, stateless_tests_amd_debug_parallel, stateless_tests_amd_tsan_parallel_1_2, stateless_tests_amd_tsan_parallel_2_2, stateless_tests_arm_binary_parallel] + if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhbWRfZGVidWcsIGRpc3RyaWJ1dGVkIHBsYW4sIHMzIHN0b3JhZ2UsIHNlcXVlbnRpYWwp') }} + name: "Stateless tests (amd_debug, distributed plan, s3 storage, sequential)" outputs: data: ${{ steps.run.outputs.DATA }} pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} @@ -3031,6 +2180,13 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Stateless tests (amd_debug, distributed plan, s3 storage, sequential)" + - name: Prepare env script run: | rm -rf ./ci/tmp @@ -3052,16 +2208,16 @@ jobs: . 
./ci/tmp/praktika_setup_env.sh set -o pipefail if command -v ts &> /dev/null; then - python3 -m praktika run 'Integration tests (amd_binary, 4/5)' --workflow "PR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log + python3 -m praktika run 'Stateless tests (amd_debug, distributed plan, s3 storage, sequential)' --workflow "PR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log else - python3 -m praktika run 'Integration tests (amd_binary, 4/5)' --workflow "PR" --ci |& tee ./ci/tmp/job.log + python3 -m praktika run 'Stateless tests (amd_debug, distributed plan, s3 storage, sequential)' --workflow "PR" --ci |& tee ./ci/tmp/job.log fi - integration_tests_amd_binary_5_5: - runs-on: [self-hosted, amd-medium] - needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, style_check, fast_test, build_arm_tidy, build_amd_debug, build_amd_asan, build_amd_tsan, build_amd_binary, build_arm_binary, stateless_tests_amd_asan_distributed_plan_parallel_1_2, stateless_tests_amd_asan_distributed_plan_parallel_2_2, stateless_tests_amd_debug_parallel, stateless_tests_amd_tsan_parallel_1_2, stateless_tests_amd_tsan_parallel_2_2, stateless_tests_arm_binary_parallel] - if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKGFtZF9iaW5hcnksIDUvNSk=') }} - name: "Integration tests (amd_binary, 5/5)" + stateless_tests_amd_tsan_s3_storage_parallel_1_2: + runs-on: [self-hosted, altinity-on-demand, altinity-func-tester] + needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, fast_test, build_amd_debug, build_amd_asan, build_amd_tsan, build_arm_binary, stateless_tests_amd_asan_distributed_plan_parallel_1_2, stateless_tests_amd_asan_distributed_plan_parallel_2_2, stateless_tests_amd_debug_parallel, stateless_tests_amd_tsan_parallel_1_2, stateless_tests_amd_tsan_parallel_2_2, stateless_tests_arm_binary_parallel] + if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhbWRfdHNhbiwgczMgc3RvcmFnZSwgcGFyYWxsZWwsIDEvMik=') }} + name: "Stateless tests (amd_tsan, s3 storage, parallel, 1/2)" outputs: data: ${{ steps.run.outputs.DATA }} pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} @@ -3071,6 +2227,13 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Stateless tests (amd_tsan, s3 storage, parallel, 1/2)" + - name: Prepare env script run: | rm -rf ./ci/tmp @@ -3092,16 +2255,16 @@ jobs: . 
./ci/tmp/praktika_setup_env.sh set -o pipefail if command -v ts &> /dev/null; then - python3 -m praktika run 'Integration tests (amd_binary, 5/5)' --workflow "PR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log + python3 -m praktika run 'Stateless tests (amd_tsan, s3 storage, parallel, 1/2)' --workflow "PR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log else - python3 -m praktika run 'Integration tests (amd_binary, 5/5)' --workflow "PR" --ci |& tee ./ci/tmp/job.log + python3 -m praktika run 'Stateless tests (amd_tsan, s3 storage, parallel, 1/2)' --workflow "PR" --ci |& tee ./ci/tmp/job.log fi - integration_tests_arm_binary_distributed_plan_1_4: - runs-on: [self-hosted, arm-medium] - needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, style_check, fast_test, build_arm_tidy, build_amd_debug, build_amd_asan, build_amd_tsan, build_arm_binary, stateless_tests_amd_asan_distributed_plan_parallel_1_2, stateless_tests_amd_asan_distributed_plan_parallel_2_2, stateless_tests_amd_debug_parallel, stateless_tests_amd_tsan_parallel_1_2, stateless_tests_amd_tsan_parallel_2_2, stateless_tests_arm_binary_parallel] - if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKGFybV9iaW5hcnksIGRpc3RyaWJ1dGVkIHBsYW4sIDEvNCk=') }} - name: "Integration tests (arm_binary, distributed plan, 1/4)" + stateless_tests_amd_tsan_s3_storage_parallel_2_2: + runs-on: [self-hosted, altinity-on-demand, altinity-func-tester] + needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, fast_test, build_amd_debug, build_amd_asan, build_amd_tsan, build_arm_binary, stateless_tests_amd_asan_distributed_plan_parallel_1_2, stateless_tests_amd_asan_distributed_plan_parallel_2_2, stateless_tests_amd_debug_parallel, stateless_tests_amd_tsan_parallel_1_2, stateless_tests_amd_tsan_parallel_2_2, stateless_tests_arm_binary_parallel] + if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhbWRfdHNhbiwgczMgc3RvcmFnZSwgcGFyYWxsZWwsIDIvMik=') }} + name: "Stateless tests (amd_tsan, s3 storage, parallel, 2/2)" outputs: data: ${{ steps.run.outputs.DATA }} pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} @@ -3111,6 +2274,13 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Stateless tests (amd_tsan, s3 storage, parallel, 2/2)" + - name: Prepare env script run: | rm -rf ./ci/tmp @@ -3132,16 +2302,16 @@ jobs: . 
./ci/tmp/praktika_setup_env.sh set -o pipefail if command -v ts &> /dev/null; then - python3 -m praktika run 'Integration tests (arm_binary, distributed plan, 1/4)' --workflow "PR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log + python3 -m praktika run 'Stateless tests (amd_tsan, s3 storage, parallel, 2/2)' --workflow "PR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log else - python3 -m praktika run 'Integration tests (arm_binary, distributed plan, 1/4)' --workflow "PR" --ci |& tee ./ci/tmp/job.log + python3 -m praktika run 'Stateless tests (amd_tsan, s3 storage, parallel, 2/2)' --workflow "PR" --ci |& tee ./ci/tmp/job.log fi - integration_tests_arm_binary_distributed_plan_2_4: - runs-on: [self-hosted, arm-medium] - needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, style_check, fast_test, build_arm_tidy, build_amd_debug, build_amd_asan, build_amd_tsan, build_arm_binary, stateless_tests_amd_asan_distributed_plan_parallel_1_2, stateless_tests_amd_asan_distributed_plan_parallel_2_2, stateless_tests_amd_debug_parallel, stateless_tests_amd_tsan_parallel_1_2, stateless_tests_amd_tsan_parallel_2_2, stateless_tests_arm_binary_parallel] - if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKGFybV9iaW5hcnksIGRpc3RyaWJ1dGVkIHBsYW4sIDIvNCk=') }} - name: "Integration tests (arm_binary, distributed plan, 2/4)" + stateless_tests_amd_tsan_s3_storage_sequential_1_2: + runs-on: [self-hosted, altinity-on-demand, altinity-func-tester] + needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, fast_test, build_amd_debug, build_amd_asan, build_amd_tsan, build_arm_binary, stateless_tests_amd_asan_distributed_plan_parallel_1_2, stateless_tests_amd_asan_distributed_plan_parallel_2_2, stateless_tests_amd_debug_parallel, stateless_tests_amd_tsan_parallel_1_2, stateless_tests_amd_tsan_parallel_2_2, stateless_tests_arm_binary_parallel] + if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhbWRfdHNhbiwgczMgc3RvcmFnZSwgc2VxdWVudGlhbCwgMS8yKQ==') }} + name: "Stateless tests (amd_tsan, s3 storage, sequential, 1/2)" outputs: data: ${{ steps.run.outputs.DATA }} pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} @@ -3151,6 +2321,13 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Stateless tests (amd_tsan, s3 storage, sequential, 1/2)" + - name: Prepare env script run: | rm -rf ./ci/tmp @@ -3172,16 +2349,16 @@ jobs: . 
./ci/tmp/praktika_setup_env.sh set -o pipefail if command -v ts &> /dev/null; then - python3 -m praktika run 'Integration tests (arm_binary, distributed plan, 2/4)' --workflow "PR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log + python3 -m praktika run 'Stateless tests (amd_tsan, s3 storage, sequential, 1/2)' --workflow "PR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log else - python3 -m praktika run 'Integration tests (arm_binary, distributed plan, 2/4)' --workflow "PR" --ci |& tee ./ci/tmp/job.log + python3 -m praktika run 'Stateless tests (amd_tsan, s3 storage, sequential, 1/2)' --workflow "PR" --ci |& tee ./ci/tmp/job.log fi - integration_tests_arm_binary_distributed_plan_3_4: - runs-on: [self-hosted, arm-medium] - needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, style_check, fast_test, build_arm_tidy, build_amd_debug, build_amd_asan, build_amd_tsan, build_arm_binary, stateless_tests_amd_asan_distributed_plan_parallel_1_2, stateless_tests_amd_asan_distributed_plan_parallel_2_2, stateless_tests_amd_debug_parallel, stateless_tests_amd_tsan_parallel_1_2, stateless_tests_amd_tsan_parallel_2_2, stateless_tests_arm_binary_parallel] - if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKGFybV9iaW5hcnksIGRpc3RyaWJ1dGVkIHBsYW4sIDMvNCk=') }} - name: "Integration tests (arm_binary, distributed plan, 3/4)" + stateless_tests_amd_tsan_s3_storage_sequential_2_2: + runs-on: [self-hosted, altinity-on-demand, altinity-func-tester] + needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, fast_test, build_amd_debug, build_amd_asan, build_amd_tsan, build_arm_binary, stateless_tests_amd_asan_distributed_plan_parallel_1_2, stateless_tests_amd_asan_distributed_plan_parallel_2_2, stateless_tests_amd_debug_parallel, stateless_tests_amd_tsan_parallel_1_2, stateless_tests_amd_tsan_parallel_2_2, stateless_tests_arm_binary_parallel] + if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhbWRfdHNhbiwgczMgc3RvcmFnZSwgc2VxdWVudGlhbCwgMi8yKQ==') }} + name: "Stateless tests (amd_tsan, s3 storage, sequential, 2/2)" outputs: data: ${{ steps.run.outputs.DATA }} pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} @@ -3191,6 +2368,13 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Stateless tests (amd_tsan, s3 storage, sequential, 2/2)" + - name: Prepare env script run: | rm -rf ./ci/tmp @@ -3212,16 +2396,16 @@ jobs: . 
           . ./ci/tmp/praktika_setup_env.sh
           set -o pipefail
           if command -v ts &> /dev/null; then
-            python3 -m praktika run 'Integration tests (arm_binary, distributed plan, 3/4)' --workflow "PR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
+            python3 -m praktika run 'Stateless tests (amd_tsan, s3 storage, sequential, 2/2)' --workflow "PR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
           else
-            python3 -m praktika run 'Integration tests (arm_binary, distributed plan, 3/4)' --workflow "PR" --ci |& tee ./ci/tmp/job.log
+            python3 -m praktika run 'Stateless tests (amd_tsan, s3 storage, sequential, 2/2)' --workflow "PR" --ci |& tee ./ci/tmp/job.log
           fi

-  integration_tests_arm_binary_distributed_plan_4_4:
-    runs-on: [self-hosted, arm-medium]
-    needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, style_check, fast_test, build_arm_tidy, build_amd_debug, build_amd_asan, build_amd_tsan, build_arm_binary, stateless_tests_amd_asan_distributed_plan_parallel_1_2, stateless_tests_amd_asan_distributed_plan_parallel_2_2, stateless_tests_amd_debug_parallel, stateless_tests_amd_tsan_parallel_1_2, stateless_tests_amd_tsan_parallel_2_2, stateless_tests_arm_binary_parallel]
-    if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKGFybV9iaW5hcnksIGRpc3RyaWJ1dGVkIHBsYW4sIDQvNCk=') }}
-    name: "Integration tests (arm_binary, distributed plan, 4/4)"
+  stateless_tests_arm_binary_parallel:
+    runs-on: [self-hosted, altinity-on-demand, altinity-func-tester-aarch64]
+    needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, fast_test, build_arm_binary]
+    if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhcm1fYmluYXJ5LCBwYXJhbGxlbCk=') }}
+    name: "Stateless tests (arm_binary, parallel)"
     outputs:
       data: ${{ steps.run.outputs.DATA }}
       pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }}
@@ -3231,6 +2415,13 @@ jobs:
         with:
           ref: ${{ env.CHECKOUT_REF }}

+      - name: Setup
+        uses: ./.github/actions/runner_setup
+      - name: Docker setup
+        uses: ./.github/actions/docker_setup
+        with:
+          test_name: "Stateless tests (arm_binary, parallel)"
+
       - name: Prepare env script
         run: |
           rm -rf ./ci/tmp
@@ -3252,16 +2443,16 @@ jobs:
           . ./ci/tmp/praktika_setup_env.sh
           set -o pipefail
           if command -v ts &> /dev/null; then
-            python3 -m praktika run 'Integration tests (arm_binary, distributed plan, 4/4)' --workflow "PR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
+            python3 -m praktika run 'Stateless tests (arm_binary, parallel)' --workflow "PR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
           else
-            python3 -m praktika run 'Integration tests (arm_binary, distributed plan, 4/4)' --workflow "PR" --ci |& tee ./ci/tmp/job.log
+            python3 -m praktika run 'Stateless tests (arm_binary, parallel)' --workflow "PR" --ci |& tee ./ci/tmp/job.log
           fi

-  integration_tests_amd_tsan_1_6:
-    runs-on: [self-hosted, amd-medium]
-    needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, style_check, fast_test, build_arm_tidy, build_amd_debug, build_amd_asan, build_amd_tsan, build_arm_binary, stateless_tests_amd_asan_distributed_plan_parallel_1_2, stateless_tests_amd_asan_distributed_plan_parallel_2_2, stateless_tests_amd_debug_parallel, stateless_tests_amd_tsan_parallel_1_2, stateless_tests_amd_tsan_parallel_2_2, stateless_tests_arm_binary_parallel]
-    if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKGFtZF90c2FuLCAxLzYp') }}
-    name: "Integration tests (amd_tsan, 1/6)"
+  stateless_tests_arm_binary_sequential:
+    runs-on: [self-hosted, altinity-on-demand, altinity-func-tester-aarch64]
+    needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, fast_test, build_amd_debug, build_amd_asan, build_amd_tsan, build_arm_binary, stateless_tests_amd_asan_distributed_plan_parallel_1_2, stateless_tests_amd_asan_distributed_plan_parallel_2_2, stateless_tests_amd_debug_parallel, stateless_tests_amd_tsan_parallel_1_2, stateless_tests_amd_tsan_parallel_2_2, stateless_tests_arm_binary_parallel]
+    if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhcm1fYmluYXJ5LCBzZXF1ZW50aWFsKQ==') }}
+    name: "Stateless tests (arm_binary, sequential)"
     outputs:
       data: ${{ steps.run.outputs.DATA }}
       pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }}
@@ -3271,6 +2462,13 @@ jobs:
         with:
           ref: ${{ env.CHECKOUT_REF }}

+      - name: Setup
+        uses: ./.github/actions/runner_setup
+      - name: Docker setup
+        uses: ./.github/actions/docker_setup
+        with:
+          test_name: "Stateless tests (arm_binary, sequential)"
+
       - name: Prepare env script
         run: |
           rm -rf ./ci/tmp
@@ -3292,16 +2490,16 @@ jobs:
           . ./ci/tmp/praktika_setup_env.sh
           set -o pipefail
           if command -v ts &> /dev/null; then
-            python3 -m praktika run 'Integration tests (amd_tsan, 1/6)' --workflow "PR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
+            python3 -m praktika run 'Stateless tests (arm_binary, sequential)' --workflow "PR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
           else
-            python3 -m praktika run 'Integration tests (amd_tsan, 1/6)' --workflow "PR" --ci |& tee ./ci/tmp/job.log
+            python3 -m praktika run 'Stateless tests (arm_binary, sequential)' --workflow "PR" --ci |& tee ./ci/tmp/job.log
           fi

-  integration_tests_amd_tsan_2_6:
-    runs-on: [self-hosted, amd-medium]
-    needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, style_check, fast_test, build_arm_tidy, build_amd_debug, build_amd_asan, build_amd_tsan, build_arm_binary, stateless_tests_amd_asan_distributed_plan_parallel_1_2, stateless_tests_amd_asan_distributed_plan_parallel_2_2, stateless_tests_amd_debug_parallel, stateless_tests_amd_tsan_parallel_1_2, stateless_tests_amd_tsan_parallel_2_2, stateless_tests_arm_binary_parallel]
-    if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKGFtZF90c2FuLCAyLzYp') }}
-    name: "Integration tests (amd_tsan, 2/6)"
+  integration_tests_amd_asan_db_disk_old_analyzer_1_6:
+    runs-on: [self-hosted, altinity-on-demand, altinity-func-tester]
+    needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, fast_test, build_amd_debug, build_amd_asan, build_amd_tsan, build_arm_binary, stateless_tests_amd_asan_distributed_plan_parallel_1_2, stateless_tests_amd_asan_distributed_plan_parallel_2_2, stateless_tests_amd_debug_parallel, stateless_tests_amd_tsan_parallel_1_2, stateless_tests_amd_tsan_parallel_2_2, stateless_tests_arm_binary_parallel]
+    if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKGFtZF9hc2FuLCBkYiBkaXNrLCBvbGQgYW5hbHl6ZXIsIDEvNik=') }}
+    name: "Integration tests (amd_asan, db disk, old analyzer, 1/6)"
     outputs:
       data: ${{ steps.run.outputs.DATA }}
       pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }}
@@ -3311,6 +2509,13 @@ jobs:
         with:
           ref: ${{ env.CHECKOUT_REF }}

+      - name: Setup
+        uses: ./.github/actions/runner_setup
+      - name: Docker setup
+        uses: ./.github/actions/docker_setup
+        with:
+          test_name: "Integration tests (amd_asan, db disk, old analyzer, 1/6)"
+
       - name: Prepare env script
         run: |
           rm -rf ./ci/tmp
@@ -3332,16 +2537,16 @@ jobs:
           . ./ci/tmp/praktika_setup_env.sh
           set -o pipefail
           if command -v ts &> /dev/null; then
-            python3 -m praktika run 'Integration tests (amd_tsan, 2/6)' --workflow "PR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
+            python3 -m praktika run 'Integration tests (amd_asan, db disk, old analyzer, 1/6)' --workflow "PR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
           else
-            python3 -m praktika run 'Integration tests (amd_tsan, 2/6)' --workflow "PR" --ci |& tee ./ci/tmp/job.log
+            python3 -m praktika run 'Integration tests (amd_asan, db disk, old analyzer, 1/6)' --workflow "PR" --ci |& tee ./ci/tmp/job.log
           fi

-  integration_tests_amd_tsan_3_6:
-    runs-on: [self-hosted, amd-medium]
-    needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, style_check, fast_test, build_arm_tidy, build_amd_debug, build_amd_asan, build_amd_tsan, build_arm_binary, stateless_tests_amd_asan_distributed_plan_parallel_1_2, stateless_tests_amd_asan_distributed_plan_parallel_2_2, stateless_tests_amd_debug_parallel, stateless_tests_amd_tsan_parallel_1_2, stateless_tests_amd_tsan_parallel_2_2, stateless_tests_arm_binary_parallel]
-    if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKGFtZF90c2FuLCAzLzYp') }}
-    name: "Integration tests (amd_tsan, 3/6)"
+  integration_tests_amd_asan_db_disk_old_analyzer_2_6:
+    runs-on: [self-hosted, altinity-on-demand, altinity-func-tester]
+    needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, fast_test, build_amd_debug, build_amd_asan, build_amd_tsan, build_arm_binary, stateless_tests_amd_asan_distributed_plan_parallel_1_2, stateless_tests_amd_asan_distributed_plan_parallel_2_2, stateless_tests_amd_debug_parallel, stateless_tests_amd_tsan_parallel_1_2, stateless_tests_amd_tsan_parallel_2_2, stateless_tests_arm_binary_parallel]
+    if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKGFtZF9hc2FuLCBkYiBkaXNrLCBvbGQgYW5hbHl6ZXIsIDIvNik=') }}
+    name: "Integration tests (amd_asan, db disk, old analyzer, 2/6)"
     outputs:
       data: ${{ steps.run.outputs.DATA }}
       pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }}
@@ -3351,6 +2556,13 @@ jobs:
         with:
           ref: ${{ env.CHECKOUT_REF }}

+      - name: Setup
+        uses: ./.github/actions/runner_setup
+      - name: Docker setup
+        uses: ./.github/actions/docker_setup
+        with:
+          test_name: "Integration tests (amd_asan, db disk, old analyzer, 2/6)"
+
       - name: Prepare env script
         run: |
           rm -rf ./ci/tmp
@@ -3372,16 +2584,16 @@ jobs:
           . ./ci/tmp/praktika_setup_env.sh
           set -o pipefail
           if command -v ts &> /dev/null; then
-            python3 -m praktika run 'Integration tests (amd_tsan, 3/6)' --workflow "PR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
+            python3 -m praktika run 'Integration tests (amd_asan, db disk, old analyzer, 2/6)' --workflow "PR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
           else
-            python3 -m praktika run 'Integration tests (amd_tsan, 3/6)' --workflow "PR" --ci |& tee ./ci/tmp/job.log
+            python3 -m praktika run 'Integration tests (amd_asan, db disk, old analyzer, 2/6)' --workflow "PR" --ci |& tee ./ci/tmp/job.log
           fi

-  integration_tests_amd_tsan_4_6:
-    runs-on: [self-hosted, amd-medium]
-    needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, style_check, fast_test, build_arm_tidy, build_amd_debug, build_amd_asan, build_amd_tsan, build_arm_binary, stateless_tests_amd_asan_distributed_plan_parallel_1_2, stateless_tests_amd_asan_distributed_plan_parallel_2_2, stateless_tests_amd_debug_parallel, stateless_tests_amd_tsan_parallel_1_2, stateless_tests_amd_tsan_parallel_2_2, stateless_tests_arm_binary_parallel]
-    if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKGFtZF90c2FuLCA0LzYp') }}
-    name: "Integration tests (amd_tsan, 4/6)"
+  integration_tests_amd_asan_db_disk_old_analyzer_3_6:
+    runs-on: [self-hosted, altinity-on-demand, altinity-func-tester]
+    needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, fast_test, build_amd_debug, build_amd_asan, build_amd_tsan, build_arm_binary, stateless_tests_amd_asan_distributed_plan_parallel_1_2, stateless_tests_amd_asan_distributed_plan_parallel_2_2, stateless_tests_amd_debug_parallel, stateless_tests_amd_tsan_parallel_1_2, stateless_tests_amd_tsan_parallel_2_2, stateless_tests_arm_binary_parallel]
+    if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKGFtZF9hc2FuLCBkYiBkaXNrLCBvbGQgYW5hbHl6ZXIsIDMvNik=') }}
+    name: "Integration tests (amd_asan, db disk, old analyzer, 3/6)"
     outputs:
       data: ${{ steps.run.outputs.DATA }}
       pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }}
@@ -3391,6 +2603,13 @@ jobs:
         with:
           ref: ${{ env.CHECKOUT_REF }}

+      - name: Setup
+        uses: ./.github/actions/runner_setup
+      - name: Docker setup
+        uses: ./.github/actions/docker_setup
+        with:
+          test_name: "Integration tests (amd_asan, db disk, old analyzer, 3/6)"
+
       - name: Prepare env script
         run: |
           rm -rf ./ci/tmp
@@ -3412,16 +2631,16 @@ jobs:
           . ./ci/tmp/praktika_setup_env.sh
           set -o pipefail
           if command -v ts &> /dev/null; then
-            python3 -m praktika run 'Integration tests (amd_tsan, 4/6)' --workflow "PR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
+            python3 -m praktika run 'Integration tests (amd_asan, db disk, old analyzer, 3/6)' --workflow "PR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
           else
-            python3 -m praktika run 'Integration tests (amd_tsan, 4/6)' --workflow "PR" --ci |& tee ./ci/tmp/job.log
+            python3 -m praktika run 'Integration tests (amd_asan, db disk, old analyzer, 3/6)' --workflow "PR" --ci |& tee ./ci/tmp/job.log
           fi

-  integration_tests_amd_tsan_5_6:
-    runs-on: [self-hosted, amd-medium]
-    needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, style_check, fast_test, build_arm_tidy, build_amd_debug, build_amd_asan, build_amd_tsan, build_arm_binary, stateless_tests_amd_asan_distributed_plan_parallel_1_2, stateless_tests_amd_asan_distributed_plan_parallel_2_2, stateless_tests_amd_debug_parallel, stateless_tests_amd_tsan_parallel_1_2, stateless_tests_amd_tsan_parallel_2_2, stateless_tests_arm_binary_parallel]
-    if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKGFtZF90c2FuLCA1LzYp') }}
-    name: "Integration tests (amd_tsan, 5/6)"
+  integration_tests_amd_asan_db_disk_old_analyzer_4_6:
+    runs-on: [self-hosted, altinity-on-demand, altinity-func-tester]
+    needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, fast_test, build_amd_debug, build_amd_asan, build_amd_tsan, build_arm_binary, stateless_tests_amd_asan_distributed_plan_parallel_1_2, stateless_tests_amd_asan_distributed_plan_parallel_2_2, stateless_tests_amd_debug_parallel, stateless_tests_amd_tsan_parallel_1_2, stateless_tests_amd_tsan_parallel_2_2, stateless_tests_arm_binary_parallel]
+    if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKGFtZF9hc2FuLCBkYiBkaXNrLCBvbGQgYW5hbHl6ZXIsIDQvNik=') }}
+    name: "Integration tests (amd_asan, db disk, old analyzer, 4/6)"
     outputs:
       data: ${{ steps.run.outputs.DATA }}
       pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }}
@@ -3431,6 +2650,13 @@ jobs:
         with:
           ref: ${{ env.CHECKOUT_REF }}

+      - name: Setup
+        uses: ./.github/actions/runner_setup
+      - name: Docker setup
+        uses: ./.github/actions/docker_setup
+        with:
+          test_name: "Integration tests (amd_asan, db disk, old analyzer, 4/6)"
+
       - name: Prepare env script
         run: |
           rm -rf ./ci/tmp
@@ -3452,16 +2678,16 @@ jobs:
           . ./ci/tmp/praktika_setup_env.sh
           set -o pipefail
           if command -v ts &> /dev/null; then
-            python3 -m praktika run 'Integration tests (amd_tsan, 5/6)' --workflow "PR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
+            python3 -m praktika run 'Integration tests (amd_asan, db disk, old analyzer, 4/6)' --workflow "PR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
           else
-            python3 -m praktika run 'Integration tests (amd_tsan, 5/6)' --workflow "PR" --ci |& tee ./ci/tmp/job.log
+            python3 -m praktika run 'Integration tests (amd_asan, db disk, old analyzer, 4/6)' --workflow "PR" --ci |& tee ./ci/tmp/job.log
           fi

-  integration_tests_amd_tsan_6_6:
-    runs-on: [self-hosted, amd-medium]
-    needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, style_check, fast_test, build_arm_tidy, build_amd_debug, build_amd_asan, build_amd_tsan, build_arm_binary, stateless_tests_amd_asan_distributed_plan_parallel_1_2, stateless_tests_amd_asan_distributed_plan_parallel_2_2, stateless_tests_amd_debug_parallel, stateless_tests_amd_tsan_parallel_1_2, stateless_tests_amd_tsan_parallel_2_2, stateless_tests_arm_binary_parallel]
-    if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKGFtZF90c2FuLCA2LzYp') }}
-    name: "Integration tests (amd_tsan, 6/6)"
+  integration_tests_amd_asan_db_disk_old_analyzer_5_6:
+    runs-on: [self-hosted, altinity-on-demand, altinity-func-tester]
+    needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, fast_test, build_amd_debug, build_amd_asan, build_amd_tsan, build_arm_binary, stateless_tests_amd_asan_distributed_plan_parallel_1_2, stateless_tests_amd_asan_distributed_plan_parallel_2_2, stateless_tests_amd_debug_parallel, stateless_tests_amd_tsan_parallel_1_2, stateless_tests_amd_tsan_parallel_2_2, stateless_tests_arm_binary_parallel]
+    if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKGFtZF9hc2FuLCBkYiBkaXNrLCBvbGQgYW5hbHl6ZXIsIDUvNik=') }}
+    name: "Integration tests (amd_asan, db disk, old analyzer, 5/6)"
     outputs:
       data: ${{ steps.run.outputs.DATA }}
       pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }}
@@ -3471,6 +2697,13 @@ jobs:
         with:
           ref: ${{ env.CHECKOUT_REF }}

+      - name: Setup
+        uses: ./.github/actions/runner_setup
+      - name: Docker setup
+        uses: ./.github/actions/docker_setup
+        with:
+          test_name: "Integration tests (amd_asan, db disk, old analyzer, 5/6)"
+
       - name: Prepare env script
         run: |
           rm -rf ./ci/tmp
@@ -3492,16 +2725,16 @@ jobs:
           . ./ci/tmp/praktika_setup_env.sh
           set -o pipefail
           if command -v ts &> /dev/null; then
-            python3 -m praktika run 'Integration tests (amd_tsan, 6/6)' --workflow "PR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
+            python3 -m praktika run 'Integration tests (amd_asan, db disk, old analyzer, 5/6)' --workflow "PR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
           else
-            python3 -m praktika run 'Integration tests (amd_tsan, 6/6)' --workflow "PR" --ci |& tee ./ci/tmp/job.log
+            python3 -m praktika run 'Integration tests (amd_asan, db disk, old analyzer, 5/6)' --workflow "PR" --ci |& tee ./ci/tmp/job.log
           fi

-  unit_tests_asan:
-    runs-on: [self-hosted, amd-large]
-    needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, style_check, fast_test, build_arm_tidy, build_amd_asan]
-    if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'VW5pdCB0ZXN0cyAoYXNhbik=') }}
-    name: "Unit tests (asan)"
+  integration_tests_amd_asan_db_disk_old_analyzer_6_6:
+    runs-on: [self-hosted, altinity-on-demand, altinity-func-tester]
+    needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, fast_test, build_amd_debug, build_amd_asan, build_amd_tsan, build_arm_binary, stateless_tests_amd_asan_distributed_plan_parallel_1_2, stateless_tests_amd_asan_distributed_plan_parallel_2_2, stateless_tests_amd_debug_parallel, stateless_tests_amd_tsan_parallel_1_2, stateless_tests_amd_tsan_parallel_2_2, stateless_tests_arm_binary_parallel]
+    if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKGFtZF9hc2FuLCBkYiBkaXNrLCBvbGQgYW5hbHl6ZXIsIDYvNik=') }}
+    name: "Integration tests (amd_asan, db disk, old analyzer, 6/6)"
     outputs:
       data: ${{ steps.run.outputs.DATA }}
       pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }}
@@ -3511,6 +2744,13 @@ jobs:
         with:
           ref: ${{ env.CHECKOUT_REF }}

+      - name: Setup
+        uses: ./.github/actions/runner_setup
+      - name: Docker setup
+        uses: ./.github/actions/docker_setup
+        with:
+          test_name: "Integration tests (amd_asan, db disk, old analyzer, 6/6)"
+
       - name: Prepare env script
         run: |
           rm -rf ./ci/tmp
@@ -3532,16 +2772,16 @@ jobs:
           . ./ci/tmp/praktika_setup_env.sh
           set -o pipefail
           if command -v ts &> /dev/null; then
-            python3 -m praktika run 'Unit tests (asan)' --workflow "PR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
+            python3 -m praktika run 'Integration tests (amd_asan, db disk, old analyzer, 6/6)' --workflow "PR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
           else
-            python3 -m praktika run 'Unit tests (asan)' --workflow "PR" --ci |& tee ./ci/tmp/job.log
+            python3 -m praktika run 'Integration tests (amd_asan, db disk, old analyzer, 6/6)' --workflow "PR" --ci |& tee ./ci/tmp/job.log
           fi

-  unit_tests_tsan:
-    runs-on: [self-hosted, amd-large]
-    needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, style_check, fast_test, build_arm_tidy, build_amd_tsan]
-    if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'VW5pdCB0ZXN0cyAodHNhbik=') }}
-    name: "Unit tests (tsan)"
+  integration_tests_amd_binary_1_5:
+    runs-on: [self-hosted, altinity-on-demand, altinity-func-tester]
+    needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, fast_test, build_amd_debug, build_amd_asan, build_amd_tsan, build_amd_binary, build_arm_binary, stateless_tests_amd_asan_distributed_plan_parallel_1_2, stateless_tests_amd_asan_distributed_plan_parallel_2_2, stateless_tests_amd_debug_parallel, stateless_tests_amd_tsan_parallel_1_2, stateless_tests_amd_tsan_parallel_2_2, stateless_tests_arm_binary_parallel]
+    if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKGFtZF9iaW5hcnksIDEvNSk=') }}
+    name: "Integration tests (amd_binary, 1/5)"
     outputs:
       data: ${{ steps.run.outputs.DATA }}
       pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }}
@@ -3551,6 +2791,13 @@ jobs:
         with:
           ref: ${{ env.CHECKOUT_REF }}

+      - name: Setup
+        uses: ./.github/actions/runner_setup
+      - name: Docker setup
+        uses: ./.github/actions/docker_setup
+        with:
+          test_name: "Integration tests (amd_binary, 1/5)"
+
       - name: Prepare env script
         run: |
           rm -rf ./ci/tmp
@@ -3572,16 +2819,16 @@ jobs:
           . ./ci/tmp/praktika_setup_env.sh
           set -o pipefail
           if command -v ts &> /dev/null; then
-            python3 -m praktika run 'Unit tests (tsan)' --workflow "PR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
+            python3 -m praktika run 'Integration tests (amd_binary, 1/5)' --workflow "PR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
           else
-            python3 -m praktika run 'Unit tests (tsan)' --workflow "PR" --ci |& tee ./ci/tmp/job.log
+            python3 -m praktika run 'Integration tests (amd_binary, 1/5)' --workflow "PR" --ci |& tee ./ci/tmp/job.log
           fi

-  unit_tests_msan:
-    runs-on: [self-hosted, amd-large]
-    needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, style_check, fast_test, build_arm_tidy, build_amd_msan]
-    if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'VW5pdCB0ZXN0cyAobXNhbik=') }}
-    name: "Unit tests (msan)"
+  integration_tests_amd_binary_2_5:
+    runs-on: [self-hosted, altinity-on-demand, altinity-func-tester]
+    needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, fast_test, build_amd_debug, build_amd_asan, build_amd_tsan, build_amd_binary, build_arm_binary, stateless_tests_amd_asan_distributed_plan_parallel_1_2, stateless_tests_amd_asan_distributed_plan_parallel_2_2, stateless_tests_amd_debug_parallel, stateless_tests_amd_tsan_parallel_1_2, stateless_tests_amd_tsan_parallel_2_2, stateless_tests_arm_binary_parallel]
+    if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKGFtZF9iaW5hcnksIDIvNSk=') }}
+    name: "Integration tests (amd_binary, 2/5)"
     outputs:
       data: ${{ steps.run.outputs.DATA }}
       pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }}
@@ -3591,6 +2838,13 @@ jobs:
         with:
           ref: ${{ env.CHECKOUT_REF }}

+      - name: Setup
+        uses: ./.github/actions/runner_setup
+      - name: Docker setup
+        uses: ./.github/actions/docker_setup
+        with:
+          test_name: "Integration tests (amd_binary, 2/5)"
+
       - name: Prepare env script
         run: |
           rm -rf ./ci/tmp
@@ -3612,16 +2866,16 @@ jobs:
           . ./ci/tmp/praktika_setup_env.sh
           set -o pipefail
           if command -v ts &> /dev/null; then
-            python3 -m praktika run 'Unit tests (msan)' --workflow "PR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
+            python3 -m praktika run 'Integration tests (amd_binary, 2/5)' --workflow "PR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
           else
-            python3 -m praktika run 'Unit tests (msan)' --workflow "PR" --ci |& tee ./ci/tmp/job.log
+            python3 -m praktika run 'Integration tests (amd_binary, 2/5)' --workflow "PR" --ci |& tee ./ci/tmp/job.log
           fi

-  unit_tests_ubsan:
-    runs-on: [self-hosted, amd-large]
-    needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, style_check, fast_test, build_arm_tidy, build_amd_ubsan]
-    if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'VW5pdCB0ZXN0cyAodWJzYW4p') }}
-    name: "Unit tests (ubsan)"
+  integration_tests_amd_binary_3_5:
+    runs-on: [self-hosted, altinity-on-demand, altinity-func-tester]
+    needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, fast_test, build_amd_debug, build_amd_asan, build_amd_tsan, build_amd_binary, build_arm_binary, stateless_tests_amd_asan_distributed_plan_parallel_1_2, stateless_tests_amd_asan_distributed_plan_parallel_2_2, stateless_tests_amd_debug_parallel, stateless_tests_amd_tsan_parallel_1_2, stateless_tests_amd_tsan_parallel_2_2, stateless_tests_arm_binary_parallel]
+    if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKGFtZF9iaW5hcnksIDMvNSk=') }}
+    name: "Integration tests (amd_binary, 3/5)"
     outputs:
       data: ${{ steps.run.outputs.DATA }}
       pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }}
@@ -3631,6 +2885,13 @@ jobs:
         with:
           ref: ${{ env.CHECKOUT_REF }}

+      - name: Setup
+        uses: ./.github/actions/runner_setup
+      - name: Docker setup
+        uses: ./.github/actions/docker_setup
+        with:
+          test_name: "Integration tests (amd_binary, 3/5)"
+
       - name: Prepare env script
         run: |
           rm -rf ./ci/tmp
@@ -3652,16 +2913,16 @@ jobs:
           . ./ci/tmp/praktika_setup_env.sh
           set -o pipefail
           if command -v ts &> /dev/null; then
-            python3 -m praktika run 'Unit tests (ubsan)' --workflow "PR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
+            python3 -m praktika run 'Integration tests (amd_binary, 3/5)' --workflow "PR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
           else
-            python3 -m praktika run 'Unit tests (ubsan)' --workflow "PR" --ci |& tee ./ci/tmp/job.log
+            python3 -m praktika run 'Integration tests (amd_binary, 3/5)' --workflow "PR" --ci |& tee ./ci/tmp/job.log
           fi

-  docker_server_image:
-    runs-on: [self-hosted, style-checker]
-    needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, style_check, fast_test, build_arm_tidy, build_amd_debug, build_amd_asan, build_amd_tsan, build_arm_binary, build_amd_release, build_arm_release, stateless_tests_amd_asan_distributed_plan_parallel_1_2, stateless_tests_amd_asan_distributed_plan_parallel_2_2, stateless_tests_amd_debug_parallel, stateless_tests_amd_tsan_parallel_1_2, stateless_tests_amd_tsan_parallel_2_2, stateless_tests_arm_binary_parallel]
-    if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'RG9ja2VyIHNlcnZlciBpbWFnZQ==') }}
-    name: "Docker server image"
+  integration_tests_amd_binary_4_5:
+    runs-on: [self-hosted, altinity-on-demand, altinity-func-tester]
+    needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, fast_test, build_amd_debug, build_amd_asan, build_amd_tsan, build_amd_binary, build_arm_binary, stateless_tests_amd_asan_distributed_plan_parallel_1_2, stateless_tests_amd_asan_distributed_plan_parallel_2_2, stateless_tests_amd_debug_parallel, stateless_tests_amd_tsan_parallel_1_2, stateless_tests_amd_tsan_parallel_2_2, stateless_tests_arm_binary_parallel]
+    if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKGFtZF9iaW5hcnksIDQvNSk=') }}
+    name: "Integration tests (amd_binary, 4/5)"
     outputs:
       data: ${{ steps.run.outputs.DATA }}
       pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }}
@@ -3671,6 +2932,13 @@ jobs:
         with:
           ref: ${{ env.CHECKOUT_REF }}

+      - name: Setup
+        uses: ./.github/actions/runner_setup
+      - name: Docker setup
+        uses: ./.github/actions/docker_setup
+        with:
+          test_name: "Integration tests (amd_binary, 4/5)"
+
       - name: Prepare env script
         run: |
           rm -rf ./ci/tmp
@@ -3692,16 +2960,16 @@ jobs:
           . ./ci/tmp/praktika_setup_env.sh
           set -o pipefail
           if command -v ts &> /dev/null; then
-            python3 -m praktika run 'Docker server image' --workflow "PR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
+            python3 -m praktika run 'Integration tests (amd_binary, 4/5)' --workflow "PR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
           else
-            python3 -m praktika run 'Docker server image' --workflow "PR" --ci |& tee ./ci/tmp/job.log
+            python3 -m praktika run 'Integration tests (amd_binary, 4/5)' --workflow "PR" --ci |& tee ./ci/tmp/job.log
           fi

-  docker_keeper_image:
-    runs-on: [self-hosted, style-checker]
-    needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, style_check, fast_test, build_arm_tidy, build_amd_debug, build_amd_asan, build_amd_tsan, build_arm_binary, build_amd_release, build_arm_release, stateless_tests_amd_asan_distributed_plan_parallel_1_2, stateless_tests_amd_asan_distributed_plan_parallel_2_2, stateless_tests_amd_debug_parallel, stateless_tests_amd_tsan_parallel_1_2, stateless_tests_amd_tsan_parallel_2_2, stateless_tests_arm_binary_parallel]
-    if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'RG9ja2VyIGtlZXBlciBpbWFnZQ==') }}
-    name: "Docker keeper image"
+  integration_tests_amd_binary_5_5:
+    runs-on: [self-hosted, altinity-on-demand, altinity-func-tester]
+    needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, fast_test, build_amd_debug, build_amd_asan, build_amd_tsan, build_amd_binary, build_arm_binary, stateless_tests_amd_asan_distributed_plan_parallel_1_2, stateless_tests_amd_asan_distributed_plan_parallel_2_2, stateless_tests_amd_debug_parallel, stateless_tests_amd_tsan_parallel_1_2, stateless_tests_amd_tsan_parallel_2_2, stateless_tests_arm_binary_parallel]
+    if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKGFtZF9iaW5hcnksIDUvNSk=') }}
+    name: "Integration tests (amd_binary, 5/5)"
     outputs:
       data: ${{ steps.run.outputs.DATA }}
       pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }}
@@ -3711,6 +2979,13 @@ jobs:
         with:
           ref: ${{ env.CHECKOUT_REF }}

+      - name: Setup
+        uses: ./.github/actions/runner_setup
+      - name: Docker setup
+        uses: ./.github/actions/docker_setup
+        with:
+          test_name: "Integration tests (amd_binary, 5/5)"
+
       - name: Prepare env script
         run: |
           rm -rf ./ci/tmp
@@ -3732,16 +3007,16 @@ jobs:
           . ./ci/tmp/praktika_setup_env.sh
           set -o pipefail
           if command -v ts &> /dev/null; then
-            python3 -m praktika run 'Docker keeper image' --workflow "PR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
+            python3 -m praktika run 'Integration tests (amd_binary, 5/5)' --workflow "PR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
           else
-            python3 -m praktika run 'Docker keeper image' --workflow "PR" --ci |& tee ./ci/tmp/job.log
+            python3 -m praktika run 'Integration tests (amd_binary, 5/5)' --workflow "PR" --ci |& tee ./ci/tmp/job.log
           fi

-  install_packages_amd_release:
-    runs-on: [self-hosted, style-checker]
-    needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, style_check, fast_test, build_arm_tidy, build_amd_debug, build_amd_asan, build_amd_tsan, build_arm_binary, build_amd_release, stateless_tests_amd_asan_distributed_plan_parallel_1_2, stateless_tests_amd_asan_distributed_plan_parallel_2_2, stateless_tests_amd_debug_parallel, stateless_tests_amd_tsan_parallel_1_2, stateless_tests_amd_tsan_parallel_2_2, stateless_tests_arm_binary_parallel]
-    if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'SW5zdGFsbCBwYWNrYWdlcyAoYW1kX3JlbGVhc2Up') }}
-    name: "Install packages (amd_release)"
+  integration_tests_arm_binary_distributed_plan_1_4:
+    runs-on: [self-hosted, altinity-on-demand, altinity-func-tester-aarch64]
+    needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, fast_test, build_amd_debug, build_amd_asan, build_amd_tsan, build_arm_binary, stateless_tests_amd_asan_distributed_plan_parallel_1_2, stateless_tests_amd_asan_distributed_plan_parallel_2_2, stateless_tests_amd_debug_parallel, stateless_tests_amd_tsan_parallel_1_2, stateless_tests_amd_tsan_parallel_2_2, stateless_tests_arm_binary_parallel]
+    if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKGFybV9iaW5hcnksIGRpc3RyaWJ1dGVkIHBsYW4sIDEvNCk=') }}
+    name: "Integration tests (arm_binary, distributed plan, 1/4)"
     outputs:
       data: ${{ steps.run.outputs.DATA }}
       pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }}
@@ -3751,6 +3026,13 @@ jobs:
         with:
           ref: ${{ env.CHECKOUT_REF }}

+      - name: Setup
+        uses: ./.github/actions/runner_setup
+      - name: Docker setup
+        uses: ./.github/actions/docker_setup
+        with:
+          test_name: "Integration tests (arm_binary, distributed plan, 1/4)"
+
       - name: Prepare env script
         run: |
           rm -rf ./ci/tmp
@@ -3772,16 +3054,16 @@ jobs:
           . ./ci/tmp/praktika_setup_env.sh
           set -o pipefail
           if command -v ts &> /dev/null; then
-            python3 -m praktika run 'Install packages (amd_release)' --workflow "PR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
+            python3 -m praktika run 'Integration tests (arm_binary, distributed plan, 1/4)' --workflow "PR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
           else
-            python3 -m praktika run 'Install packages (amd_release)' --workflow "PR" --ci |& tee ./ci/tmp/job.log
+            python3 -m praktika run 'Integration tests (arm_binary, distributed plan, 1/4)' --workflow "PR" --ci |& tee ./ci/tmp/job.log
           fi

-  install_packages_arm_release:
-    runs-on: [self-hosted, style-checker-aarch64]
-    needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, style_check, fast_test, build_arm_tidy, build_amd_debug, build_amd_asan, build_amd_tsan, build_arm_binary, build_arm_release, stateless_tests_amd_asan_distributed_plan_parallel_1_2, stateless_tests_amd_asan_distributed_plan_parallel_2_2, stateless_tests_amd_debug_parallel, stateless_tests_amd_tsan_parallel_1_2, stateless_tests_amd_tsan_parallel_2_2, stateless_tests_arm_binary_parallel]
-    if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'SW5zdGFsbCBwYWNrYWdlcyAoYXJtX3JlbGVhc2Up') }}
-    name: "Install packages (arm_release)"
+  integration_tests_arm_binary_distributed_plan_2_4:
+    runs-on: [self-hosted, altinity-on-demand, altinity-func-tester-aarch64]
+    needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, fast_test, build_amd_debug, build_amd_asan, build_amd_tsan, build_arm_binary, stateless_tests_amd_asan_distributed_plan_parallel_1_2, stateless_tests_amd_asan_distributed_plan_parallel_2_2, stateless_tests_amd_debug_parallel, stateless_tests_amd_tsan_parallel_1_2, stateless_tests_amd_tsan_parallel_2_2, stateless_tests_arm_binary_parallel]
+    if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKGFybV9iaW5hcnksIGRpc3RyaWJ1dGVkIHBsYW4sIDIvNCk=') }}
+    name: "Integration tests (arm_binary, distributed plan, 2/4)"
     outputs:
       data: ${{ steps.run.outputs.DATA }}
       pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }}
@@ -3791,6 +3073,13 @@ jobs:
         with:
           ref: ${{ env.CHECKOUT_REF }}

+      - name: Setup
+        uses: ./.github/actions/runner_setup
+      - name: Docker setup
+        uses: ./.github/actions/docker_setup
+        with:
+          test_name: "Integration tests (arm_binary, distributed plan, 2/4)"
+
       - name: Prepare env script
         run: |
           rm -rf ./ci/tmp
@@ -3812,16 +3101,16 @@ jobs:
           . ./ci/tmp/praktika_setup_env.sh
           set -o pipefail
           if command -v ts &> /dev/null; then
-            python3 -m praktika run 'Install packages (arm_release)' --workflow "PR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
+            python3 -m praktika run 'Integration tests (arm_binary, distributed plan, 2/4)' --workflow "PR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
           else
-            python3 -m praktika run 'Install packages (arm_release)' --workflow "PR" --ci |& tee ./ci/tmp/job.log
+            python3 -m praktika run 'Integration tests (arm_binary, distributed plan, 2/4)' --workflow "PR" --ci |& tee ./ci/tmp/job.log
           fi

-  compatibility_check_amd_release:
-    runs-on: [self-hosted, style-checker]
-    needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, style_check, fast_test, build_arm_tidy, build_amd_debug, build_amd_asan, build_amd_tsan, build_arm_binary, build_amd_release, stateless_tests_amd_asan_distributed_plan_parallel_1_2, stateless_tests_amd_asan_distributed_plan_parallel_2_2, stateless_tests_amd_debug_parallel, stateless_tests_amd_tsan_parallel_1_2, stateless_tests_amd_tsan_parallel_2_2, stateless_tests_arm_binary_parallel]
-    if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'Q29tcGF0aWJpbGl0eSBjaGVjayAoYW1kX3JlbGVhc2Up') }}
-    name: "Compatibility check (amd_release)"
+  integration_tests_arm_binary_distributed_plan_3_4:
+    runs-on: [self-hosted, altinity-on-demand, altinity-func-tester-aarch64]
+    needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, fast_test, build_amd_debug, build_amd_asan, build_amd_tsan, build_arm_binary, stateless_tests_amd_asan_distributed_plan_parallel_1_2, stateless_tests_amd_asan_distributed_plan_parallel_2_2, stateless_tests_amd_debug_parallel, stateless_tests_amd_tsan_parallel_1_2, stateless_tests_amd_tsan_parallel_2_2, stateless_tests_arm_binary_parallel]
+    if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKGFybV9iaW5hcnksIGRpc3RyaWJ1dGVkIHBsYW4sIDMvNCk=') }}
+    name: "Integration tests (arm_binary, distributed plan, 3/4)"
     outputs:
       data: ${{ steps.run.outputs.DATA }}
       pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }}
@@ -3831,6 +3120,13 @@ jobs:
         with:
           ref: ${{ env.CHECKOUT_REF }}

+      - name: Setup
+        uses: ./.github/actions/runner_setup
+      - name: Docker setup
+        uses: ./.github/actions/docker_setup
+        with:
+          test_name: "Integration tests (arm_binary, distributed plan, 3/4)"
+
       - name: Prepare env script
         run: |
           rm -rf ./ci/tmp
@@ -3852,16 +3148,16 @@ jobs:
           . ./ci/tmp/praktika_setup_env.sh
           set -o pipefail
           if command -v ts &> /dev/null; then
-            python3 -m praktika run 'Compatibility check (amd_release)' --workflow "PR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
+            python3 -m praktika run 'Integration tests (arm_binary, distributed plan, 3/4)' --workflow "PR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
           else
-            python3 -m praktika run 'Compatibility check (amd_release)' --workflow "PR" --ci |& tee ./ci/tmp/job.log
+            python3 -m praktika run 'Integration tests (arm_binary, distributed plan, 3/4)' --workflow "PR" --ci |& tee ./ci/tmp/job.log
           fi

-  compatibility_check_arm_release:
-    runs-on: [self-hosted, style-checker-aarch64]
-    needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, style_check, fast_test, build_arm_tidy, build_amd_debug, build_amd_asan, build_amd_tsan, build_arm_binary, build_arm_release, stateless_tests_amd_asan_distributed_plan_parallel_1_2, stateless_tests_amd_asan_distributed_plan_parallel_2_2, stateless_tests_amd_debug_parallel, stateless_tests_amd_tsan_parallel_1_2, stateless_tests_amd_tsan_parallel_2_2, stateless_tests_arm_binary_parallel]
-    if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'Q29tcGF0aWJpbGl0eSBjaGVjayAoYXJtX3JlbGVhc2Up') }}
-    name: "Compatibility check (arm_release)"
+  integration_tests_arm_binary_distributed_plan_4_4:
+    runs-on: [self-hosted, altinity-on-demand, altinity-func-tester-aarch64]
+    needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, fast_test, build_amd_debug, build_amd_asan, build_amd_tsan, build_arm_binary, stateless_tests_amd_asan_distributed_plan_parallel_1_2, stateless_tests_amd_asan_distributed_plan_parallel_2_2, stateless_tests_amd_debug_parallel, stateless_tests_amd_tsan_parallel_1_2, stateless_tests_amd_tsan_parallel_2_2, stateless_tests_arm_binary_parallel]
+    if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKGFybV9iaW5hcnksIGRpc3RyaWJ1dGVkIHBsYW4sIDQvNCk=') }}
+    name: "Integration tests (arm_binary, distributed plan, 4/4)"
     outputs:
       data: ${{ steps.run.outputs.DATA }}
       pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }}
@@ -3871,6 +3167,13 @@ jobs:
         with:
          ref: ${{ env.CHECKOUT_REF }}

+      - name: Setup
+        uses: ./.github/actions/runner_setup
+      - name: Docker setup
+        uses: ./.github/actions/docker_setup
+        with:
+          test_name: "Integration tests (arm_binary, distributed plan, 4/4)"
+
       - name: Prepare env script
         run: |
           rm -rf ./ci/tmp
@@ -3892,16 +3195,16 @@ jobs:
           . ./ci/tmp/praktika_setup_env.sh
           set -o pipefail
           if command -v ts &> /dev/null; then
-            python3 -m praktika run 'Compatibility check (arm_release)' --workflow "PR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
+            python3 -m praktika run 'Integration tests (arm_binary, distributed plan, 4/4)' --workflow "PR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
           else
-            python3 -m praktika run 'Compatibility check (arm_release)' --workflow "PR" --ci |& tee ./ci/tmp/job.log
+            python3 -m praktika run 'Integration tests (arm_binary, distributed plan, 4/4)' --workflow "PR" --ci |& tee ./ci/tmp/job.log
           fi

-  stress_test_amd_debug:
-    runs-on: [self-hosted, amd-medium]
-    needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, style_check, fast_test, build_arm_tidy, build_amd_debug, build_amd_asan, build_amd_tsan, build_arm_binary, stateless_tests_amd_asan_distributed_plan_parallel_1_2, stateless_tests_amd_asan_distributed_plan_parallel_2_2, stateless_tests_amd_debug_parallel, stateless_tests_amd_tsan_parallel_1_2, stateless_tests_amd_tsan_parallel_2_2, stateless_tests_arm_binary_parallel]
-    if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'U3RyZXNzIHRlc3QgKGFtZF9kZWJ1Zyk=') }}
-    name: "Stress test (amd_debug)"
+  integration_tests_amd_tsan_1_6:
+    runs-on: [self-hosted, altinity-on-demand, altinity-func-tester]
+    needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, fast_test, build_amd_debug, build_amd_asan, build_amd_tsan, build_arm_binary, stateless_tests_amd_asan_distributed_plan_parallel_1_2, stateless_tests_amd_asan_distributed_plan_parallel_2_2, stateless_tests_amd_debug_parallel, stateless_tests_amd_tsan_parallel_1_2, stateless_tests_amd_tsan_parallel_2_2, stateless_tests_arm_binary_parallel]
+    if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKGFtZF90c2FuLCAxLzYp') }}
+    name: "Integration tests (amd_tsan, 1/6)"
     outputs:
       data: ${{ steps.run.outputs.DATA }}
       pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }}
@@ -3911,6 +3214,13 @@ jobs:
         with:
           ref: ${{ env.CHECKOUT_REF }}

+      - name: Setup
+        uses: ./.github/actions/runner_setup
+      - name: Docker setup
+        uses: ./.github/actions/docker_setup
+        with:
+          test_name: "Integration tests (amd_tsan, 1/6)"
+
       - name: Prepare env script
         run: |
           rm -rf ./ci/tmp
@@ -3932,16 +3242,16 @@ jobs:
           . ./ci/tmp/praktika_setup_env.sh
           set -o pipefail
           if command -v ts &> /dev/null; then
-            python3 -m praktika run 'Stress test (amd_debug)' --workflow "PR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
+            python3 -m praktika run 'Integration tests (amd_tsan, 1/6)' --workflow "PR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
           else
-            python3 -m praktika run 'Stress test (amd_debug)' --workflow "PR" --ci |& tee ./ci/tmp/job.log
+            python3 -m praktika run 'Integration tests (amd_tsan, 1/6)' --workflow "PR" --ci |& tee ./ci/tmp/job.log
           fi

-  stress_test_amd_tsan:
-    runs-on: [self-hosted, amd-medium]
-    needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, style_check, fast_test, build_arm_tidy, build_amd_debug, build_amd_asan, build_amd_tsan, build_arm_binary, stateless_tests_amd_asan_distributed_plan_parallel_1_2, stateless_tests_amd_asan_distributed_plan_parallel_2_2, stateless_tests_amd_debug_parallel, stateless_tests_amd_tsan_parallel_1_2, stateless_tests_amd_tsan_parallel_2_2, stateless_tests_arm_binary_parallel]
-    if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'U3RyZXNzIHRlc3QgKGFtZF90c2FuKQ==') }}
-    name: "Stress test (amd_tsan)"
+  integration_tests_amd_tsan_2_6:
+    runs-on: [self-hosted, altinity-on-demand, altinity-func-tester]
+    needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, fast_test, build_amd_debug, build_amd_asan, build_amd_tsan, build_arm_binary, stateless_tests_amd_asan_distributed_plan_parallel_1_2, stateless_tests_amd_asan_distributed_plan_parallel_2_2, stateless_tests_amd_debug_parallel, stateless_tests_amd_tsan_parallel_1_2, stateless_tests_amd_tsan_parallel_2_2, stateless_tests_arm_binary_parallel]
+    if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKGFtZF90c2FuLCAyLzYp') }}
+    name: "Integration tests (amd_tsan, 2/6)"
     outputs:
       data: ${{ steps.run.outputs.DATA }}
       pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }}
@@ -3951,6 +3261,13 @@ jobs:
         with:
           ref: ${{ env.CHECKOUT_REF }}

+      - name: Setup
+        uses: ./.github/actions/runner_setup
+      - name: Docker setup
+        uses: ./.github/actions/docker_setup
+        with:
+          test_name: "Integration tests (amd_tsan, 2/6)"
+
       - name: Prepare env script
         run: |
           rm -rf ./ci/tmp
@@ -3972,16 +3289,16 @@ jobs:
           . ./ci/tmp/praktika_setup_env.sh
           set -o pipefail
           if command -v ts &> /dev/null; then
-            python3 -m praktika run 'Stress test (amd_tsan)' --workflow "PR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
+            python3 -m praktika run 'Integration tests (amd_tsan, 2/6)' --workflow "PR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
           else
-            python3 -m praktika run 'Stress test (amd_tsan)' --workflow "PR" --ci |& tee ./ci/tmp/job.log
+            python3 -m praktika run 'Integration tests (amd_tsan, 2/6)' --workflow "PR" --ci |& tee ./ci/tmp/job.log
           fi

-  stress_test_arm_asan:
-    runs-on: [self-hosted, arm-medium]
-    needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, style_check, fast_test, build_arm_tidy, build_amd_debug, build_amd_asan, build_amd_tsan, build_arm_asan, build_arm_binary, stateless_tests_amd_asan_distributed_plan_parallel_1_2, stateless_tests_amd_asan_distributed_plan_parallel_2_2, stateless_tests_amd_debug_parallel, stateless_tests_amd_tsan_parallel_1_2, stateless_tests_amd_tsan_parallel_2_2, stateless_tests_arm_binary_parallel]
-    if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'U3RyZXNzIHRlc3QgKGFybV9hc2FuKQ==') }}
-    name: "Stress test (arm_asan)"
+  integration_tests_amd_tsan_3_6:
+    runs-on: [self-hosted, altinity-on-demand, altinity-func-tester]
+    needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, fast_test, build_amd_debug, build_amd_asan, build_amd_tsan, build_arm_binary, stateless_tests_amd_asan_distributed_plan_parallel_1_2, stateless_tests_amd_asan_distributed_plan_parallel_2_2, stateless_tests_amd_debug_parallel, stateless_tests_amd_tsan_parallel_1_2, stateless_tests_amd_tsan_parallel_2_2, stateless_tests_arm_binary_parallel]
+    if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKGFtZF90c2FuLCAzLzYp') }}
+    name: "Integration tests (amd_tsan, 3/6)"
     outputs:
       data: ${{ steps.run.outputs.DATA }}
       pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }}
@@ -3991,6 +3308,13 @@ jobs:
         with:
           ref: ${{ env.CHECKOUT_REF }}

+      - name: Setup
+        uses: ./.github/actions/runner_setup
+      - name: Docker setup
+        uses: ./.github/actions/docker_setup
+        with:
+          test_name: "Integration tests (amd_tsan, 3/6)"
+
       - name: Prepare env script
         run: |
           rm -rf ./ci/tmp
@@ -4012,16 +3336,16 @@ jobs:
           . ./ci/tmp/praktika_setup_env.sh
           set -o pipefail
           if command -v ts &> /dev/null; then
-            python3 -m praktika run 'Stress test (arm_asan)' --workflow "PR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
+            python3 -m praktika run 'Integration tests (amd_tsan, 3/6)' --workflow "PR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
           else
-            python3 -m praktika run 'Stress test (arm_asan)' --workflow "PR" --ci |& tee ./ci/tmp/job.log
+            python3 -m praktika run 'Integration tests (amd_tsan, 3/6)' --workflow "PR" --ci |& tee ./ci/tmp/job.log
           fi

-  stress_test_arm_asan_s3:
-    runs-on: [self-hosted, arm-medium]
-    needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, style_check, fast_test, build_arm_tidy, build_amd_debug, build_amd_asan, build_amd_tsan, build_arm_asan, build_arm_binary, stateless_tests_amd_asan_distributed_plan_parallel_1_2, stateless_tests_amd_asan_distributed_plan_parallel_2_2, stateless_tests_amd_debug_parallel, stateless_tests_amd_tsan_parallel_1_2, stateless_tests_amd_tsan_parallel_2_2, stateless_tests_arm_binary_parallel]
-    if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'U3RyZXNzIHRlc3QgKGFybV9hc2FuLCBzMyk=') }}
-    name: "Stress test (arm_asan, s3)"
+  integration_tests_amd_tsan_4_6:
+    runs-on: [self-hosted, altinity-on-demand, altinity-func-tester]
+    needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, fast_test, build_amd_debug, build_amd_asan, build_amd_tsan, build_arm_binary, stateless_tests_amd_asan_distributed_plan_parallel_1_2, stateless_tests_amd_asan_distributed_plan_parallel_2_2, stateless_tests_amd_debug_parallel, stateless_tests_amd_tsan_parallel_1_2, stateless_tests_amd_tsan_parallel_2_2, stateless_tests_arm_binary_parallel]
+    if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKGFtZF90c2FuLCA0LzYp') }}
+    name: "Integration tests (amd_tsan, 4/6)"
     outputs:
       data: ${{ steps.run.outputs.DATA }}
       pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }}
@@ -4031,6 +3355,13 @@ jobs:
         with:
           ref: ${{ env.CHECKOUT_REF }}

+      - name: Setup
+        uses: ./.github/actions/runner_setup
+      - name: Docker setup
+        uses: ./.github/actions/docker_setup
+        with:
+          test_name: "Integration tests (amd_tsan, 4/6)"
+
       - name: Prepare env script
         run: |
           rm -rf ./ci/tmp
@@ -4052,16 +3383,16 @@ jobs:
./ci/tmp/praktika_setup_env.sh set -o pipefail if command -v ts &> /dev/null; then - python3 -m praktika run 'Stress test (arm_asan, s3)' --workflow "PR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log + python3 -m praktika run 'Integration tests (amd_tsan, 4/6)' --workflow "PR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log else - python3 -m praktika run 'Stress test (arm_asan, s3)' --workflow "PR" --ci |& tee ./ci/tmp/job.log + python3 -m praktika run 'Integration tests (amd_tsan, 4/6)' --workflow "PR" --ci |& tee ./ci/tmp/job.log fi - stress_test_amd_ubsan: - runs-on: [self-hosted, amd-medium] - needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, style_check, fast_test, build_arm_tidy, build_amd_debug, build_amd_asan, build_amd_tsan, build_amd_ubsan, build_arm_binary, stateless_tests_amd_asan_distributed_plan_parallel_1_2, stateless_tests_amd_asan_distributed_plan_parallel_2_2, stateless_tests_amd_debug_parallel, stateless_tests_amd_tsan_parallel_1_2, stateless_tests_amd_tsan_parallel_2_2, stateless_tests_arm_binary_parallel] - if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'U3RyZXNzIHRlc3QgKGFtZF91YnNhbik=') }} - name: "Stress test (amd_ubsan)" + integration_tests_amd_tsan_5_6: + runs-on: [self-hosted, altinity-on-demand, altinity-func-tester] + needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, fast_test, build_amd_debug, build_amd_asan, build_amd_tsan, build_arm_binary, stateless_tests_amd_asan_distributed_plan_parallel_1_2, stateless_tests_amd_asan_distributed_plan_parallel_2_2, stateless_tests_amd_debug_parallel, stateless_tests_amd_tsan_parallel_1_2, stateless_tests_amd_tsan_parallel_2_2, stateless_tests_arm_binary_parallel] + if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKGFtZF90c2FuLCA1LzYp') }} + name: "Integration tests (amd_tsan, 5/6)" outputs: data: ${{ steps.run.outputs.DATA }} pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} @@ -4071,6 +3402,13 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Integration tests (amd_tsan, 5/6)" + - name: Prepare env script run: | rm -rf ./ci/tmp @@ -4092,16 +3430,16 @@ jobs: . 
./ci/tmp/praktika_setup_env.sh set -o pipefail if command -v ts &> /dev/null; then - python3 -m praktika run 'Stress test (amd_ubsan)' --workflow "PR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log + python3 -m praktika run 'Integration tests (amd_tsan, 5/6)' --workflow "PR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log else - python3 -m praktika run 'Stress test (amd_ubsan)' --workflow "PR" --ci |& tee ./ci/tmp/job.log + python3 -m praktika run 'Integration tests (amd_tsan, 5/6)' --workflow "PR" --ci |& tee ./ci/tmp/job.log fi - stress_test_amd_msan: - runs-on: [self-hosted, amd-medium] - needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, style_check, fast_test, build_arm_tidy, build_amd_debug, build_amd_asan, build_amd_tsan, build_amd_msan, build_arm_binary, stateless_tests_amd_asan_distributed_plan_parallel_1_2, stateless_tests_amd_asan_distributed_plan_parallel_2_2, stateless_tests_amd_debug_parallel, stateless_tests_amd_tsan_parallel_1_2, stateless_tests_amd_tsan_parallel_2_2, stateless_tests_arm_binary_parallel] - if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'U3RyZXNzIHRlc3QgKGFtZF9tc2FuKQ==') }} - name: "Stress test (amd_msan)" + integration_tests_amd_tsan_6_6: + runs-on: [self-hosted, altinity-on-demand, altinity-func-tester] + needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, fast_test, build_amd_debug, build_amd_asan, build_amd_tsan, build_arm_binary, stateless_tests_amd_asan_distributed_plan_parallel_1_2, stateless_tests_amd_asan_distributed_plan_parallel_2_2, stateless_tests_amd_debug_parallel, stateless_tests_amd_tsan_parallel_1_2, stateless_tests_amd_tsan_parallel_2_2, stateless_tests_arm_binary_parallel] + if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKGFtZF90c2FuLCA2LzYp') }} + name: "Integration tests (amd_tsan, 6/6)" outputs: data: ${{ steps.run.outputs.DATA }} pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} @@ -4111,6 +3449,13 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Integration tests (amd_tsan, 6/6)" + - name: Prepare env script run: | rm -rf ./ci/tmp @@ -4132,16 +3477,16 @@ jobs: . 
./ci/tmp/praktika_setup_env.sh set -o pipefail if command -v ts &> /dev/null; then - python3 -m praktika run 'Stress test (amd_msan)' --workflow "PR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log + python3 -m praktika run 'Integration tests (amd_tsan, 6/6)' --workflow "PR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log else - python3 -m praktika run 'Stress test (amd_msan)' --workflow "PR" --ci |& tee ./ci/tmp/job.log + python3 -m praktika run 'Integration tests (amd_tsan, 6/6)' --workflow "PR" --ci |& tee ./ci/tmp/job.log fi - upgrade_check_amd_asan: - runs-on: [self-hosted, amd-medium] - needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, style_check, fast_test, build_arm_tidy, build_amd_debug, build_amd_asan, build_amd_tsan, build_arm_binary, stateless_tests_amd_asan_distributed_plan_parallel_1_2, stateless_tests_amd_asan_distributed_plan_parallel_2_2, stateless_tests_amd_debug_parallel, stateless_tests_amd_tsan_parallel_1_2, stateless_tests_amd_tsan_parallel_2_2, stateless_tests_arm_binary_parallel] - if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'VXBncmFkZSBjaGVjayAoYW1kX2FzYW4p') }} - name: "Upgrade check (amd_asan)" + unit_tests_asan: + runs-on: [self-hosted, altinity-on-demand, altinity-func-tester] + needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, fast_test, build_amd_asan] + if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'VW5pdCB0ZXN0cyAoYXNhbik=') }} + name: "Unit tests (asan)" outputs: data: ${{ steps.run.outputs.DATA }} pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} @@ -4151,6 +3496,13 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Unit tests (asan)" + - name: Prepare env script run: | rm -rf ./ci/tmp @@ -4172,16 +3524,16 @@ jobs: . 
./ci/tmp/praktika_setup_env.sh set -o pipefail if command -v ts &> /dev/null; then - python3 -m praktika run 'Upgrade check (amd_asan)' --workflow "PR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log + python3 -m praktika run 'Unit tests (asan)' --workflow "PR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log else - python3 -m praktika run 'Upgrade check (amd_asan)' --workflow "PR" --ci |& tee ./ci/tmp/job.log + python3 -m praktika run 'Unit tests (asan)' --workflow "PR" --ci |& tee ./ci/tmp/job.log fi - upgrade_check_amd_tsan: - runs-on: [self-hosted, amd-medium] - needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, style_check, fast_test, build_arm_tidy, build_amd_debug, build_amd_asan, build_amd_tsan, build_arm_binary, stateless_tests_amd_asan_distributed_plan_parallel_1_2, stateless_tests_amd_asan_distributed_plan_parallel_2_2, stateless_tests_amd_debug_parallel, stateless_tests_amd_tsan_parallel_1_2, stateless_tests_amd_tsan_parallel_2_2, stateless_tests_arm_binary_parallel] - if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'VXBncmFkZSBjaGVjayAoYW1kX3RzYW4p') }} - name: "Upgrade check (amd_tsan)" + unit_tests_tsan: + runs-on: [self-hosted, altinity-on-demand, altinity-func-tester] + needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, fast_test, build_amd_tsan] + if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'VW5pdCB0ZXN0cyAodHNhbik=') }} + name: "Unit tests (tsan)" outputs: data: ${{ steps.run.outputs.DATA }} pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} @@ -4191,6 +3543,13 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Unit tests (tsan)" + - name: Prepare env script run: | rm -rf ./ci/tmp @@ -4212,16 +3571,16 @@ jobs: . 
./ci/tmp/praktika_setup_env.sh set -o pipefail if command -v ts &> /dev/null; then - python3 -m praktika run 'Upgrade check (amd_tsan)' --workflow "PR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log + python3 -m praktika run 'Unit tests (tsan)' --workflow "PR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log else - python3 -m praktika run 'Upgrade check (amd_tsan)' --workflow "PR" --ci |& tee ./ci/tmp/job.log + python3 -m praktika run 'Unit tests (tsan)' --workflow "PR" --ci |& tee ./ci/tmp/job.log fi - upgrade_check_amd_msan: - runs-on: [self-hosted, amd-medium] - needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, style_check, fast_test, build_arm_tidy, build_amd_debug, build_amd_asan, build_amd_tsan, build_amd_msan, build_arm_binary, stateless_tests_amd_asan_distributed_plan_parallel_1_2, stateless_tests_amd_asan_distributed_plan_parallel_2_2, stateless_tests_amd_debug_parallel, stateless_tests_amd_tsan_parallel_1_2, stateless_tests_amd_tsan_parallel_2_2, stateless_tests_arm_binary_parallel] - if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'VXBncmFkZSBjaGVjayAoYW1kX21zYW4p') }} - name: "Upgrade check (amd_msan)" + unit_tests_msan: + runs-on: [self-hosted, altinity-on-demand, altinity-func-tester] + needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, fast_test, build_amd_msan] + if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'VW5pdCB0ZXN0cyAobXNhbik=') }} + name: "Unit tests (msan)" outputs: data: ${{ steps.run.outputs.DATA }} pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} @@ -4231,6 +3590,13 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Unit tests (msan)" + - name: Prepare env script run: | rm -rf ./ci/tmp @@ -4252,16 +3618,16 @@ jobs: . 
./ci/tmp/praktika_setup_env.sh set -o pipefail if command -v ts &> /dev/null; then - python3 -m praktika run 'Upgrade check (amd_msan)' --workflow "PR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log + python3 -m praktika run 'Unit tests (msan)' --workflow "PR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log else - python3 -m praktika run 'Upgrade check (amd_msan)' --workflow "PR" --ci |& tee ./ci/tmp/job.log + python3 -m praktika run 'Unit tests (msan)' --workflow "PR" --ci |& tee ./ci/tmp/job.log fi - upgrade_check_amd_debug: - runs-on: [self-hosted, amd-medium] - needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, style_check, fast_test, build_arm_tidy, build_amd_debug, build_amd_asan, build_amd_tsan, build_arm_binary, stateless_tests_amd_asan_distributed_plan_parallel_1_2, stateless_tests_amd_asan_distributed_plan_parallel_2_2, stateless_tests_amd_debug_parallel, stateless_tests_amd_tsan_parallel_1_2, stateless_tests_amd_tsan_parallel_2_2, stateless_tests_arm_binary_parallel] - if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'VXBncmFkZSBjaGVjayAoYW1kX2RlYnVnKQ==') }} - name: "Upgrade check (amd_debug)" + unit_tests_ubsan: + runs-on: [self-hosted, altinity-on-demand, altinity-func-tester] + needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, fast_test, build_amd_ubsan] + if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'VW5pdCB0ZXN0cyAodWJzYW4p') }} + name: "Unit tests (ubsan)" outputs: data: ${{ steps.run.outputs.DATA }} pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} @@ -4271,6 +3637,13 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Unit tests (ubsan)" + - name: Prepare env script run: | rm -rf ./ci/tmp @@ -4292,16 +3665,16 @@ jobs: . 
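Every job's run step uses the same logging wrapper: `|&` merges stdout and stderr, `ts` (from moreutils) prefixes timestamps when it is installed, `tee` mirrors output to `./ci/tmp/job.log`, and `set -o pipefail` keeps praktika's exit status from being masked by the pipeline. A minimal standalone sketch of that wrapper (the job name here is a placeholder, not tied to any one job above):

    set -o pipefail                   # fail the step if praktika fails, despite tee
    JOB='Unit tests (msan)'           # placeholder job name for illustration
    if command -v ts &> /dev/null; then
      # ts prefixes every log line with a wall-clock timestamp
      python3 -m praktika run "$JOB" --workflow "PR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
    else
      # fall back to untimestamped logs on runners without moreutils
      python3 -m praktika run "$JOB" --workflow "PR" --ci |& tee ./ci/tmp/job.log
    fi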
./ci/tmp/praktika_setup_env.sh set -o pipefail if command -v ts &> /dev/null; then - python3 -m praktika run 'Upgrade check (amd_debug)' --workflow "PR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log + python3 -m praktika run 'Unit tests (ubsan)' --workflow "PR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log else - python3 -m praktika run 'Upgrade check (amd_debug)' --workflow "PR" --ci |& tee ./ci/tmp/job.log + python3 -m praktika run 'Unit tests (ubsan)' --workflow "PR" --ci |& tee ./ci/tmp/job.log fi - ast_fuzzer_amd_debug: - runs-on: [self-hosted, amd-medium] - needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, style_check, fast_test, build_arm_tidy, build_amd_debug, build_amd_asan, build_amd_tsan, build_arm_binary, stateless_tests_amd_asan_distributed_plan_parallel_1_2, stateless_tests_amd_asan_distributed_plan_parallel_2_2, stateless_tests_amd_debug_parallel, stateless_tests_amd_tsan_parallel_1_2, stateless_tests_amd_tsan_parallel_2_2, stateless_tests_arm_binary_parallel] - if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'QVNUIGZ1enplciAoYW1kX2RlYnVnKQ==') }} - name: "AST fuzzer (amd_debug)" + docker_server_image: + runs-on: [self-hosted, altinity-on-demand, altinity-style-checker] + needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, fast_test, build_amd_debug, build_amd_asan, build_amd_tsan, build_arm_binary, build_amd_release, build_arm_release, stateless_tests_amd_asan_distributed_plan_parallel_1_2, stateless_tests_amd_asan_distributed_plan_parallel_2_2, stateless_tests_amd_debug_parallel, stateless_tests_amd_tsan_parallel_1_2, stateless_tests_amd_tsan_parallel_2_2, stateless_tests_arm_binary_parallel] + if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'RG9ja2VyIHNlcnZlciBpbWFnZQ==') }} + name: "Docker server image" outputs: data: ${{ steps.run.outputs.DATA }} pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} @@ -4311,6 +3684,13 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Docker server image" + - name: Prepare env script run: | rm -rf ./ci/tmp @@ -4332,16 +3712,16 @@ jobs: . 
./ci/tmp/praktika_setup_env.sh set -o pipefail if command -v ts &> /dev/null; then - python3 -m praktika run 'AST fuzzer (amd_debug)' --workflow "PR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log + python3 -m praktika run 'Docker server image' --workflow "PR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log else - python3 -m praktika run 'AST fuzzer (amd_debug)' --workflow "PR" --ci |& tee ./ci/tmp/job.log + python3 -m praktika run 'Docker server image' --workflow "PR" --ci |& tee ./ci/tmp/job.log fi - ast_fuzzer_arm_asan: - runs-on: [self-hosted, arm-medium] - needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, style_check, fast_test, build_arm_tidy, build_amd_debug, build_amd_asan, build_amd_tsan, build_arm_asan, build_arm_binary, stateless_tests_amd_asan_distributed_plan_parallel_1_2, stateless_tests_amd_asan_distributed_plan_parallel_2_2, stateless_tests_amd_debug_parallel, stateless_tests_amd_tsan_parallel_1_2, stateless_tests_amd_tsan_parallel_2_2, stateless_tests_arm_binary_parallel] - if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'QVNUIGZ1enplciAoYXJtX2FzYW4p') }} - name: "AST fuzzer (arm_asan)" + docker_keeper_image: + runs-on: [self-hosted, altinity-on-demand, altinity-style-checker] + needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, fast_test, build_amd_debug, build_amd_asan, build_amd_tsan, build_arm_binary, build_amd_release, build_arm_release, stateless_tests_amd_asan_distributed_plan_parallel_1_2, stateless_tests_amd_asan_distributed_plan_parallel_2_2, stateless_tests_amd_debug_parallel, stateless_tests_amd_tsan_parallel_1_2, stateless_tests_amd_tsan_parallel_2_2, stateless_tests_arm_binary_parallel] + if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'RG9ja2VyIGtlZXBlciBpbWFnZQ==') }} + name: "Docker keeper image" outputs: data: ${{ steps.run.outputs.DATA }} pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} @@ -4351,6 +3731,13 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Docker keeper image" + - name: Prepare env script run: | rm -rf ./ci/tmp @@ -4372,16 +3759,16 @@ jobs: . 
./ci/tmp/praktika_setup_env.sh set -o pipefail if command -v ts &> /dev/null; then - python3 -m praktika run 'AST fuzzer (arm_asan)' --workflow "PR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log + python3 -m praktika run 'Docker keeper image' --workflow "PR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log else - python3 -m praktika run 'AST fuzzer (arm_asan)' --workflow "PR" --ci |& tee ./ci/tmp/job.log + python3 -m praktika run 'Docker keeper image' --workflow "PR" --ci |& tee ./ci/tmp/job.log fi - ast_fuzzer_amd_tsan: - runs-on: [self-hosted, amd-medium] - needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, style_check, fast_test, build_arm_tidy, build_amd_debug, build_amd_asan, build_amd_tsan, build_arm_binary, stateless_tests_amd_asan_distributed_plan_parallel_1_2, stateless_tests_amd_asan_distributed_plan_parallel_2_2, stateless_tests_amd_debug_parallel, stateless_tests_amd_tsan_parallel_1_2, stateless_tests_amd_tsan_parallel_2_2, stateless_tests_arm_binary_parallel] - if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'QVNUIGZ1enplciAoYW1kX3RzYW4p') }} - name: "AST fuzzer (amd_tsan)" + install_packages_amd_release: + runs-on: [self-hosted, altinity-on-demand, altinity-func-tester] + needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, fast_test, build_amd_debug, build_amd_asan, build_amd_tsan, build_arm_binary, build_amd_release, stateless_tests_amd_asan_distributed_plan_parallel_1_2, stateless_tests_amd_asan_distributed_plan_parallel_2_2, stateless_tests_amd_debug_parallel, stateless_tests_amd_tsan_parallel_1_2, stateless_tests_amd_tsan_parallel_2_2, stateless_tests_arm_binary_parallel] + if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'SW5zdGFsbCBwYWNrYWdlcyAoYW1kX3JlbGVhc2Up') }} + name: "Install packages (amd_release)" outputs: data: ${{ steps.run.outputs.DATA }} pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} @@ -4391,6 +3778,13 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Install packages (amd_release)" + - name: Prepare env script run: | rm -rf ./ci/tmp @@ -4412,16 +3806,16 @@ jobs: . 
./ci/tmp/praktika_setup_env.sh set -o pipefail if command -v ts &> /dev/null; then - python3 -m praktika run 'AST fuzzer (amd_tsan)' --workflow "PR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log + python3 -m praktika run 'Install packages (amd_release)' --workflow "PR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log else - python3 -m praktika run 'AST fuzzer (amd_tsan)' --workflow "PR" --ci |& tee ./ci/tmp/job.log + python3 -m praktika run 'Install packages (amd_release)' --workflow "PR" --ci |& tee ./ci/tmp/job.log fi - ast_fuzzer_amd_msan: - runs-on: [self-hosted, amd-medium] - needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, style_check, fast_test, build_arm_tidy, build_amd_debug, build_amd_asan, build_amd_tsan, build_amd_msan, build_arm_binary, stateless_tests_amd_asan_distributed_plan_parallel_1_2, stateless_tests_amd_asan_distributed_plan_parallel_2_2, stateless_tests_amd_debug_parallel, stateless_tests_amd_tsan_parallel_1_2, stateless_tests_amd_tsan_parallel_2_2, stateless_tests_arm_binary_parallel] - if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'QVNUIGZ1enplciAoYW1kX21zYW4p') }} - name: "AST fuzzer (amd_msan)" + install_packages_arm_release: + runs-on: [self-hosted, altinity-on-demand, altinity-func-tester-aarch64] + needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, fast_test, build_amd_debug, build_amd_asan, build_amd_tsan, build_arm_binary, build_arm_release, stateless_tests_amd_asan_distributed_plan_parallel_1_2, stateless_tests_amd_asan_distributed_plan_parallel_2_2, stateless_tests_amd_debug_parallel, stateless_tests_amd_tsan_parallel_1_2, stateless_tests_amd_tsan_parallel_2_2, stateless_tests_arm_binary_parallel] + if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'SW5zdGFsbCBwYWNrYWdlcyAoYXJtX3JlbGVhc2Up') }} + name: "Install packages (arm_release)" outputs: data: ${{ steps.run.outputs.DATA }} pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} @@ -4431,6 +3825,13 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Install packages (arm_release)" + - name: Prepare env script run: | rm -rf ./ci/tmp @@ -4452,16 +3853,16 @@ jobs: . 
./ci/tmp/praktika_setup_env.sh set -o pipefail if command -v ts &> /dev/null; then - python3 -m praktika run 'AST fuzzer (amd_msan)' --workflow "PR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log + python3 -m praktika run 'Install packages (arm_release)' --workflow "PR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log else - python3 -m praktika run 'AST fuzzer (amd_msan)' --workflow "PR" --ci |& tee ./ci/tmp/job.log + python3 -m praktika run 'Install packages (arm_release)' --workflow "PR" --ci |& tee ./ci/tmp/job.log fi - ast_fuzzer_amd_ubsan: - runs-on: [self-hosted, amd-medium] - needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, style_check, fast_test, build_arm_tidy, build_amd_debug, build_amd_asan, build_amd_tsan, build_amd_ubsan, build_arm_binary, stateless_tests_amd_asan_distributed_plan_parallel_1_2, stateless_tests_amd_asan_distributed_plan_parallel_2_2, stateless_tests_amd_debug_parallel, stateless_tests_amd_tsan_parallel_1_2, stateless_tests_amd_tsan_parallel_2_2, stateless_tests_arm_binary_parallel] - if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'QVNUIGZ1enplciAoYW1kX3Vic2FuKQ==') }} - name: "AST fuzzer (amd_ubsan)" + compatibility_check_amd_release: + runs-on: [self-hosted, altinity-on-demand, altinity-style-checker] + needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, fast_test, build_amd_debug, build_amd_asan, build_amd_tsan, build_arm_binary, build_amd_release, stateless_tests_amd_asan_distributed_plan_parallel_1_2, stateless_tests_amd_asan_distributed_plan_parallel_2_2, stateless_tests_amd_debug_parallel, stateless_tests_amd_tsan_parallel_1_2, stateless_tests_amd_tsan_parallel_2_2, stateless_tests_arm_binary_parallel] + if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'Q29tcGF0aWJpbGl0eSBjaGVjayAoYW1kX3JlbGVhc2Up') }} + name: "Compatibility check (amd_release)" outputs: data: ${{ steps.run.outputs.DATA }} pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} @@ -4471,6 +3872,13 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Compatibility check (amd_release)" + - name: Prepare env script run: | rm -rf ./ci/tmp @@ -4492,16 +3900,16 @@ jobs: . 
./ci/tmp/praktika_setup_env.sh set -o pipefail if command -v ts &> /dev/null; then - python3 -m praktika run 'AST fuzzer (amd_ubsan)' --workflow "PR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log + python3 -m praktika run 'Compatibility check (amd_release)' --workflow "PR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log else - python3 -m praktika run 'AST fuzzer (amd_ubsan)' --workflow "PR" --ci |& tee ./ci/tmp/job.log + python3 -m praktika run 'Compatibility check (amd_release)' --workflow "PR" --ci |& tee ./ci/tmp/job.log fi - buzzhouse_amd_debug: - runs-on: [self-hosted, amd-medium] - needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, style_check, fast_test, build_arm_tidy, build_amd_debug, build_amd_asan, build_amd_tsan, build_arm_binary, stateless_tests_amd_asan_distributed_plan_parallel_1_2, stateless_tests_amd_asan_distributed_plan_parallel_2_2, stateless_tests_amd_debug_parallel, stateless_tests_amd_tsan_parallel_1_2, stateless_tests_amd_tsan_parallel_2_2, stateless_tests_arm_binary_parallel] - if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'QnV6ekhvdXNlIChhbWRfZGVidWcp') }} - name: "BuzzHouse (amd_debug)" + compatibility_check_arm_release: + runs-on: [self-hosted, altinity-on-demand, altinity-style-checker-aarch64] + needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, fast_test, build_amd_debug, build_amd_asan, build_amd_tsan, build_arm_binary, build_arm_release, stateless_tests_amd_asan_distributed_plan_parallel_1_2, stateless_tests_amd_asan_distributed_plan_parallel_2_2, stateless_tests_amd_debug_parallel, stateless_tests_amd_tsan_parallel_1_2, stateless_tests_amd_tsan_parallel_2_2, stateless_tests_arm_binary_parallel] + if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'Q29tcGF0aWJpbGl0eSBjaGVjayAoYXJtX3JlbGVhc2Up') }} + name: "Compatibility check (arm_release)" outputs: data: ${{ steps.run.outputs.DATA }} pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} @@ -4511,6 +3919,13 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Compatibility check (arm_release)" + - name: Prepare env script run: | rm -rf ./ci/tmp @@ -4532,16 +3947,16 @@ jobs: . 
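The `if:` guard on each job combines three checks: the workflow was not cancelled, no upstream job in `needs` reported a `failure` or `undefined` pipeline status, and the job is not already a cache hit. The cache-hit check can be reproduced outside Actions; a hypothetical sketch, assuming the `config_workflow` job's `data` output has been saved to a local file named data.json (that filename is illustrative only) and that `cache_success_base64` is the string scanned by `contains(...)` above:

    # Does the saved workflow config mark this job as a cache hit?
    TOKEN=$(echo -n 'Compatibility check (amd_release)' | base64)
    jq -e --arg t "$TOKEN" \
      '.workflow_config.cache_success_base64 | contains($t)' data.json \
      && echo 'cache hit: job would be skipped' \
      || echo 'no cache hit: job would run'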
./ci/tmp/praktika_setup_env.sh set -o pipefail if command -v ts &> /dev/null; then - python3 -m praktika run 'BuzzHouse (amd_debug)' --workflow "PR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log + python3 -m praktika run 'Compatibility check (arm_release)' --workflow "PR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log else - python3 -m praktika run 'BuzzHouse (amd_debug)' --workflow "PR" --ci |& tee ./ci/tmp/job.log + python3 -m praktika run 'Compatibility check (arm_release)' --workflow "PR" --ci |& tee ./ci/tmp/job.log fi - buzzhouse_arm_asan: - runs-on: [self-hosted, arm-medium] - needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, style_check, fast_test, build_arm_tidy, build_amd_debug, build_amd_asan, build_amd_tsan, build_arm_asan, build_arm_binary, stateless_tests_amd_asan_distributed_plan_parallel_1_2, stateless_tests_amd_asan_distributed_plan_parallel_2_2, stateless_tests_amd_debug_parallel, stateless_tests_amd_tsan_parallel_1_2, stateless_tests_amd_tsan_parallel_2_2, stateless_tests_arm_binary_parallel] - if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'QnV6ekhvdXNlIChhcm1fYXNhbik=') }} - name: "BuzzHouse (arm_asan)" + stress_test_amd_debug: + runs-on: [self-hosted, altinity-on-demand, altinity-func-tester] + needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, fast_test, build_amd_debug, build_amd_asan, build_amd_tsan, build_arm_binary, stateless_tests_amd_asan_distributed_plan_parallel_1_2, stateless_tests_amd_asan_distributed_plan_parallel_2_2, stateless_tests_amd_debug_parallel, stateless_tests_amd_tsan_parallel_1_2, stateless_tests_amd_tsan_parallel_2_2, stateless_tests_arm_binary_parallel] + if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'U3RyZXNzIHRlc3QgKGFtZF9kZWJ1Zyk=') }} + name: "Stress test (amd_debug)" outputs: data: ${{ steps.run.outputs.DATA }} pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} @@ -4551,6 +3966,13 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Stress test (amd_debug)" + - name: Prepare env script run: | rm -rf ./ci/tmp @@ -4572,16 +3994,16 @@ jobs: . 
./ci/tmp/praktika_setup_env.sh set -o pipefail if command -v ts &> /dev/null; then - python3 -m praktika run 'BuzzHouse (arm_asan)' --workflow "PR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log + python3 -m praktika run 'Stress test (amd_debug)' --workflow "PR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log else - python3 -m praktika run 'BuzzHouse (arm_asan)' --workflow "PR" --ci |& tee ./ci/tmp/job.log + python3 -m praktika run 'Stress test (amd_debug)' --workflow "PR" --ci |& tee ./ci/tmp/job.log fi - buzzhouse_amd_tsan: - runs-on: [self-hosted, amd-medium] - needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, style_check, fast_test, build_arm_tidy, build_amd_debug, build_amd_asan, build_amd_tsan, build_arm_binary, stateless_tests_amd_asan_distributed_plan_parallel_1_2, stateless_tests_amd_asan_distributed_plan_parallel_2_2, stateless_tests_amd_debug_parallel, stateless_tests_amd_tsan_parallel_1_2, stateless_tests_amd_tsan_parallel_2_2, stateless_tests_arm_binary_parallel] - if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'QnV6ekhvdXNlIChhbWRfdHNhbik=') }} - name: "BuzzHouse (amd_tsan)" + stress_test_amd_tsan: + runs-on: [self-hosted, altinity-on-demand, altinity-func-tester] + needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, fast_test, build_amd_debug, build_amd_asan, build_amd_tsan, build_arm_binary, stateless_tests_amd_asan_distributed_plan_parallel_1_2, stateless_tests_amd_asan_distributed_plan_parallel_2_2, stateless_tests_amd_debug_parallel, stateless_tests_amd_tsan_parallel_1_2, stateless_tests_amd_tsan_parallel_2_2, stateless_tests_arm_binary_parallel] + if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'U3RyZXNzIHRlc3QgKGFtZF90c2FuKQ==') }} + name: "Stress test (amd_tsan)" outputs: data: ${{ steps.run.outputs.DATA }} pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} @@ -4591,6 +4013,13 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Stress test (amd_tsan)" + - name: Prepare env script run: | rm -rf ./ci/tmp @@ -4612,16 +4041,16 @@ jobs: . 
./ci/tmp/praktika_setup_env.sh set -o pipefail if command -v ts &> /dev/null; then - python3 -m praktika run 'BuzzHouse (amd_tsan)' --workflow "PR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log + python3 -m praktika run 'Stress test (amd_tsan)' --workflow "PR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log else - python3 -m praktika run 'BuzzHouse (amd_tsan)' --workflow "PR" --ci |& tee ./ci/tmp/job.log + python3 -m praktika run 'Stress test (amd_tsan)' --workflow "PR" --ci |& tee ./ci/tmp/job.log fi - buzzhouse_amd_msan: - runs-on: [self-hosted, amd-medium] - needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, style_check, fast_test, build_arm_tidy, build_amd_debug, build_amd_asan, build_amd_tsan, build_amd_msan, build_arm_binary, stateless_tests_amd_asan_distributed_plan_parallel_1_2, stateless_tests_amd_asan_distributed_plan_parallel_2_2, stateless_tests_amd_debug_parallel, stateless_tests_amd_tsan_parallel_1_2, stateless_tests_amd_tsan_parallel_2_2, stateless_tests_arm_binary_parallel] - if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'QnV6ekhvdXNlIChhbWRfbXNhbik=') }} - name: "BuzzHouse (amd_msan)" + stress_test_arm_asan: + runs-on: [self-hosted, altinity-on-demand, altinity-func-tester-aarch64] + needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, fast_test, build_amd_debug, build_amd_asan, build_amd_tsan, build_arm_asan, build_arm_binary, stateless_tests_amd_asan_distributed_plan_parallel_1_2, stateless_tests_amd_asan_distributed_plan_parallel_2_2, stateless_tests_amd_debug_parallel, stateless_tests_amd_tsan_parallel_1_2, stateless_tests_amd_tsan_parallel_2_2, stateless_tests_arm_binary_parallel] + if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'U3RyZXNzIHRlc3QgKGFybV9hc2FuKQ==') }} + name: "Stress test (arm_asan)" outputs: data: ${{ steps.run.outputs.DATA }} pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} @@ -4631,6 +4060,13 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Stress test (arm_asan)" + - name: Prepare env script run: | rm -rf ./ci/tmp @@ -4652,16 +4088,16 @@ jobs: . 
./ci/tmp/praktika_setup_env.sh set -o pipefail if command -v ts &> /dev/null; then - python3 -m praktika run 'BuzzHouse (amd_msan)' --workflow "PR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log + python3 -m praktika run 'Stress test (arm_asan)' --workflow "PR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log else - python3 -m praktika run 'BuzzHouse (amd_msan)' --workflow "PR" --ci |& tee ./ci/tmp/job.log + python3 -m praktika run 'Stress test (arm_asan)' --workflow "PR" --ci |& tee ./ci/tmp/job.log fi - buzzhouse_amd_ubsan: - runs-on: [self-hosted, amd-medium] - needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, style_check, fast_test, build_arm_tidy, build_amd_debug, build_amd_asan, build_amd_tsan, build_amd_ubsan, build_arm_binary, stateless_tests_amd_asan_distributed_plan_parallel_1_2, stateless_tests_amd_asan_distributed_plan_parallel_2_2, stateless_tests_amd_debug_parallel, stateless_tests_amd_tsan_parallel_1_2, stateless_tests_amd_tsan_parallel_2_2, stateless_tests_arm_binary_parallel] - if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'QnV6ekhvdXNlIChhbWRfdWJzYW4p') }} - name: "BuzzHouse (amd_ubsan)" + stress_test_arm_asan_s3: + runs-on: [self-hosted, altinity-on-demand, altinity-func-tester-aarch64] + needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, fast_test, build_amd_debug, build_amd_asan, build_amd_tsan, build_arm_asan, build_arm_binary, stateless_tests_amd_asan_distributed_plan_parallel_1_2, stateless_tests_amd_asan_distributed_plan_parallel_2_2, stateless_tests_amd_debug_parallel, stateless_tests_amd_tsan_parallel_1_2, stateless_tests_amd_tsan_parallel_2_2, stateless_tests_arm_binary_parallel] + if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'U3RyZXNzIHRlc3QgKGFybV9hc2FuLCBzMyk=') }} + name: "Stress test (arm_asan, s3)" outputs: data: ${{ steps.run.outputs.DATA }} pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} @@ -4671,6 +4107,13 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Stress test (arm_asan, s3)" + - name: Prepare env script run: | rm -rf ./ci/tmp @@ -4692,16 +4135,16 @@ jobs: . 
./ci/tmp/praktika_setup_env.sh set -o pipefail if command -v ts &> /dev/null; then - python3 -m praktika run 'BuzzHouse (amd_ubsan)' --workflow "PR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log + python3 -m praktika run 'Stress test (arm_asan, s3)' --workflow "PR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log else - python3 -m praktika run 'BuzzHouse (amd_ubsan)' --workflow "PR" --ci |& tee ./ci/tmp/job.log + python3 -m praktika run 'Stress test (arm_asan, s3)' --workflow "PR" --ci |& tee ./ci/tmp/job.log fi - performance_comparison_amd_release_master_head_1_6: - runs-on: [self-hosted, amd-medium] - needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, style_check, fast_test, build_arm_tidy, build_amd_debug, build_amd_asan, build_amd_tsan, build_arm_binary, build_amd_release, stateless_tests_amd_asan_distributed_plan_parallel_1_2, stateless_tests_amd_asan_distributed_plan_parallel_2_2, stateless_tests_amd_debug_parallel, stateless_tests_amd_tsan_parallel_1_2, stateless_tests_amd_tsan_parallel_2_2, stateless_tests_arm_binary_parallel] - if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'UGVyZm9ybWFuY2UgQ29tcGFyaXNvbiAoYW1kX3JlbGVhc2UsIG1hc3Rlcl9oZWFkLCAxLzYp') }} - name: "Performance Comparison (amd_release, master_head, 1/6)" + stress_test_amd_ubsan: + runs-on: [self-hosted, altinity-on-demand, altinity-func-tester] + needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, fast_test, build_amd_debug, build_amd_asan, build_amd_tsan, build_amd_ubsan, build_arm_binary, stateless_tests_amd_asan_distributed_plan_parallel_1_2, stateless_tests_amd_asan_distributed_plan_parallel_2_2, stateless_tests_amd_debug_parallel, stateless_tests_amd_tsan_parallel_1_2, stateless_tests_amd_tsan_parallel_2_2, stateless_tests_arm_binary_parallel] + if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'U3RyZXNzIHRlc3QgKGFtZF91YnNhbik=') }} + name: "Stress test (amd_ubsan)" outputs: data: ${{ steps.run.outputs.DATA }} pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} @@ -4711,6 +4154,13 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Stress test (amd_ubsan)" + - name: Prepare env script run: | rm -rf ./ci/tmp @@ -4732,16 +4182,16 @@ jobs: . 
./ci/tmp/praktika_setup_env.sh set -o pipefail if command -v ts &> /dev/null; then - python3 -m praktika run 'Performance Comparison (amd_release, master_head, 1/6)' --workflow "PR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log + python3 -m praktika run 'Stress test (amd_ubsan)' --workflow "PR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log else - python3 -m praktika run 'Performance Comparison (amd_release, master_head, 1/6)' --workflow "PR" --ci |& tee ./ci/tmp/job.log + python3 -m praktika run 'Stress test (amd_ubsan)' --workflow "PR" --ci |& tee ./ci/tmp/job.log fi - performance_comparison_amd_release_master_head_2_6: - runs-on: [self-hosted, amd-medium] - needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, style_check, fast_test, build_arm_tidy, build_amd_debug, build_amd_asan, build_amd_tsan, build_arm_binary, build_amd_release, stateless_tests_amd_asan_distributed_plan_parallel_1_2, stateless_tests_amd_asan_distributed_plan_parallel_2_2, stateless_tests_amd_debug_parallel, stateless_tests_amd_tsan_parallel_1_2, stateless_tests_amd_tsan_parallel_2_2, stateless_tests_arm_binary_parallel] - if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'UGVyZm9ybWFuY2UgQ29tcGFyaXNvbiAoYW1kX3JlbGVhc2UsIG1hc3Rlcl9oZWFkLCAyLzYp') }} - name: "Performance Comparison (amd_release, master_head, 2/6)" + stress_test_amd_msan: + runs-on: [self-hosted, altinity-on-demand, altinity-func-tester] + needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, fast_test, build_amd_debug, build_amd_asan, build_amd_tsan, build_amd_msan, build_arm_binary, stateless_tests_amd_asan_distributed_plan_parallel_1_2, stateless_tests_amd_asan_distributed_plan_parallel_2_2, stateless_tests_amd_debug_parallel, stateless_tests_amd_tsan_parallel_1_2, stateless_tests_amd_tsan_parallel_2_2, stateless_tests_arm_binary_parallel] + if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'U3RyZXNzIHRlc3QgKGFtZF9tc2FuKQ==') }} + name: "Stress test (amd_msan)" outputs: data: ${{ steps.run.outputs.DATA }} pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} @@ -4751,6 +4201,13 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Stress test (amd_msan)" + - name: Prepare env script run: | rm -rf ./ci/tmp @@ -4772,16 +4229,16 @@ jobs: . 
./ci/tmp/praktika_setup_env.sh set -o pipefail if command -v ts &> /dev/null; then - python3 -m praktika run 'Performance Comparison (amd_release, master_head, 2/6)' --workflow "PR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log + python3 -m praktika run 'Stress test (amd_msan)' --workflow "PR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log else - python3 -m praktika run 'Performance Comparison (amd_release, master_head, 2/6)' --workflow "PR" --ci |& tee ./ci/tmp/job.log + python3 -m praktika run 'Stress test (amd_msan)' --workflow "PR" --ci |& tee ./ci/tmp/job.log fi - performance_comparison_amd_release_master_head_3_6: - runs-on: [self-hosted, amd-medium] - needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, style_check, fast_test, build_arm_tidy, build_amd_debug, build_amd_asan, build_amd_tsan, build_arm_binary, build_amd_release, stateless_tests_amd_asan_distributed_plan_parallel_1_2, stateless_tests_amd_asan_distributed_plan_parallel_2_2, stateless_tests_amd_debug_parallel, stateless_tests_amd_tsan_parallel_1_2, stateless_tests_amd_tsan_parallel_2_2, stateless_tests_arm_binary_parallel] - if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'UGVyZm9ybWFuY2UgQ29tcGFyaXNvbiAoYW1kX3JlbGVhc2UsIG1hc3Rlcl9oZWFkLCAzLzYp') }} - name: "Performance Comparison (amd_release, master_head, 3/6)" + ast_fuzzer_amd_debug: + runs-on: [self-hosted, altinity-on-demand, altinity-func-tester] + needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, fast_test, build_amd_debug, build_amd_asan, build_amd_tsan, build_arm_binary, stateless_tests_amd_asan_distributed_plan_parallel_1_2, stateless_tests_amd_asan_distributed_plan_parallel_2_2, stateless_tests_amd_debug_parallel, stateless_tests_amd_tsan_parallel_1_2, stateless_tests_amd_tsan_parallel_2_2, stateless_tests_arm_binary_parallel] + if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'QVNUIGZ1enplciAoYW1kX2RlYnVnKQ==') }} + name: "AST fuzzer (amd_debug)" outputs: data: ${{ steps.run.outputs.DATA }} pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} @@ -4791,6 +4248,13 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "AST fuzzer (amd_debug)" + - name: Prepare env script run: | rm -rf ./ci/tmp @@ -4812,16 +4276,16 @@ jobs: . 
./ci/tmp/praktika_setup_env.sh set -o pipefail if command -v ts &> /dev/null; then - python3 -m praktika run 'Performance Comparison (amd_release, master_head, 3/6)' --workflow "PR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log + python3 -m praktika run 'AST fuzzer (amd_debug)' --workflow "PR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log else - python3 -m praktika run 'Performance Comparison (amd_release, master_head, 3/6)' --workflow "PR" --ci |& tee ./ci/tmp/job.log + python3 -m praktika run 'AST fuzzer (amd_debug)' --workflow "PR" --ci |& tee ./ci/tmp/job.log fi - performance_comparison_amd_release_master_head_4_6: - runs-on: [self-hosted, amd-medium] - needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, style_check, fast_test, build_arm_tidy, build_amd_debug, build_amd_asan, build_amd_tsan, build_arm_binary, build_amd_release, stateless_tests_amd_asan_distributed_plan_parallel_1_2, stateless_tests_amd_asan_distributed_plan_parallel_2_2, stateless_tests_amd_debug_parallel, stateless_tests_amd_tsan_parallel_1_2, stateless_tests_amd_tsan_parallel_2_2, stateless_tests_arm_binary_parallel] - if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'UGVyZm9ybWFuY2UgQ29tcGFyaXNvbiAoYW1kX3JlbGVhc2UsIG1hc3Rlcl9oZWFkLCA0LzYp') }} - name: "Performance Comparison (amd_release, master_head, 4/6)" + ast_fuzzer_arm_asan: + runs-on: [self-hosted, altinity-on-demand, altinity-func-tester-aarch64] + needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, fast_test, build_amd_debug, build_amd_asan, build_amd_tsan, build_arm_asan, build_arm_binary, stateless_tests_amd_asan_distributed_plan_parallel_1_2, stateless_tests_amd_asan_distributed_plan_parallel_2_2, stateless_tests_amd_debug_parallel, stateless_tests_amd_tsan_parallel_1_2, stateless_tests_amd_tsan_parallel_2_2, stateless_tests_arm_binary_parallel] + if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'QVNUIGZ1enplciAoYXJtX2FzYW4p') }} + name: "AST fuzzer (arm_asan)" outputs: data: ${{ steps.run.outputs.DATA }} pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} @@ -4831,6 +4295,13 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "AST fuzzer (arm_asan)" + - name: Prepare env script run: | rm -rf ./ci/tmp @@ -4852,16 +4323,16 @@ jobs: . 
./ci/tmp/praktika_setup_env.sh set -o pipefail if command -v ts &> /dev/null; then - python3 -m praktika run 'Performance Comparison (amd_release, master_head, 4/6)' --workflow "PR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log + python3 -m praktika run 'AST fuzzer (arm_asan)' --workflow "PR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log else - python3 -m praktika run 'Performance Comparison (amd_release, master_head, 4/6)' --workflow "PR" --ci |& tee ./ci/tmp/job.log + python3 -m praktika run 'AST fuzzer (arm_asan)' --workflow "PR" --ci |& tee ./ci/tmp/job.log fi - performance_comparison_amd_release_master_head_5_6: - runs-on: [self-hosted, amd-medium] - needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, style_check, fast_test, build_arm_tidy, build_amd_debug, build_amd_asan, build_amd_tsan, build_arm_binary, build_amd_release, stateless_tests_amd_asan_distributed_plan_parallel_1_2, stateless_tests_amd_asan_distributed_plan_parallel_2_2, stateless_tests_amd_debug_parallel, stateless_tests_amd_tsan_parallel_1_2, stateless_tests_amd_tsan_parallel_2_2, stateless_tests_arm_binary_parallel] - if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'UGVyZm9ybWFuY2UgQ29tcGFyaXNvbiAoYW1kX3JlbGVhc2UsIG1hc3Rlcl9oZWFkLCA1LzYp') }} - name: "Performance Comparison (amd_release, master_head, 5/6)" + ast_fuzzer_amd_tsan: + runs-on: [self-hosted, altinity-on-demand, altinity-func-tester] + needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, fast_test, build_amd_debug, build_amd_asan, build_amd_tsan, build_arm_binary, stateless_tests_amd_asan_distributed_plan_parallel_1_2, stateless_tests_amd_asan_distributed_plan_parallel_2_2, stateless_tests_amd_debug_parallel, stateless_tests_amd_tsan_parallel_1_2, stateless_tests_amd_tsan_parallel_2_2, stateless_tests_arm_binary_parallel] + if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'QVNUIGZ1enplciAoYW1kX3RzYW4p') }} + name: "AST fuzzer (amd_tsan)" outputs: data: ${{ steps.run.outputs.DATA }} pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} @@ -4871,6 +4342,13 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "AST fuzzer (amd_tsan)" + - name: Prepare env script run: | rm -rf ./ci/tmp @@ -4892,16 +4370,16 @@ jobs: . 
./ci/tmp/praktika_setup_env.sh set -o pipefail if command -v ts &> /dev/null; then - python3 -m praktika run 'Performance Comparison (amd_release, master_head, 5/6)' --workflow "PR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log + python3 -m praktika run 'AST fuzzer (amd_tsan)' --workflow "PR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log else - python3 -m praktika run 'Performance Comparison (amd_release, master_head, 5/6)' --workflow "PR" --ci |& tee ./ci/tmp/job.log + python3 -m praktika run 'AST fuzzer (amd_tsan)' --workflow "PR" --ci |& tee ./ci/tmp/job.log fi - performance_comparison_amd_release_master_head_6_6: - runs-on: [self-hosted, amd-medium] - needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, style_check, fast_test, build_arm_tidy, build_amd_debug, build_amd_asan, build_amd_tsan, build_arm_binary, build_amd_release, stateless_tests_amd_asan_distributed_plan_parallel_1_2, stateless_tests_amd_asan_distributed_plan_parallel_2_2, stateless_tests_amd_debug_parallel, stateless_tests_amd_tsan_parallel_1_2, stateless_tests_amd_tsan_parallel_2_2, stateless_tests_arm_binary_parallel] - if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'UGVyZm9ybWFuY2UgQ29tcGFyaXNvbiAoYW1kX3JlbGVhc2UsIG1hc3Rlcl9oZWFkLCA2LzYp') }} - name: "Performance Comparison (amd_release, master_head, 6/6)" + ast_fuzzer_amd_msan: + runs-on: [self-hosted, altinity-on-demand, altinity-func-tester] + needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, fast_test, build_amd_debug, build_amd_asan, build_amd_tsan, build_amd_msan, build_arm_binary, stateless_tests_amd_asan_distributed_plan_parallel_1_2, stateless_tests_amd_asan_distributed_plan_parallel_2_2, stateless_tests_amd_debug_parallel, stateless_tests_amd_tsan_parallel_1_2, stateless_tests_amd_tsan_parallel_2_2, stateless_tests_arm_binary_parallel] + if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'QVNUIGZ1enplciAoYW1kX21zYW4p') }} + name: "AST fuzzer (amd_msan)" outputs: data: ${{ steps.run.outputs.DATA }} pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} @@ -4911,6 +4389,13 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "AST fuzzer (amd_msan)" + - name: Prepare env script run: | rm -rf ./ci/tmp @@ -4932,16 +4417,16 @@ jobs: . 
./ci/tmp/praktika_setup_env.sh set -o pipefail if command -v ts &> /dev/null; then - python3 -m praktika run 'Performance Comparison (amd_release, master_head, 6/6)' --workflow "PR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log + python3 -m praktika run 'AST fuzzer (amd_msan)' --workflow "PR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log else - python3 -m praktika run 'Performance Comparison (amd_release, master_head, 6/6)' --workflow "PR" --ci |& tee ./ci/tmp/job.log + python3 -m praktika run 'AST fuzzer (amd_msan)' --workflow "PR" --ci |& tee ./ci/tmp/job.log fi - performance_comparison_arm_release_master_head_1_6: - runs-on: [self-hosted, arm-medium] - needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, style_check, fast_test, build_arm_tidy, build_amd_debug, build_amd_asan, build_amd_tsan, build_arm_binary, build_arm_release, stateless_tests_amd_asan_distributed_plan_parallel_1_2, stateless_tests_amd_asan_distributed_plan_parallel_2_2, stateless_tests_amd_debug_parallel, stateless_tests_amd_tsan_parallel_1_2, stateless_tests_amd_tsan_parallel_2_2, stateless_tests_arm_binary_parallel] - if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'UGVyZm9ybWFuY2UgQ29tcGFyaXNvbiAoYXJtX3JlbGVhc2UsIG1hc3Rlcl9oZWFkLCAxLzYp') }} - name: "Performance Comparison (arm_release, master_head, 1/6)" + ast_fuzzer_amd_ubsan: + runs-on: [self-hosted, altinity-on-demand, altinity-func-tester] + needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, fast_test, build_amd_debug, build_amd_asan, build_amd_tsan, build_amd_ubsan, build_arm_binary, stateless_tests_amd_asan_distributed_plan_parallel_1_2, stateless_tests_amd_asan_distributed_plan_parallel_2_2, stateless_tests_amd_debug_parallel, stateless_tests_amd_tsan_parallel_1_2, stateless_tests_amd_tsan_parallel_2_2, stateless_tests_arm_binary_parallel] + if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'QVNUIGZ1enplciAoYW1kX3Vic2FuKQ==') }} + name: "AST fuzzer (amd_ubsan)" outputs: data: ${{ steps.run.outputs.DATA }} pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} @@ -4951,6 +4436,13 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "AST fuzzer (amd_ubsan)" + - name: Prepare env script run: | rm -rf ./ci/tmp @@ -4972,16 +4464,16 @@ jobs: . 
./ci/tmp/praktika_setup_env.sh set -o pipefail if command -v ts &> /dev/null; then - python3 -m praktika run 'Performance Comparison (arm_release, master_head, 1/6)' --workflow "PR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log + python3 -m praktika run 'AST fuzzer (amd_ubsan)' --workflow "PR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log else - python3 -m praktika run 'Performance Comparison (arm_release, master_head, 1/6)' --workflow "PR" --ci |& tee ./ci/tmp/job.log + python3 -m praktika run 'AST fuzzer (amd_ubsan)' --workflow "PR" --ci |& tee ./ci/tmp/job.log fi - performance_comparison_arm_release_master_head_2_6: - runs-on: [self-hosted, arm-medium] - needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, style_check, fast_test, build_arm_tidy, build_amd_debug, build_amd_asan, build_amd_tsan, build_arm_binary, build_arm_release, stateless_tests_amd_asan_distributed_plan_parallel_1_2, stateless_tests_amd_asan_distributed_plan_parallel_2_2, stateless_tests_amd_debug_parallel, stateless_tests_amd_tsan_parallel_1_2, stateless_tests_amd_tsan_parallel_2_2, stateless_tests_arm_binary_parallel] - if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'UGVyZm9ybWFuY2UgQ29tcGFyaXNvbiAoYXJtX3JlbGVhc2UsIG1hc3Rlcl9oZWFkLCAyLzYp') }} - name: "Performance Comparison (arm_release, master_head, 2/6)" + buzzhouse_amd_debug: + runs-on: [self-hosted, altinity-on-demand, altinity-func-tester] + needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, fast_test, build_amd_debug, build_amd_asan, build_amd_tsan, build_arm_binary, stateless_tests_amd_asan_distributed_plan_parallel_1_2, stateless_tests_amd_asan_distributed_plan_parallel_2_2, stateless_tests_amd_debug_parallel, stateless_tests_amd_tsan_parallel_1_2, stateless_tests_amd_tsan_parallel_2_2, stateless_tests_arm_binary_parallel] + if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'QnV6ekhvdXNlIChhbWRfZGVidWcp') }} + name: "BuzzHouse (amd_debug)" outputs: data: ${{ steps.run.outputs.DATA }} pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} @@ -4991,6 +4483,13 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "BuzzHouse (amd_debug)" + - name: Prepare env script run: | rm -rf ./ci/tmp @@ -5012,16 +4511,16 @@ jobs: . 
./ci/tmp/praktika_setup_env.sh set -o pipefail if command -v ts &> /dev/null; then - python3 -m praktika run 'Performance Comparison (arm_release, master_head, 2/6)' --workflow "PR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log + python3 -m praktika run 'BuzzHouse (amd_debug)' --workflow "PR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log else - python3 -m praktika run 'Performance Comparison (arm_release, master_head, 2/6)' --workflow "PR" --ci |& tee ./ci/tmp/job.log + python3 -m praktika run 'BuzzHouse (amd_debug)' --workflow "PR" --ci |& tee ./ci/tmp/job.log fi - performance_comparison_arm_release_master_head_3_6: - runs-on: [self-hosted, arm-medium] - needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, style_check, fast_test, build_arm_tidy, build_amd_debug, build_amd_asan, build_amd_tsan, build_arm_binary, build_arm_release, stateless_tests_amd_asan_distributed_plan_parallel_1_2, stateless_tests_amd_asan_distributed_plan_parallel_2_2, stateless_tests_amd_debug_parallel, stateless_tests_amd_tsan_parallel_1_2, stateless_tests_amd_tsan_parallel_2_2, stateless_tests_arm_binary_parallel] - if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'UGVyZm9ybWFuY2UgQ29tcGFyaXNvbiAoYXJtX3JlbGVhc2UsIG1hc3Rlcl9oZWFkLCAzLzYp') }} - name: "Performance Comparison (arm_release, master_head, 3/6)" + buzzhouse_arm_asan: + runs-on: [self-hosted, altinity-on-demand, altinity-func-tester-aarch64] + needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, fast_test, build_amd_debug, build_amd_asan, build_amd_tsan, build_arm_asan, build_arm_binary, stateless_tests_amd_asan_distributed_plan_parallel_1_2, stateless_tests_amd_asan_distributed_plan_parallel_2_2, stateless_tests_amd_debug_parallel, stateless_tests_amd_tsan_parallel_1_2, stateless_tests_amd_tsan_parallel_2_2, stateless_tests_arm_binary_parallel] + if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'QnV6ekhvdXNlIChhcm1fYXNhbik=') }} + name: "BuzzHouse (arm_asan)" outputs: data: ${{ steps.run.outputs.DATA }} pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} @@ -5031,6 +4530,13 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "BuzzHouse (arm_asan)" + - name: Prepare env script run: | rm -rf ./ci/tmp @@ -5052,16 +4558,16 @@ jobs: . 
./ci/tmp/praktika_setup_env.sh set -o pipefail if command -v ts &> /dev/null; then - python3 -m praktika run 'Performance Comparison (arm_release, master_head, 3/6)' --workflow "PR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log + python3 -m praktika run 'BuzzHouse (arm_asan)' --workflow "PR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log else - python3 -m praktika run 'Performance Comparison (arm_release, master_head, 3/6)' --workflow "PR" --ci |& tee ./ci/tmp/job.log + python3 -m praktika run 'BuzzHouse (arm_asan)' --workflow "PR" --ci |& tee ./ci/tmp/job.log fi - performance_comparison_arm_release_master_head_4_6: - runs-on: [self-hosted, arm-medium] - needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, style_check, fast_test, build_arm_tidy, build_amd_debug, build_amd_asan, build_amd_tsan, build_arm_binary, build_arm_release, stateless_tests_amd_asan_distributed_plan_parallel_1_2, stateless_tests_amd_asan_distributed_plan_parallel_2_2, stateless_tests_amd_debug_parallel, stateless_tests_amd_tsan_parallel_1_2, stateless_tests_amd_tsan_parallel_2_2, stateless_tests_arm_binary_parallel] - if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'UGVyZm9ybWFuY2UgQ29tcGFyaXNvbiAoYXJtX3JlbGVhc2UsIG1hc3Rlcl9oZWFkLCA0LzYp') }} - name: "Performance Comparison (arm_release, master_head, 4/6)" + buzzhouse_amd_tsan: + runs-on: [self-hosted, altinity-on-demand, altinity-func-tester] + needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, fast_test, build_amd_debug, build_amd_asan, build_amd_tsan, build_arm_binary, stateless_tests_amd_asan_distributed_plan_parallel_1_2, stateless_tests_amd_asan_distributed_plan_parallel_2_2, stateless_tests_amd_debug_parallel, stateless_tests_amd_tsan_parallel_1_2, stateless_tests_amd_tsan_parallel_2_2, stateless_tests_arm_binary_parallel] + if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'QnV6ekhvdXNlIChhbWRfdHNhbik=') }} + name: "BuzzHouse (amd_tsan)" outputs: data: ${{ steps.run.outputs.DATA }} pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} @@ -5071,6 +4577,13 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "BuzzHouse (amd_tsan)" + - name: Prepare env script run: | rm -rf ./ci/tmp @@ -5092,16 +4605,16 @@ jobs: . 
./ci/tmp/praktika_setup_env.sh set -o pipefail if command -v ts &> /dev/null; then - python3 -m praktika run 'Performance Comparison (arm_release, master_head, 4/6)' --workflow "PR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log + python3 -m praktika run 'BuzzHouse (amd_tsan)' --workflow "PR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log else - python3 -m praktika run 'Performance Comparison (arm_release, master_head, 4/6)' --workflow "PR" --ci |& tee ./ci/tmp/job.log + python3 -m praktika run 'BuzzHouse (amd_tsan)' --workflow "PR" --ci |& tee ./ci/tmp/job.log fi - performance_comparison_arm_release_master_head_5_6: - runs-on: [self-hosted, arm-medium] - needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, style_check, fast_test, build_arm_tidy, build_amd_debug, build_amd_asan, build_amd_tsan, build_arm_binary, build_arm_release, stateless_tests_amd_asan_distributed_plan_parallel_1_2, stateless_tests_amd_asan_distributed_plan_parallel_2_2, stateless_tests_amd_debug_parallel, stateless_tests_amd_tsan_parallel_1_2, stateless_tests_amd_tsan_parallel_2_2, stateless_tests_arm_binary_parallel] - if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'UGVyZm9ybWFuY2UgQ29tcGFyaXNvbiAoYXJtX3JlbGVhc2UsIG1hc3Rlcl9oZWFkLCA1LzYp') }} - name: "Performance Comparison (arm_release, master_head, 5/6)" + buzzhouse_amd_msan: + runs-on: [self-hosted, altinity-on-demand, altinity-func-tester] + needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, fast_test, build_amd_debug, build_amd_asan, build_amd_tsan, build_amd_msan, build_arm_binary, stateless_tests_amd_asan_distributed_plan_parallel_1_2, stateless_tests_amd_asan_distributed_plan_parallel_2_2, stateless_tests_amd_debug_parallel, stateless_tests_amd_tsan_parallel_1_2, stateless_tests_amd_tsan_parallel_2_2, stateless_tests_arm_binary_parallel] + if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'QnV6ekhvdXNlIChhbWRfbXNhbik=') }} + name: "BuzzHouse (amd_msan)" outputs: data: ${{ steps.run.outputs.DATA }} pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} @@ -5111,6 +4624,13 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "BuzzHouse (amd_msan)" + - name: Prepare env script run: | rm -rf ./ci/tmp @@ -5132,16 +4652,16 @@ jobs: . 
./ci/tmp/praktika_setup_env.sh set -o pipefail if command -v ts &> /dev/null; then - python3 -m praktika run 'Performance Comparison (arm_release, master_head, 5/6)' --workflow "PR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log + python3 -m praktika run 'BuzzHouse (amd_msan)' --workflow "PR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log else - python3 -m praktika run 'Performance Comparison (arm_release, master_head, 5/6)' --workflow "PR" --ci |& tee ./ci/tmp/job.log + python3 -m praktika run 'BuzzHouse (amd_msan)' --workflow "PR" --ci |& tee ./ci/tmp/job.log fi - performance_comparison_arm_release_master_head_6_6: - runs-on: [self-hosted, arm-medium] - needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, style_check, fast_test, build_arm_tidy, build_amd_debug, build_amd_asan, build_amd_tsan, build_arm_binary, build_arm_release, stateless_tests_amd_asan_distributed_plan_parallel_1_2, stateless_tests_amd_asan_distributed_plan_parallel_2_2, stateless_tests_amd_debug_parallel, stateless_tests_amd_tsan_parallel_1_2, stateless_tests_amd_tsan_parallel_2_2, stateless_tests_arm_binary_parallel] - if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'UGVyZm9ybWFuY2UgQ29tcGFyaXNvbiAoYXJtX3JlbGVhc2UsIG1hc3Rlcl9oZWFkLCA2LzYp') }} - name: "Performance Comparison (arm_release, master_head, 6/6)" + buzzhouse_amd_ubsan: + runs-on: [self-hosted, altinity-on-demand, altinity-func-tester] + needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, fast_test, build_amd_debug, build_amd_asan, build_amd_tsan, build_amd_ubsan, build_arm_binary, stateless_tests_amd_asan_distributed_plan_parallel_1_2, stateless_tests_amd_asan_distributed_plan_parallel_2_2, stateless_tests_amd_debug_parallel, stateless_tests_amd_tsan_parallel_1_2, stateless_tests_amd_tsan_parallel_2_2, stateless_tests_arm_binary_parallel] + if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'QnV6ekhvdXNlIChhbWRfdWJzYW4p') }} + name: "BuzzHouse (amd_ubsan)" outputs: data: ${{ steps.run.outputs.DATA }} pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} @@ -5151,6 +4671,13 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "BuzzHouse (amd_ubsan)" + - name: Prepare env script run: | rm -rf ./ci/tmp @@ -5172,14 +4699,14 @@ jobs: . 
./ci/tmp/praktika_setup_env.sh set -o pipefail if command -v ts &> /dev/null; then - python3 -m praktika run 'Performance Comparison (arm_release, master_head, 6/6)' --workflow "PR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log + python3 -m praktika run 'BuzzHouse (amd_ubsan)' --workflow "PR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log else - python3 -m praktika run 'Performance Comparison (arm_release, master_head, 6/6)' --workflow "PR" --ci |& tee ./ci/tmp/job.log + python3 -m praktika run 'BuzzHouse (amd_ubsan)' --workflow "PR" --ci |& tee ./ci/tmp/job.log fi finish_workflow: - runs-on: [self-hosted, style-checker-aarch64] - needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, style_check, docs_check, fast_test, build_arm_tidy, build_amd_debug, build_amd_asan, build_amd_tsan, build_amd_msan, build_amd_ubsan, build_amd_binary, build_arm_asan, build_arm_binary, build_arm_tsan, build_amd_release, build_arm_release, build_amd_darwin, build_arm_darwin, build_arm_v80compat, build_amd_freebsd, build_ppc64le, build_amd_compat, build_amd_musl, build_riscv64, build_s390x, build_loongarch64, build_arm_fuzzers, quick_functional_tests, stateless_tests_arm_asan_targeted, integration_tests_amd_asan_targeted, stateless_tests_amd_asan_flaky_check, integration_tests_amd_asan_flaky, bugfix_validation_functional_tests, bugfix_validation_integration_tests, stateless_tests_amd_asan_distributed_plan_parallel_1_2, stateless_tests_amd_asan_distributed_plan_parallel_2_2, stateless_tests_amd_asan_db_disk_distributed_plan_sequential, stateless_tests_amd_binary_old_analyzer_s3_storage_databasereplicated_parallel, stateless_tests_amd_binary_old_analyzer_s3_storage_databasereplicated_sequential, stateless_tests_amd_binary_parallelreplicas_s3_storage_parallel, stateless_tests_amd_binary_parallelreplicas_s3_storage_sequential, stateless_tests_amd_debug_asyncinsert_s3_storage_parallel, stateless_tests_amd_debug_asyncinsert_s3_storage_sequential, stateless_tests_amd_debug_parallel, stateless_tests_amd_debug_sequential, stateless_tests_amd_tsan_parallel_1_2, stateless_tests_amd_tsan_parallel_2_2, stateless_tests_amd_tsan_sequential_1_2, stateless_tests_amd_tsan_sequential_2_2, stateless_tests_amd_msan_parallel_1_2, stateless_tests_amd_msan_parallel_2_2, stateless_tests_amd_msan_sequential_1_2, stateless_tests_amd_msan_sequential_2_2, stateless_tests_amd_ubsan_parallel, stateless_tests_amd_ubsan_sequential, stateless_tests_amd_debug_distributed_plan_s3_storage_parallel, stateless_tests_amd_debug_distributed_plan_s3_storage_sequential, stateless_tests_amd_tsan_s3_storage_parallel_1_2, stateless_tests_amd_tsan_s3_storage_parallel_2_2, stateless_tests_amd_tsan_s3_storage_sequential_1_2, stateless_tests_amd_tsan_s3_storage_sequential_2_2, stateless_tests_arm_binary_parallel, stateless_tests_arm_binary_sequential, integration_tests_amd_asan_db_disk_old_analyzer_1_6, integration_tests_amd_asan_db_disk_old_analyzer_2_6, integration_tests_amd_asan_db_disk_old_analyzer_3_6, integration_tests_amd_asan_db_disk_old_analyzer_4_6, integration_tests_amd_asan_db_disk_old_analyzer_5_6, integration_tests_amd_asan_db_disk_old_analyzer_6_6, integration_tests_amd_binary_1_5, integration_tests_amd_binary_2_5, integration_tests_amd_binary_3_5, integration_tests_amd_binary_4_5, integration_tests_amd_binary_5_5, integration_tests_arm_binary_distributed_plan_1_4, integration_tests_arm_binary_distributed_plan_2_4, integration_tests_arm_binary_distributed_plan_3_4, 
integration_tests_arm_binary_distributed_plan_4_4, integration_tests_amd_tsan_1_6, integration_tests_amd_tsan_2_6, integration_tests_amd_tsan_3_6, integration_tests_amd_tsan_4_6, integration_tests_amd_tsan_5_6, integration_tests_amd_tsan_6_6, unit_tests_asan, unit_tests_tsan, unit_tests_msan, unit_tests_ubsan, docker_server_image, docker_keeper_image, install_packages_amd_release, install_packages_arm_release, compatibility_check_amd_release, compatibility_check_arm_release, stress_test_amd_debug, stress_test_amd_tsan, stress_test_arm_asan, stress_test_arm_asan_s3, stress_test_amd_ubsan, stress_test_amd_msan, upgrade_check_amd_asan, upgrade_check_amd_tsan, upgrade_check_amd_msan, upgrade_check_amd_debug, ast_fuzzer_amd_debug, ast_fuzzer_arm_asan, ast_fuzzer_amd_tsan, ast_fuzzer_amd_msan, ast_fuzzer_amd_ubsan, buzzhouse_amd_debug, buzzhouse_arm_asan, buzzhouse_amd_tsan, buzzhouse_amd_msan, buzzhouse_amd_ubsan, performance_comparison_amd_release_master_head_1_6, performance_comparison_amd_release_master_head_2_6, performance_comparison_amd_release_master_head_3_6, performance_comparison_amd_release_master_head_4_6, performance_comparison_amd_release_master_head_5_6, performance_comparison_amd_release_master_head_6_6, performance_comparison_arm_release_master_head_1_6, performance_comparison_arm_release_master_head_2_6, performance_comparison_arm_release_master_head_3_6, performance_comparison_arm_release_master_head_4_6, performance_comparison_arm_release_master_head_5_6, performance_comparison_arm_release_master_head_6_6] + runs-on: [self-hosted, altinity-on-demand, altinity-style-checker-aarch64] + needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, fast_test, build_amd_debug, build_amd_asan, build_amd_tsan, build_amd_msan, build_amd_ubsan, build_amd_binary, build_arm_asan, build_arm_binary, build_arm_tsan, build_amd_release, build_arm_release, quick_functional_tests, stateless_tests_arm_asan_targeted, integration_tests_amd_asan_targeted, stateless_tests_amd_asan_flaky_check, integration_tests_amd_asan_flaky, bugfix_validation_functional_tests, bugfix_validation_integration_tests, stateless_tests_amd_asan_distributed_plan_parallel_1_2, stateless_tests_amd_asan_distributed_plan_parallel_2_2, stateless_tests_amd_asan_db_disk_distributed_plan_sequential, stateless_tests_amd_binary_old_analyzer_s3_storage_databasereplicated_parallel, stateless_tests_amd_binary_old_analyzer_s3_storage_databasereplicated_sequential, stateless_tests_amd_binary_parallelreplicas_s3_storage_parallel, stateless_tests_amd_binary_parallelreplicas_s3_storage_sequential, stateless_tests_amd_debug_asyncinsert_s3_storage_parallel, stateless_tests_amd_debug_asyncinsert_s3_storage_sequential, stateless_tests_amd_debug_parallel, stateless_tests_amd_debug_sequential, stateless_tests_amd_tsan_parallel_1_2, stateless_tests_amd_tsan_parallel_2_2, stateless_tests_amd_tsan_sequential_1_2, stateless_tests_amd_tsan_sequential_2_2, stateless_tests_amd_msan_parallel_1_2, stateless_tests_amd_msan_parallel_2_2, stateless_tests_amd_msan_sequential_1_2, stateless_tests_amd_msan_sequential_2_2, stateless_tests_amd_ubsan_parallel, stateless_tests_amd_ubsan_sequential, stateless_tests_amd_debug_distributed_plan_s3_storage_parallel, stateless_tests_amd_debug_distributed_plan_s3_storage_sequential, stateless_tests_amd_tsan_s3_storage_parallel_1_2, stateless_tests_amd_tsan_s3_storage_parallel_2_2, stateless_tests_amd_tsan_s3_storage_sequential_1_2, 
stateless_tests_amd_tsan_s3_storage_sequential_2_2, stateless_tests_arm_binary_parallel, stateless_tests_arm_binary_sequential, integration_tests_amd_asan_db_disk_old_analyzer_1_6, integration_tests_amd_asan_db_disk_old_analyzer_2_6, integration_tests_amd_asan_db_disk_old_analyzer_3_6, integration_tests_amd_asan_db_disk_old_analyzer_4_6, integration_tests_amd_asan_db_disk_old_analyzer_5_6, integration_tests_amd_asan_db_disk_old_analyzer_6_6, integration_tests_amd_binary_1_5, integration_tests_amd_binary_2_5, integration_tests_amd_binary_3_5, integration_tests_amd_binary_4_5, integration_tests_amd_binary_5_5, integration_tests_arm_binary_distributed_plan_1_4, integration_tests_arm_binary_distributed_plan_2_4, integration_tests_arm_binary_distributed_plan_3_4, integration_tests_arm_binary_distributed_plan_4_4, integration_tests_amd_tsan_1_6, integration_tests_amd_tsan_2_6, integration_tests_amd_tsan_3_6, integration_tests_amd_tsan_4_6, integration_tests_amd_tsan_5_6, integration_tests_amd_tsan_6_6, unit_tests_asan, unit_tests_tsan, unit_tests_msan, unit_tests_ubsan, docker_server_image, docker_keeper_image, install_packages_amd_release, install_packages_arm_release, compatibility_check_amd_release, compatibility_check_arm_release, stress_test_amd_debug, stress_test_amd_tsan, stress_test_arm_asan, stress_test_arm_asan_s3, stress_test_amd_ubsan, stress_test_amd_msan, ast_fuzzer_amd_debug, ast_fuzzer_arm_asan, ast_fuzzer_amd_tsan, ast_fuzzer_amd_msan, ast_fuzzer_amd_ubsan, buzzhouse_amd_debug, buzzhouse_arm_asan, buzzhouse_amd_tsan, buzzhouse_amd_msan, buzzhouse_amd_ubsan] if: ${{ always() }} name: "Finish Workflow" outputs: @@ -5191,6 +4718,13 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Finish Workflow" + - name: Prepare env script run: | rm -rf ./ci/tmp @@ -5216,3 +4750,209 @@ jobs: else python3 -m praktika run 'Finish Workflow' --workflow "PR" --ci |& tee ./ci/tmp/job.log fi + +########################################################################################## +##################################### ALTINITY JOBS ###################################### +########################################################################################## + + GrypeScanServer: + needs: [config_workflow, docker_server_image] + if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'RG9ja2VyIHNlcnZlciBpbWFnZQ==') }} + strategy: + fail-fast: false + matrix: + suffix: ['', '-alpine'] + uses: ./.github/workflows/grype_scan.yml + secrets: inherit + with: + docker_image: altinityinfra/clickhouse-server + version: ${{ fromJson(needs.config_workflow.outputs.data).custom_data.version.string }} + tag-suffix: ${{ matrix.suffix }} + GrypeScanKeeper: + needs: [config_workflow, docker_keeper_image] + if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'RG9ja2VyIGtlZXBlciBpbWFnZQ==') }} + uses: ./.github/workflows/grype_scan.yml + secrets: inherit + with: + docker_image: altinityinfra/clickhouse-keeper + version: ${{ fromJson(needs.config_workflow.outputs.data).custom_data.version.string }} + + RegressionTestsRelease: + needs: [config_workflow, build_amd_binary] + if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).custom_data.ci_exclude_tags, 'regression')}} + uses: ./.github/workflows/regression.yml + secrets: 
inherit + with: + runner_type: altinity-regression-tester + commit: c5cae9b244e0839fb307a9fb67a40fe80d93810b + arch: release + build_sha: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }} + timeout_minutes: 300 + workflow_config: ${{ needs.config_workflow.outputs.data.workflow_config }} + RegressionTestsAarch64: + needs: [config_workflow, build_arm_binary] + if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).custom_data.ci_exclude_tags, 'regression') && !contains(fromJson(needs.config_workflow.outputs.data).custom_data.ci_exclude_tags, 'aarch64')}} + uses: ./.github/workflows/regression.yml + secrets: inherit + with: + runner_type: altinity-regression-tester-aarch64 + commit: c5cae9b244e0839fb307a9fb67a40fe80d93810b + arch: aarch64 + build_sha: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }} + timeout_minutes: 300 + workflow_config: ${{ needs.config_workflow.outputs.data.workflow_config }} + + FinishCIReport: + if: ${{ !cancelled() }} + needs: + - config_workflow + - dockers_build_amd + - dockers_build_arm + - dockers_build_multiplatform_manifest + - fast_test + - build_amd_debug + - build_amd_asan + - build_amd_tsan + - build_amd_msan + - build_amd_ubsan + - build_amd_binary + - build_arm_asan + - build_arm_binary + - build_arm_tsan + - build_amd_release + - build_arm_release + - quick_functional_tests + - stateless_tests_arm_asan_targeted + - integration_tests_amd_asan_targeted + - stateless_tests_amd_asan_flaky_check + - integration_tests_amd_asan_flaky + - bugfix_validation_functional_tests + - bugfix_validation_integration_tests + - stateless_tests_amd_asan_distributed_plan_parallel_1_2 + - stateless_tests_amd_asan_distributed_plan_parallel_2_2 + - stateless_tests_amd_asan_db_disk_distributed_plan_sequential + - stateless_tests_amd_binary_old_analyzer_s3_storage_databasereplicated_parallel + - stateless_tests_amd_binary_old_analyzer_s3_storage_databasereplicated_sequential + - stateless_tests_amd_binary_parallelreplicas_s3_storage_parallel + - stateless_tests_amd_binary_parallelreplicas_s3_storage_sequential + - stateless_tests_amd_debug_asyncinsert_s3_storage_parallel + - stateless_tests_amd_debug_asyncinsert_s3_storage_sequential + - stateless_tests_amd_debug_parallel + - stateless_tests_amd_debug_sequential + - stateless_tests_amd_tsan_parallel_1_2 + - stateless_tests_amd_tsan_parallel_2_2 + - stateless_tests_amd_tsan_sequential_1_2 + - stateless_tests_amd_tsan_sequential_2_2 + - stateless_tests_amd_msan_parallel_1_2 + - stateless_tests_amd_msan_parallel_2_2 + - stateless_tests_amd_msan_sequential_1_2 + - stateless_tests_amd_msan_sequential_2_2 + - stateless_tests_amd_ubsan_parallel + - stateless_tests_amd_ubsan_sequential + - stateless_tests_amd_debug_distributed_plan_s3_storage_parallel + - stateless_tests_amd_debug_distributed_plan_s3_storage_sequential + - stateless_tests_amd_tsan_s3_storage_parallel_1_2 + - stateless_tests_amd_tsan_s3_storage_parallel_2_2 + - stateless_tests_amd_tsan_s3_storage_sequential_1_2 + - stateless_tests_amd_tsan_s3_storage_sequential_2_2 + - stateless_tests_arm_binary_parallel + - stateless_tests_arm_binary_sequential + - integration_tests_amd_asan_db_disk_old_analyzer_1_6 + - integration_tests_amd_asan_db_disk_old_analyzer_2_6 + - integration_tests_amd_asan_db_disk_old_analyzer_3_6 + - integration_tests_amd_asan_db_disk_old_analyzer_4_6 + - integration_tests_amd_asan_db_disk_old_analyzer_5_6 + - 
integration_tests_amd_asan_db_disk_old_analyzer_6_6 + - integration_tests_amd_binary_1_5 + - integration_tests_amd_binary_2_5 + - integration_tests_amd_binary_3_5 + - integration_tests_amd_binary_4_5 + - integration_tests_amd_binary_5_5 + - integration_tests_arm_binary_distributed_plan_1_4 + - integration_tests_arm_binary_distributed_plan_2_4 + - integration_tests_arm_binary_distributed_plan_3_4 + - integration_tests_arm_binary_distributed_plan_4_4 + - integration_tests_amd_tsan_1_6 + - integration_tests_amd_tsan_2_6 + - integration_tests_amd_tsan_3_6 + - integration_tests_amd_tsan_4_6 + - integration_tests_amd_tsan_5_6 + - integration_tests_amd_tsan_6_6 + - unit_tests_asan + - unit_tests_tsan + - unit_tests_msan + - unit_tests_ubsan + - docker_server_image + - docker_keeper_image + - install_packages_amd_release + - install_packages_arm_release + - compatibility_check_amd_release + - compatibility_check_arm_release + - stress_test_amd_debug + - stress_test_amd_tsan + - stress_test_arm_asan + - stress_test_arm_asan_s3 + - stress_test_amd_ubsan + - stress_test_amd_msan + - ast_fuzzer_amd_debug + - ast_fuzzer_arm_asan + - ast_fuzzer_amd_tsan + - ast_fuzzer_amd_msan + - ast_fuzzer_amd_ubsan + - buzzhouse_amd_debug + - buzzhouse_arm_asan + - buzzhouse_amd_tsan + - buzzhouse_amd_msan + - buzzhouse_amd_ubsan + - finish_workflow + - GrypeScanServer + - GrypeScanKeeper + - RegressionTestsRelease + - RegressionTestsAarch64 + runs-on: [self-hosted, altinity-on-demand, altinity-style-checker-aarch64] + steps: + - name: Check out repository code + uses: Altinity/checkout@19599efdf36c4f3f30eb55d5bb388896faea69f6 + with: + clear-repository: true + - name: Finalize workflow report + if: ${{ !cancelled() }} + uses: ./.github/actions/create_workflow_report + with: + workflow_config: ${{ needs.config_workflow.outputs.data.workflow_config }} + final: true + + SourceUpload: + needs: [config_workflow, build_amd_release] + if: ${{ !failure() && !cancelled() }} + runs-on: [self-hosted, altinity-on-demand, altinity-style-checker-aarch64] + env: + COMMIT_SHA: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }} + PR_NUMBER: ${{ github.event.pull_request.number || 0 }} + VERSION: ${{ fromJson(needs.config_workflow.outputs.data).custom_data.version.string }} + steps: + - name: Check out repository code + uses: Altinity/checkout@19599efdf36c4f3f30eb55d5bb388896faea69f6 + with: + clear-repository: true + ref: ${{ fromJson(needs.config_workflow.outputs.data).git_ref }} + submodules: true + fetch-depth: 0 + filter: tree:0 + - name: Install aws cli + uses: unfor19/install-aws-cli-action@v1 + with: + version: 2 + arch: arm64 + - name: Create source tar + run: | + cd .. && tar czf $RUNNER_TEMP/build_source.src.tar.gz ClickHouse/ + - name: Upload source tar + run: | + if [ "$PR_NUMBER" -eq 0 ]; then + S3_PATH="REFs/$GITHUB_REF_NAME/$COMMIT_SHA/build_amd_release" + else + S3_PATH="PRs/$PR_NUMBER/$COMMIT_SHA/build_amd_release" + fi + + aws s3 cp $RUNNER_TEMP/build_source.src.tar.gz s3://altinity-build-artifacts/$S3_PATH/clickhouse-$VERSION.src.tar.gz diff --git a/.github/workflows/regression-reusable-suite.yml b/.github/workflows/regression-reusable-suite.yml new file mode 100644 index 000000000000..45b2b09e06bb --- /dev/null +++ b/.github/workflows/regression-reusable-suite.yml @@ -0,0 +1,192 @@ +name: Regression suite +on: + workflow_call: + inputs: + ref: + description: "Commit SHA to checkout. Default: current (empty string)." 
+ type: string + default: "" + workflow_config: + required: true + type: string + flags: + required: false + type: string + output_format: + required: true + type: string + extra_args: + required: false + type: string + suite_name: + required: true + type: string + suite_executable: + required: false + type: string + default: "regression.py" + timeout_minutes: + required: true + type: number + storage_path: + required: false + type: string + default: "" + regression_args: + required: false + type: string + default: "" + runner_type: + required: false + type: string + default: "" + runner_arch: + required: false + type: string + default: "x86" + job_name: + required: false + type: string + default: "" + part: + required: false + type: string + default: "" + build_sha: + required: false + type: string + default: "" + set_commit_status: + required: false + type: boolean + default: false +jobs: + suite: + name: ${{ format('{0}{1}', inputs.job_name != '' && inputs.job_name || inputs.suite_name, inputs.part != '' && format('_{0}', inputs.part) || '') }} + runs-on: [ + "self-hosted", + "altinity-on-demand", + "${{ inputs.runner_type }}", + ] + timeout-minutes: ${{ inputs.timeout_minutes }} + env: + SUITE: ${{ inputs.suite_name }} + SUITE_EXECUTABLE: ${{ inputs.suite_executable }} + STORAGE: ${{ inputs.storage_path }} + PART: ${{ inputs.part }} + # AWS credentials + AWS_ACCESS_KEY_ID: ${{ secrets.AWS_ACCESS_KEY_ID }} + AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_SECRET_ACCESS_KEY }} + AWS_DEFAULT_REGION: ${{ secrets.AWS_DEFAULT_REGION }} + # Docker credentials + DOCKER_USERNAME: ${{ secrets.DOCKER_USERNAME }} + DOCKER_PASSWORD: ${{ secrets.DOCKER_PASSWORD }} + # Database credentials + CHECKS_DATABASE_HOST: ${{ secrets.CHECKS_DATABASE_HOST }} + CHECKS_DATABASE_USER: ${{ secrets.CLICKHOUSE_TEST_STAT_LOGIN }} + CHECKS_DATABASE_PASSWORD: ${{ secrets.CLICKHOUSE_TEST_STAT_PASSWORD }} + # LocalStack token + LOCALSTACK_AUTH_TOKEN: ${{ secrets.LOCALSTACK_AUTH_TOKEN }} + # Python encoding + PYTHONIOENCODING: utf-8 + build_sha: ${{ inputs.build_sha }} + pr_number: ${{ github.event.number }} + artifacts: builds + # Args + args: --test-to-end + --no-colors + --local + --collect-service-logs + --output ${{ inputs.output_format }} + --attr project="${GITHUB_REPOSITORY}" project.id="${GITHUB_REPOSITORY_ID}" user.name="${GITHUB_ACTOR}" version="${{ fromJson(inputs.workflow_config).custom_data.version.string }}" package="$clickhouse_path" repository="https://github.com/Altinity/clickhouse-regression" commit.hash="$(git rev-parse HEAD)" job.name=$GITHUB_JOB job.retry=$GITHUB_RUN_ATTEMPT job.url="${GITHUB_SERVER_URL}/${GITHUB_REPOSITORY}/actions/runs/${GITHUB_RUN_ID}" arch="$(uname -i)" + --cicd + --log raw.log + ${{ inputs.flags != 'none' && inputs.flags || ''}} + ${{ inputs.extra_args }} + artifact_paths: | + ./report.html + ./*.log.txt + ./*.log + ./*.html + ./*/_instances/*.log + ./*/_instances/*/logs/*.log + ./*/*/_instances/*/logs/*.log + ./*/*/_instances/*.log + + steps: + - name: ⤵️ Checkout + uses: actions/checkout@v4 + with: + repository: Altinity/clickhouse-regression + ref: ${{ inputs.ref }} + + - name: ♻️ Cache setup + uses: ./.github/actions/cache-setup + + - name: 🛠️ Setup + run: .github/setup.sh + + - name: 📦 Get deb url + env: + S3_BASE_URL: https://altinity-build-artifacts.s3.amazonaws.com/ + PR_NUMBER: ${{ github.event.pull_request.number || 0 }} + run: | + REPORTS_PATH=${{ runner.temp }}/reports_dir + mkdir -p $REPORTS_PATH + cat > $REPORTS_PATH/workflow_config.json << 'EOF' + ${{ 
inputs.workflow_config }} + EOF + + python3 .github/get-deb-url.py --github-env $GITHUB_ENV --workflow-config $REPORTS_PATH/workflow_config.json --s3-base-url $S3_BASE_URL --pr-number $PR_NUMBER --branch-name ${{ github.ref_name }} --commit-hash ${{ inputs.build_sha || github.sha }} --binary + + - name: 🔄 Process regression args + run: | + REGRESSION_ARGS='${{ inputs.regression_args }}' + # AWS replacements + REGRESSION_ARGS="${REGRESSION_ARGS//'{{AWS_BUCKET}}'/${{ secrets.REGRESSION_AWS_S3_BUCKET }}}" + REGRESSION_ARGS="${REGRESSION_ARGS//'{{AWS_REGION}}'/${{ secrets.REGRESSION_AWS_S3_REGION }}}" + REGRESSION_ARGS="${REGRESSION_ARGS//'{{AWS_KEY_ID}}'/${{ secrets.REGRESSION_AWS_S3_KEY_ID }}}" + REGRESSION_ARGS="${REGRESSION_ARGS//'{{AWS_ACCESS_KEY}}'/${{ secrets.REGRESSION_AWS_S3_SECRET_ACCESS_KEY }}}" + # GCS replacements + REGRESSION_ARGS="${REGRESSION_ARGS//'{{GCS_URI}}'/${{ secrets.REGRESSION_GCS_URI }}}" + REGRESSION_ARGS="${REGRESSION_ARGS//'{{GCS_KEY_ID}}'/${{ secrets.REGRESSION_GCS_KEY_ID }}}" + REGRESSION_ARGS="${REGRESSION_ARGS//'{{GCS_KEY_SECRET}}'/${{ secrets.REGRESSION_GCS_KEY_SECRET }}}" + # Azure replacements + REGRESSION_ARGS="${REGRESSION_ARGS//'{{AZURE_ACCOUNT_NAME}}'/${{ secrets.AZURE_ACCOUNT_NAME }}}" + REGRESSION_ARGS="${REGRESSION_ARGS//'{{AZURE_STORAGE_KEY}}'/${{ secrets.AZURE_STORAGE_KEY }}}" + REGRESSION_ARGS="${REGRESSION_ARGS//'{{AZURE_CONTAINER_NAME}}'/${{ secrets.AZURE_CONTAINER_NAME }}}" + echo "REGRESSION_ARGS=$REGRESSION_ARGS" >> $GITHUB_ENV + + - name: 🧪 Run ${{ env.SUITE }} suite + id: run_suite + run: python3 + -u ${{ env.SUITE }}/${{ env.SUITE_EXECUTABLE }} + --clickhouse ${{ env.clickhouse_path }} + ${{ env.REGRESSION_ARGS }} + ${{ env.args }} || EXITCODE=$?; + .github/add_link_to_logs.sh; + exit $EXITCODE + + - name: 📊 Set Commit Status + if: always() && inputs.set_commit_status + env: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} + JOB_OUTCOME: ${{ steps.run_suite.outcome }} + SUITE_NAME: ${{ format('Regression {0} {1}{2}', inputs.runner_arch, inputs.job_name != '' && inputs.job_name || inputs.suite_name, inputs.part != '' && format('_{0}', inputs.part) || '') }} + run: python3 .github/set_builds_status.py + + - name: 📝 Create and upload logs + if: always() + run: .github/create_and_upload_logs.sh 1 + + - name: 📤 Upload logs to results database + if: always() + timeout-minutes: 20 + run: .github/upload_results_to_database.sh 1 + + - uses: actions/upload-artifact@v4 + if: always() + with: + name: ${{ format('{0}{1}-artifacts-{2}{3}', inputs.job_name != '' && inputs.job_name || inputs.suite_name, inputs.part != '' && format('_{0}', inputs.part) || '', inputs.runner_arch, contains(inputs.extra_args, '--use-keeper') && '_keeper' || '_zookeeper') }} + path: ${{ env.artifact_paths }} + diff --git a/.github/workflows/regression.yml b/.github/workflows/regression.yml new file mode 100644 index 000000000000..239db719b6e1 --- /dev/null +++ b/.github/workflows/regression.yml @@ -0,0 +1,488 @@ +name: Regression test workflow - Release +'on': + workflow_call: + inputs: + runner_type: + description: the (meta-)label of runner to use + required: true + type: string + commit: + description: commit hash of the regression tests. + required: true + type: string + arch: + description: arch to run the tests on. + required: true + type: string + timeout_minutes: + description: Maximum number of minutes to let workflow run before GitHub cancels it. + default: 210 + type: number + build_sha: + description: commit sha of the workflow run for artifact upload. 
+ required: true + type: string + checkout_depth: + description: depth of the git shallow checkout + required: false + type: number + default: 1 + submodules: + description: whether submodules should be checked out + required: false + type: boolean + default: false + additional_envs: + description: additional environment variables to set up for the job + type: string + workflow_config: + description: workflow config for the run + required: true + type: string + secrets: + secret_envs: + description: if given, passed through to the job environment + required: false + AWS_SECRET_ACCESS_KEY: + description: the secret access key to the aws param store. + required: true + AWS_ACCESS_KEY_ID: + description: the access key id to the aws param store. + required: true + AWS_DEFAULT_REGION: + description: the region of the aws param store. + required: true + AWS_REPORT_KEY_ID: + description: aws s3 key id used for regression test reports. + required: true + AWS_REPORT_SECRET_ACCESS_KEY: + description: aws s3 secret access key used for regression test reports. + required: true + AWS_REPORT_REGION: + description: aws s3 region used for regression test reports. + required: true + DOCKER_USERNAME: + description: username of the docker user. + required: true + DOCKER_PASSWORD: + description: password for the docker user. + required: true + REGRESSION_AWS_S3_BUCKET: + description: aws s3 bucket used for regression tests. + required: true + REGRESSION_AWS_S3_KEY_ID: + description: aws s3 key id used for regression tests. + required: true + REGRESSION_AWS_S3_SECRET_ACCESS_KEY: + description: aws s3 secret access key used for regression tests. + required: true + REGRESSION_AWS_S3_REGION: + description: aws s3 region used for regression tests. + required: true + REGRESSION_GCS_KEY_ID: + description: gcs key id used for regression tests. + required: true + REGRESSION_GCS_KEY_SECRET: + description: gcs key secret used for regression tests. + required: true + REGRESSION_GCS_URI: + description: gcs uri used for regression tests.
+ required: true + +env: + # Force the stdout and stderr streams to be unbuffered + PYTHONUNBUFFERED: 1 + AWS_ACCESS_KEY_ID: ${{ secrets.AWS_ACCESS_KEY_ID }} + AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_SECRET_ACCESS_KEY }} + AWS_DEFAULT_REGION: ${{ secrets.AWS_DEFAULT_REGION }} + DOCKER_USERNAME: ${{ secrets.DOCKER_USERNAME }} + DOCKER_PASSWORD: ${{ secrets.DOCKER_PASSWORD }} + CHECKS_DATABASE_HOST: ${{ secrets.CHECKS_DATABASE_HOST }} + CHECKS_DATABASE_USER: ${{ secrets.CLICKHOUSE_TEST_STAT_LOGIN }} + CHECKS_DATABASE_PASSWORD: ${{ secrets.CLICKHOUSE_TEST_STAT_PASSWORD }} + args: --test-to-end + --no-colors + --local + --collect-service-logs + --output new-fails + --parallel 1 + --log raw.log + --with-analyzer + artifact_paths: | + ./report.html + ./*.log.txt + ./*.log + ./*.html + ./*/_instances/*.log + ./*/_instances/*/logs/*.log + ./*/*/_instances/*/logs/*.log + ./*/*/_instances/*.log + +jobs: + Common: + if: | + fromJson(inputs.workflow_config).custom_data.ci_regression_jobs[0] == null || + contains(fromJson(inputs.workflow_config).custom_data.ci_regression_jobs, 'common') + strategy: + fail-fast: false + matrix: + SUITE: [aes_encryption, atomic_insert, base_58, data_types, datetime64_extended_range, disk_level_encryption, dns, engines, example, extended_precision_data_types, functions, kafka, kerberos, key_value, lightweight_delete, memory, part_moves_between_shards, selects, session_timezone, swarms, version, window_functions] + uses: ./.github/workflows/regression-reusable-suite.yml + with: + ref: ${{ inputs.commit }} + workflow_config: ${{ inputs.workflow_config }} + suite_name: ${{ matrix.SUITE }} + suite_executable: regression.py + output_format: new-fails + flags: --with-analyzer + timeout_minutes: ${{ inputs.timeout_minutes }} + runner_arch: ${{ inputs.arch }} + runner_type: ${{ inputs.runner_type }} + build_sha: ${{ inputs.build_sha }} + set_commit_status: true + job_name: ${{ matrix.SUITE }} + secrets: inherit + + AggregateFunctions: + if: | + fromJson(inputs.workflow_config).custom_data.ci_regression_jobs[0] == null || + contains(fromJson(inputs.workflow_config).custom_data.ci_regression_jobs, 'aggregate_functions') + strategy: + fail-fast: false + matrix: + PART: [1, 2, 3] + uses: ./.github/workflows/regression-reusable-suite.yml + with: + ref: ${{ inputs.commit }} + workflow_config: ${{ inputs.workflow_config }} + suite_name: aggregate_functions + suite_executable: regression.py + output_format: new-fails + flags: --with-analyzer + timeout_minutes: ${{ inputs.timeout_minutes }} + runner_arch: ${{ inputs.arch }} + runner_type: ${{ inputs.runner_type }} + part: ${{ matrix.PART }} + build_sha: ${{ inputs.build_sha }} + set_commit_status: true + job_name: aggregate_functions + extra_args: --only "part ${{ matrix.PART }}/*" + secrets: inherit + Alter: + if: | + fromJson(inputs.workflow_config).custom_data.ci_regression_jobs[0] == null || + contains(fromJson(inputs.workflow_config).custom_data.ci_regression_jobs, 'alter') + strategy: + fail-fast: false + matrix: + ONLY: [replace, move] + include: + - ONLY: attach + PART: 1 + - ONLY: attach + PART: 2 + uses: ./.github/workflows/regression-reusable-suite.yml + with: + ref: ${{ inputs.commit }} + workflow_config: ${{ inputs.workflow_config }} + suite_name: alter + suite_executable: regression.py + output_format: new-fails + flags: --with-analyzer + timeout_minutes: ${{ inputs.timeout_minutes }} + runner_arch: ${{ inputs.arch }} + runner_type: ${{ inputs.runner_type }} + storage_path: /${{ matrix.ONLY }}_partition + part: ${{ 
matrix.PART }} + build_sha: ${{ inputs.build_sha }} + set_commit_status: true + job_name: alter_${{ matrix.ONLY }} + extra_args: --only "/alter/${{ matrix.ONLY }} partition/${{ matrix.PART && format('part {0}/', matrix.PART) || '' }}*" + secrets: inherit + + Benchmark: + if: | + fromJson(inputs.workflow_config).custom_data.ci_regression_jobs[0] == null || + contains(fromJson(inputs.workflow_config).custom_data.ci_regression_jobs, 'benchmark') + strategy: + fail-fast: false + matrix: + STORAGE: [minio, aws_s3, gcs] + uses: ./.github/workflows/regression-reusable-suite.yml + with: + ref: ${{ inputs.commit }} + workflow_config: ${{ inputs.workflow_config }} + suite_name: ontime_benchmark + suite_executable: benchmark.py + output_format: new-fails + flags: --with-analyzer + timeout_minutes: ${{ inputs.timeout_minutes }} + runner_arch: ${{ inputs.arch }} + runner_type: ${{ inputs.runner_type }} + storage_path: /${{ matrix.STORAGE }} + build_sha: ${{ inputs.build_sha }} + set_commit_status: true + job_name: benchmark_${{ matrix.STORAGE }} + regression_args: --storage ${{ matrix.STORAGE }} --gcs-uri {{GCS_URI}} --gcs-key-id {{GCS_KEY_ID}} --gcs-key-secret {{GCS_KEY_SECRET}} --aws-s3-bucket {{AWS_BUCKET}} --aws-s3-region {{AWS_REGION}} --aws-s3-key-id {{AWS_KEY_ID}} --aws-s3-access-key {{AWS_ACCESS_KEY}} + secrets: inherit + + ClickHouseKeeper: + if: | + fromJson(inputs.workflow_config).custom_data.ci_regression_jobs[0] == null || + contains(fromJson(inputs.workflow_config).custom_data.ci_regression_jobs, 'clickhouse_keeper') + strategy: + fail-fast: false + matrix: + PART: [1, 2] + SSL: [ssl, no_ssl] + uses: ./.github/workflows/regression-reusable-suite.yml + with: + ref: ${{ inputs.commit }} + workflow_config: ${{ inputs.workflow_config }} + suite_name: clickhouse_keeper + suite_executable: regression.py + output_format: new-fails + flags: --with-analyzer + timeout_minutes: ${{ inputs.timeout_minutes }} + runner_arch: ${{ inputs.arch }} + runner_type: ${{ inputs.runner_type }} + storage_path: /${{ matrix.SSL }} + part: ${{ matrix.PART }} + build_sha: ${{ inputs.build_sha }} + set_commit_status: true + job_name: clickhouse_keeper_${{ matrix.SSL }} + extra_args: ${{ matrix.SSL == 'ssl' && '--ssl' || '' }} --only "part ${{ matrix.PART }}/*" + secrets: inherit + + Iceberg: + if: | + fromJson(inputs.workflow_config).custom_data.ci_regression_jobs[0] == null || + contains(fromJson(inputs.workflow_config).custom_data.ci_regression_jobs, 'iceberg') + strategy: + fail-fast: false + matrix: + PART: [1, 2] + uses: ./.github/workflows/regression-reusable-suite.yml + with: + ref: ${{ inputs.commit }} + workflow_config: ${{ inputs.workflow_config }} + suite_name: iceberg + suite_executable: regression.py + output_format: new-fails + flags: --with-analyzer + timeout_minutes: ${{ inputs.timeout_minutes }} + runner_arch: ${{ inputs.arch }} + runner_type: ${{ inputs.runner_type }} + part: ${{ matrix.PART }} + build_sha: ${{ inputs.build_sha }} + set_commit_status: true + job_name: iceberg + extra_args: --only ${{ matrix.PART == 1 && '"/iceberg/iceberg engine/rest catalog/*" "/iceberg/s3 table function/*" "/iceberg/icebergS3 table function/*" "/iceberg/iceberg cache/*"' || '"/iceberg/iceberg engine/glue catalog/*" "/iceberg/iceberg table engine/*"' }} + secrets: inherit + LDAP: + if: | + fromJson(inputs.workflow_config).custom_data.ci_regression_jobs[0] == null || + contains(fromJson(inputs.workflow_config).custom_data.ci_regression_jobs, 'ldap') + strategy: + fail-fast: false + matrix: + SUITE: [authentication, 
external_user_directory, role_mapping] + uses: ./.github/workflows/regression-reusable-suite.yml + with: + ref: ${{ inputs.commit }} + workflow_config: ${{ inputs.workflow_config }} + suite_name: ldap/${{ matrix.SUITE }} + suite_executable: regression.py + output_format: new-fails + flags: --with-analyzer + timeout_minutes: ${{ inputs.timeout_minutes }} + runner_arch: ${{ inputs.arch }} + runner_type: ${{ inputs.runner_type }} + build_sha: ${{ inputs.build_sha }} + set_commit_status: true + job_name: ldap_${{ matrix.SUITE }} + secrets: inherit + + Parquet: + if: | + fromJson(inputs.workflow_config).custom_data.ci_regression_jobs[0] == null || + contains(fromJson(inputs.workflow_config).custom_data.ci_regression_jobs, 'parquet') + uses: ./.github/workflows/regression-reusable-suite.yml + with: + ref: ${{ inputs.commit }} + workflow_config: ${{ inputs.workflow_config }} + suite_name: parquet + suite_executable: regression.py + output_format: new-fails + flags: --with-analyzer + timeout_minutes: ${{ inputs.timeout_minutes }} + runner_arch: ${{ inputs.arch }} + runner_type: ${{ inputs.runner_type }} + build_sha: ${{ inputs.build_sha }} + set_commit_status: true + job_name: parquet + secrets: inherit + + ParquetS3: + if: | + fromJson(inputs.workflow_config).custom_data.ci_regression_jobs[0] == null || + contains(fromJson(inputs.workflow_config).custom_data.ci_regression_jobs, 'parquet') + strategy: + fail-fast: false + matrix: + STORAGE: [minio, aws_s3] + uses: ./.github/workflows/regression-reusable-suite.yml + with: + ref: ${{ inputs.commit }} + workflow_config: ${{ inputs.workflow_config }} + suite_name: parquet + suite_executable: regression.py + output_format: new-fails + flags: --with-analyzer + timeout_minutes: ${{ inputs.timeout_minutes }} + runner_arch: ${{ inputs.arch }} + runner_type: ${{ inputs.runner_type }} + storage_path: ${{ matrix.STORAGE }} + build_sha: ${{ inputs.build_sha }} + set_commit_status: true + job_name: parquet_${{ matrix.STORAGE }} + regression_args: --storage ${{ matrix.STORAGE }} --aws-s3-bucket {{AWS_BUCKET}} --aws-s3-region {{AWS_REGION}} --aws-s3-key-id {{AWS_KEY_ID}} --aws-s3-access-key {{AWS_ACCESS_KEY}} + secrets: inherit + + RBAC: + if: | + fromJson(inputs.workflow_config).custom_data.ci_regression_jobs[0] == null || + contains(fromJson(inputs.workflow_config).custom_data.ci_regression_jobs, 'rbac') + strategy: + fail-fast: false + matrix: + PART: [1, 2, 3] + uses: ./.github/workflows/regression-reusable-suite.yml + with: + ref: ${{ inputs.commit }} + workflow_config: ${{ inputs.workflow_config }} + suite_name: rbac + suite_executable: regression.py + output_format: new-fails + flags: --with-analyzer + timeout_minutes: ${{ inputs.timeout_minutes }} + runner_arch: ${{ inputs.arch }} + runner_type: ${{ inputs.runner_type }} + part: ${{ matrix.PART }} + build_sha: ${{ inputs.build_sha }} + set_commit_status: true + job_name: rbac + extra_args: --only "/rbac/part ${{ matrix.PART }}/*" + secrets: inherit + SSLServer: + if: | + fromJson(inputs.workflow_config).custom_data.ci_regression_jobs[0] == null || + contains(fromJson(inputs.workflow_config).custom_data.ci_regression_jobs, 'ssl_server') + strategy: + fail-fast: false + matrix: + PART: [1, 2, 3] + uses: ./.github/workflows/regression-reusable-suite.yml + with: + ref: ${{ inputs.commit }} + workflow_config: ${{ inputs.workflow_config }} + suite_name: ssl_server + suite_executable: regression.py + output_format: new-fails + flags: --with-analyzer + timeout_minutes: ${{ inputs.timeout_minutes }} + runner_arch: 
${{ inputs.arch }} + runner_type: ${{ inputs.runner_type }} + part: ${{ matrix.PART }} + build_sha: ${{ inputs.build_sha }} + set_commit_status: true + job_name: ssl_server + extra_args: --only "part ${{ matrix.PART }}/*" + secrets: inherit + + S3: + if: | + fromJson(inputs.workflow_config).custom_data.ci_regression_jobs[0] == null || + contains(fromJson(inputs.workflow_config).custom_data.ci_regression_jobs, 's3') + strategy: + fail-fast: false + matrix: + STORAGE: [aws_s3, gcs, azure, minio] + PART: [1, 2] + include: + - STORAGE: minio + PART: 3 + uses: ./.github/workflows/regression-reusable-suite.yml + with: + ref: ${{ inputs.commit }} + workflow_config: ${{ inputs.workflow_config }} + suite_name: s3 + suite_executable: regression.py + output_format: new-fails + flags: --with-analyzer + timeout_minutes: ${{ inputs.timeout_minutes }} + runner_arch: ${{ inputs.arch }} + runner_type: ${{ inputs.runner_type }} + storage_path: /${{ matrix.STORAGE }} + part: ${{ matrix.PART }} + build_sha: ${{ inputs.build_sha }} + set_commit_status: true + job_name: s3_${{ matrix.STORAGE }} + regression_args: --storage ${{ matrix.STORAGE }} --gcs-uri {{GCS_URI}} --gcs-key-id {{GCS_KEY_ID}} --gcs-key-secret {{GCS_KEY_SECRET}} --aws-s3-bucket {{AWS_BUCKET}} --aws-s3-region {{AWS_REGION}} --aws-s3-key-id {{AWS_KEY_ID}} --aws-s3-access-key {{AWS_ACCESS_KEY}} --azure-account-name {{AZURE_ACCOUNT_NAME}} --azure-storage-key {{AZURE_STORAGE_KEY}} --azure-container {{AZURE_CONTAINER_NAME}} + extra_args: --only ":/try*" ":/part ${{ matrix.PART }}/*" + secrets: inherit + + S3Export: + if: | + fromJson(inputs.workflow_config).custom_data.ci_regression_jobs[0] == null || + contains(fromJson(inputs.workflow_config).custom_data.ci_regression_jobs, 's3') + strategy: + fail-fast: false + matrix: + PART: [part, partition] + uses: ./.github/workflows/regression-reusable-suite.yml + with: + ref: ${{ inputs.commit }} + workflow_config: ${{ inputs.workflow_config }} + suite_name: s3 + suite_executable: regression.py + output_format: new-fails + flags: --with-analyzer + timeout_minutes: ${{ inputs.timeout_minutes }} + runner_arch: ${{ inputs.arch }} + runner_type: ${{ inputs.runner_type }} + storage_path: /minio + part: ${{ matrix.PART }} + build_sha: ${{ inputs.build_sha }} + set_commit_status: true + job_name: s3_export + regression_args: --storage minio + extra_args: --only ":/try*" "minio/export tests/export ${{ matrix.PART }}/*" + secrets: inherit + + TieredStorage: + if: | + fromJson(inputs.workflow_config).custom_data.ci_regression_jobs[0] == null || + contains(fromJson(inputs.workflow_config).custom_data.ci_regression_jobs, 'tiered_storage') + strategy: + fail-fast: false + matrix: + STORAGE: [local, minio, s3amazon, s3gcs] + uses: ./.github/workflows/regression-reusable-suite.yml + with: + ref: ${{ inputs.commit }} + workflow_config: ${{ inputs.workflow_config }} + suite_name: tiered_storage + suite_executable: regression.py + output_format: new-fails + flags: --with-analyzer + timeout_minutes: ${{ inputs.timeout_minutes }} + runner_arch: ${{ inputs.arch }} + runner_type: ${{ inputs.runner_type }} + storage_path: /${{ matrix.STORAGE }} + build_sha: ${{ inputs.build_sha }} + set_commit_status: true + job_name: tiered_storage_${{ matrix.STORAGE }} + regression_args: --aws-s3-access-key {{AWS_ACCESS_KEY}} --aws-s3-key-id {{AWS_KEY_ID}} --aws-s3-uri https://s3.{{AWS_REGION}}.amazonaws.com/{{AWS_BUCKET}}/data/ --gcs-key-id {{GCS_KEY_ID}} --gcs-key-secret {{GCS_KEY_SECRET}} --gcs-uri {{GCS_URI}} + extra_args: ${{ 
matrix.STORAGE != 'local' && format('--with-{0}', matrix.STORAGE) || '' }} + secrets: inherit diff --git a/.github/workflows/release_branches.yml b/.github/workflows/release_branches.yml index c14b40f56d50..8f1bc37ab701 100644 --- a/.github/workflows/release_branches.yml +++ b/.github/workflows/release_branches.yml @@ -3,6 +3,13 @@ name: ReleaseBranchCI on: + workflow_dispatch: + inputs: + no_cache: + description: Run without cache + required: false + type: boolean + default: false push: branches: ['2[1-9].[1-9][0-9]', '2[1-9].[1-9]'] @@ -10,6 +17,20 @@ env: # Force the stdout and stderr streams to be unbuffered PYTHONUNBUFFERED: 1 CHECKOUT_REF: "" + DOCKER_PASSWORD: ${{ secrets.DOCKER_PASSWORD }} + CLICKHOUSE_TEST_STAT_URL: ${{ secrets.CLICKHOUSE_TEST_STAT_URL }} + CLICKHOUSE_TEST_STAT_LOGIN: ${{ secrets.CLICKHOUSE_TEST_STAT_LOGIN }} + CLICKHOUSE_TEST_STAT_PASSWORD: ${{ secrets.CLICKHOUSE_TEST_STAT_PASSWORD }} + AWS_ACCESS_KEY_ID: ${{ secrets.AWS_ACCESS_KEY_ID }} + AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_SECRET_ACCESS_KEY }} + AWS_DEFAULT_REGION: ${{ secrets.AWS_DEFAULT_REGION }} + CHECKS_DATABASE_HOST: ${{ secrets.CHECKS_DATABASE_HOST }} + AZURE_STORAGE_KEY: ${{ secrets.AZURE_STORAGE_KEY }} + AZURE_ACCOUNT_NAME: ${{ secrets.AZURE_ACCOUNT_NAME }} + AZURE_CONTAINER_NAME: ${{ secrets.AZURE_CONTAINER_NAME }} + AZURE_STORAGE_ACCOUNT_URL: "https://${{ secrets.AZURE_ACCOUNT_NAME }}.blob.core.windows.net/" + ROBOT_TOKEN: ${{ secrets.ROBOT_TOKEN }} + GH_TOKEN: ${{ github.token }} # Allow updating GH commit statuses and PR comments to post an actual job reports link permissions: write-all @@ -17,7 +38,7 @@ permissions: write-all jobs: config_workflow: - runs-on: [self-hosted, style-checker-aarch64] + runs-on: [self-hosted, altinity-on-demand, altinity-style-checker-aarch64] needs: [] name: "Config Workflow" outputs: @@ -29,6 +50,26 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Config Workflow" + + - name: Note report location to summary + env: + PR_NUMBER: ${{ github.event.pull_request.number || 0 }} + COMMIT_SHA: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }} + run: | + if [ "$PR_NUMBER" -eq 0 ]; then + PREFIX="REFs/$GITHUB_REF_NAME/$COMMIT_SHA" + else + PREFIX="PRs/$PR_NUMBER/$COMMIT_SHA" + fi + REPORT_LINK=https://s3.amazonaws.com/altinity-build-artifacts/$PREFIX/$GITHUB_RUN_ID/ci_run_report.html + echo "Workflow Run Report: [View Report]($REPORT_LINK)" >> $GITHUB_STEP_SUMMARY + - name: Prepare env script run: | rm -rf ./ci/tmp @@ -56,7 +97,7 @@ jobs: fi dockers_build_amd: - runs-on: [self-hosted, style-checker] + runs-on: [self-hosted, altinity-on-demand, altinity-style-checker] needs: [config_workflow] if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'RG9ja2VycyBCdWlsZCAoYW1kKQ==') }} name: "Dockers Build (amd)" @@ -69,6 +110,13 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Dockers Build (amd)" + - name: Prepare env script run: | rm -rf ./ci/tmp @@ -96,7 +144,7 @@ jobs: fi dockers_build_arm: - runs-on: [self-hosted, style-checker-aarch64] + runs-on: [self-hosted, altinity-on-demand, 
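The "Note report location to summary" step added in this hunk derives the artifact prefix from the event type: pull-request runs group under PRs/<number>/<sha>, branch runs under REFs/<branch>/<sha>. A Python sketch of the same URL construction (variable names mirror the step's environment; only the bucket path comes from the step itself):

```python
# Sketch of the report-URL logic from "Note report location to summary":
# PR runs use PRs/<pr>/<sha>, branch runs use REFs/<branch>/<sha>.
def report_link(pr_number: int, ref_name: str, commit_sha: str, run_id: str) -> str:
    if pr_number:
        prefix = f"PRs/{pr_number}/{commit_sha}"
    else:
        prefix = f"REFs/{ref_name}/{commit_sha}"
    return (
        "https://s3.amazonaws.com/altinity-build-artifacts/"
        f"{prefix}/{run_id}/ci_run_report.html"
    )
```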
altinity-style-checker-aarch64] needs: [config_workflow] if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'RG9ja2VycyBCdWlsZCAoYXJtKQ==') }} name: "Dockers Build (arm)" @@ -109,6 +157,13 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Dockers Build (arm)" + - name: Prepare env script run: | rm -rf ./ci/tmp @@ -136,7 +191,7 @@ jobs: fi build_amd_debug: - runs-on: [self-hosted, arm-large] + runs-on: [self-hosted, altinity-on-demand, altinity-func-tester-aarch64] needs: [config_workflow, dockers_build_amd, dockers_build_arm] if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'QnVpbGQgKGFtZF9kZWJ1Zyk=') }} name: "Build (amd_debug)" @@ -149,6 +204,13 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Build (amd_debug)" + - name: Prepare env script run: | rm -rf ./ci/tmp @@ -176,7 +238,7 @@ jobs: fi build_amd_asan: - runs-on: [self-hosted, arm-large] + runs-on: [self-hosted, altinity-on-demand, altinity-func-tester-aarch64] needs: [config_workflow, dockers_build_amd, dockers_build_arm] if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'QnVpbGQgKGFtZF9hc2FuKQ==') }} name: "Build (amd_asan)" @@ -189,6 +251,13 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Build (amd_asan)" + - name: Prepare env script run: | rm -rf ./ci/tmp @@ -216,7 +285,7 @@ jobs: fi build_amd_tsan: - runs-on: [self-hosted, arm-large] + runs-on: [self-hosted, altinity-on-demand, altinity-func-tester-aarch64] needs: [config_workflow, dockers_build_amd, dockers_build_arm] if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'QnVpbGQgKGFtZF90c2FuKQ==') }} name: "Build (amd_tsan)" @@ -229,6 +298,13 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Build (amd_tsan)" + - name: Prepare env script run: | rm -rf ./ci/tmp @@ -256,7 +332,7 @@ jobs: fi build_amd_msan: - runs-on: [self-hosted, arm-large] + runs-on: [self-hosted, altinity-on-demand, altinity-func-tester-aarch64] needs: [config_workflow, dockers_build_amd, dockers_build_arm] if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'QnVpbGQgKGFtZF9tc2FuKQ==') }} name: "Build (amd_msan)" @@ -269,6 +345,13 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} + - name: Setup + 
uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Build (amd_msan)" + - name: Prepare env script run: | rm -rf ./ci/tmp @@ -296,7 +379,7 @@ jobs: fi build_amd_ubsan: - runs-on: [self-hosted, arm-large] + runs-on: [self-hosted, altinity-on-demand, altinity-func-tester-aarch64] needs: [config_workflow, dockers_build_amd, dockers_build_arm] if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'QnVpbGQgKGFtZF91YnNhbik=') }} name: "Build (amd_ubsan)" @@ -309,6 +392,13 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Build (amd_ubsan)" + - name: Prepare env script run: | rm -rf ./ci/tmp @@ -336,7 +426,7 @@ jobs: fi build_arm_asan: - runs-on: [self-hosted, arm-large] + runs-on: [self-hosted, altinity-on-demand, altinity-func-tester-aarch64] needs: [config_workflow, dockers_build_amd, dockers_build_arm] if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'QnVpbGQgKGFybV9hc2FuKQ==') }} name: "Build (arm_asan)" @@ -349,6 +439,13 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Build (arm_asan)" + - name: Prepare env script run: | rm -rf ./ci/tmp @@ -376,7 +473,7 @@ jobs: fi build_amd_release: - runs-on: [self-hosted, arm-large] + runs-on: [self-hosted, altinity-on-demand, altinity-func-tester-aarch64] needs: [config_workflow, dockers_build_amd, dockers_build_arm] if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'QnVpbGQgKGFtZF9yZWxlYXNlKQ==') }} name: "Build (amd_release)" @@ -389,6 +486,13 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Build (amd_release)" + - name: Prepare env script run: | rm -rf ./ci/tmp @@ -416,7 +520,7 @@ jobs: fi build_arm_release: - runs-on: [self-hosted, arm-large] + runs-on: [self-hosted, altinity-on-demand, altinity-builder] needs: [config_workflow, dockers_build_amd, dockers_build_arm] if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'QnVpbGQgKGFybV9yZWxlYXNlKQ==') }} name: "Build (arm_release)" @@ -429,6 +533,13 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Build (arm_release)" + - name: Prepare env script run: | rm -rf ./ci/tmp @@ -456,7 +567,7 @@ jobs: fi build_amd_darwin: - runs-on: [self-hosted, amd-large] + runs-on: [self-hosted, altinity-on-demand, altinity-func-tester] needs: [config_workflow, dockers_build_amd, dockers_build_arm] if: ${{ 
!cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'QnVpbGQgKGFtZF9kYXJ3aW4p') }} name: "Build (amd_darwin)" @@ -469,6 +580,13 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Build (amd_darwin)" + - name: Prepare env script run: | rm -rf ./ci/tmp @@ -496,7 +614,7 @@ jobs: fi build_arm_darwin: - runs-on: [self-hosted, arm-large] + runs-on: [self-hosted, altinity-on-demand, altinity-func-tester-aarch64] needs: [config_workflow, dockers_build_amd, dockers_build_arm] if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'QnVpbGQgKGFybV9kYXJ3aW4p') }} name: "Build (arm_darwin)" @@ -509,6 +627,13 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Build (arm_darwin)" + - name: Prepare env script run: | rm -rf ./ci/tmp @@ -536,7 +661,7 @@ jobs: fi docker_server_image: - runs-on: [self-hosted, style-checker] + runs-on: [self-hosted, altinity-on-demand, altinity-style-checker] needs: [config_workflow, dockers_build_amd, dockers_build_arm, build_amd_release, build_arm_release] if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'RG9ja2VyIHNlcnZlciBpbWFnZQ==') }} name: "Docker server image" @@ -549,6 +674,13 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Docker server image" + - name: Prepare env script run: | rm -rf ./ci/tmp @@ -576,7 +708,7 @@ jobs: fi docker_keeper_image: - runs-on: [self-hosted, style-checker] + runs-on: [self-hosted, altinity-on-demand, altinity-style-checker] needs: [config_workflow, dockers_build_amd, dockers_build_arm, build_amd_release, build_arm_release] if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'RG9ja2VyIGtlZXBlciBpbWFnZQ==') }} name: "Docker keeper image" @@ -589,6 +721,13 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Docker keeper image" + - name: Prepare env script run: | rm -rf ./ci/tmp @@ -616,7 +755,7 @@ jobs: fi install_packages_amd_release: - runs-on: [self-hosted, style-checker] + runs-on: [self-hosted, altinity-on-demand, altinity-func-tester] needs: [config_workflow, dockers_build_amd, dockers_build_arm, build_amd_release] if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'SW5zdGFsbCBwYWNrYWdlcyAoYW1kX3JlbGVhc2Up') }} name: "Install packages 
(amd_release)" @@ -629,6 +768,13 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Install packages (amd_release)" + - name: Prepare env script run: | rm -rf ./ci/tmp @@ -656,7 +802,7 @@ jobs: fi install_packages_arm_release: - runs-on: [self-hosted, style-checker-aarch64] + runs-on: [self-hosted, altinity-on-demand, altinity-func-tester-aarch64] needs: [config_workflow, dockers_build_amd, dockers_build_arm, build_arm_release] if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'SW5zdGFsbCBwYWNrYWdlcyAoYXJtX3JlbGVhc2Up') }} name: "Install packages (arm_release)" @@ -669,6 +815,13 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Install packages (arm_release)" + - name: Prepare env script run: | rm -rf ./ci/tmp @@ -696,7 +849,7 @@ jobs: fi stateless_tests_amd_asan_distributed_plan_parallel_1_2: - runs-on: [self-hosted, amd-medium-cpu] + runs-on: [self-hosted, altinity-on-demand, altinity-func-tester] needs: [config_workflow, dockers_build_amd, dockers_build_arm, build_amd_asan] if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhbWRfYXNhbiwgZGlzdHJpYnV0ZWQgcGxhbiwgcGFyYWxsZWwsIDEvMik=') }} name: "Stateless tests (amd_asan, distributed plan, parallel, 1/2)" @@ -709,6 +862,13 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Stateless tests (amd_asan, distributed plan, parallel, 1/2)" + - name: Prepare env script run: | rm -rf ./ci/tmp @@ -736,7 +896,7 @@ jobs: fi stateless_tests_amd_asan_distributed_plan_parallel_2_2: - runs-on: [self-hosted, amd-medium-cpu] + runs-on: [self-hosted, altinity-on-demand, altinity-func-tester] needs: [config_workflow, dockers_build_amd, dockers_build_arm, build_amd_asan] if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhbWRfYXNhbiwgZGlzdHJpYnV0ZWQgcGxhbiwgcGFyYWxsZWwsIDIvMik=') }} name: "Stateless tests (amd_asan, distributed plan, parallel, 2/2)" @@ -749,6 +909,13 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Stateless tests (amd_asan, distributed plan, parallel, 2/2)" + - name: Prepare env script run: | rm -rf ./ci/tmp @@ -776,7 +943,7 @@ jobs: fi stateless_tests_amd_asan_db_disk_distributed_plan_sequential: - runs-on: [self-hosted, amd-small-mem] + runs-on: [self-hosted, altinity-on-demand, altinity-func-tester] needs: [config_workflow, dockers_build_amd, dockers_build_arm, build_amd_asan] if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && 
!contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhbWRfYXNhbiwgZGIgZGlzaywgZGlzdHJpYnV0ZWQgcGxhbiwgc2VxdWVudGlhbCk=') }} name: "Stateless tests (amd_asan, db disk, distributed plan, sequential)" @@ -789,6 +956,13 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Stateless tests (amd_asan, db disk, distributed plan, sequential)" + - name: Prepare env script run: | rm -rf ./ci/tmp @@ -816,7 +990,7 @@ jobs: fi integration_tests_amd_asan_db_disk_1_4: - runs-on: [self-hosted, amd-medium] + runs-on: [self-hosted, altinity-on-demand, altinity-func-tester] needs: [config_workflow, dockers_build_amd, dockers_build_arm, build_amd_asan] if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKGFtZF9hc2FuLCBkYiBkaXNrLCAxLzQp') }} name: "Integration tests (amd_asan, db disk, 1/4)" @@ -829,6 +1003,13 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Integration tests (amd_asan, db disk, 1/4)" + - name: Prepare env script run: | rm -rf ./ci/tmp @@ -856,7 +1037,7 @@ jobs: fi integration_tests_amd_asan_db_disk_2_4: - runs-on: [self-hosted, amd-medium] + runs-on: [self-hosted, altinity-on-demand, altinity-func-tester] needs: [config_workflow, dockers_build_amd, dockers_build_arm, build_amd_asan] if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKGFtZF9hc2FuLCBkYiBkaXNrLCAyLzQp') }} name: "Integration tests (amd_asan, db disk, 2/4)" @@ -869,6 +1050,13 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Integration tests (amd_asan, db disk, 2/4)" + - name: Prepare env script run: | rm -rf ./ci/tmp @@ -896,7 +1084,7 @@ jobs: fi integration_tests_amd_asan_db_disk_3_4: - runs-on: [self-hosted, amd-medium] + runs-on: [self-hosted, altinity-on-demand, altinity-func-tester] needs: [config_workflow, dockers_build_amd, dockers_build_arm, build_amd_asan] if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKGFtZF9hc2FuLCBkYiBkaXNrLCAzLzQp') }} name: "Integration tests (amd_asan, db disk, 3/4)" @@ -909,6 +1097,13 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Integration tests (amd_asan, db disk, 3/4)" + - name: Prepare env script run: | rm -rf ./ci/tmp @@ -936,7 +1131,7 @@ jobs: fi integration_tests_amd_asan_db_disk_4_4: - runs-on: [self-hosted, amd-medium] + runs-on: [self-hosted, altinity-on-demand, altinity-func-tester] needs: [config_workflow, dockers_build_amd, dockers_build_arm, build_amd_asan] if: ${{ !cancelled() && 
!contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKGFtZF9hc2FuLCBkYiBkaXNrLCA0LzQp') }} name: "Integration tests (amd_asan, db disk, 4/4)" @@ -949,6 +1144,13 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Integration tests (amd_asan, db disk, 4/4)" + - name: Prepare env script run: | rm -rf ./ci/tmp @@ -976,7 +1178,7 @@ jobs: fi integration_tests_amd_asan_db_disk_old_analyzer_1_6: - runs-on: [self-hosted, amd-medium] + runs-on: [self-hosted, altinity-on-demand, altinity-func-tester] needs: [config_workflow, dockers_build_amd, dockers_build_arm, build_amd_asan] if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKGFtZF9hc2FuLCBkYiBkaXNrLCBvbGQgYW5hbHl6ZXIsIDEvNik=') }} name: "Integration tests (amd_asan, db disk, old analyzer, 1/6)" @@ -989,6 +1191,13 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Integration tests (amd_asan, db disk, old analyzer, 1/6)" + - name: Prepare env script run: | rm -rf ./ci/tmp @@ -1016,7 +1225,7 @@ jobs: fi integration_tests_amd_asan_db_disk_old_analyzer_2_6: - runs-on: [self-hosted, amd-medium] + runs-on: [self-hosted, altinity-on-demand, altinity-func-tester] needs: [config_workflow, dockers_build_amd, dockers_build_arm, build_amd_asan] if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKGFtZF9hc2FuLCBkYiBkaXNrLCBvbGQgYW5hbHl6ZXIsIDIvNik=') }} name: "Integration tests (amd_asan, db disk, old analyzer, 2/6)" @@ -1029,6 +1238,13 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Integration tests (amd_asan, db disk, old analyzer, 2/6)" + - name: Prepare env script run: | rm -rf ./ci/tmp @@ -1056,7 +1272,7 @@ jobs: fi integration_tests_amd_asan_db_disk_old_analyzer_3_6: - runs-on: [self-hosted, amd-medium] + runs-on: [self-hosted, altinity-on-demand, altinity-func-tester] needs: [config_workflow, dockers_build_amd, dockers_build_arm, build_amd_asan] if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKGFtZF9hc2FuLCBkYiBkaXNrLCBvbGQgYW5hbHl6ZXIsIDMvNik=') }} name: "Integration tests (amd_asan, db disk, old analyzer, 3/6)" @@ -1069,6 +1285,13 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Integration tests (amd_asan, db disk, old analyzer, 3/6)" + - name: Prepare env script run: | rm -rf ./ci/tmp @@ -1096,7 +1319,7 @@ jobs: fi 
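The long base64 literals in these `if:` conditions are simply the job names: praktika records previously successful jobs in `cache_success_base64`, and each generated job checks for its own encoded name before running. Encoding the names keeps parentheses, commas, and slashes safe inside the expression. A standard-library-only check:

```python
import base64

# cache_success_base64 holds base64-encoded names of jobs that already
# succeeded in a cached run; a job is skipped when its key is present.
def cache_key(job_name: str) -> str:
    return base64.b64encode(job_name.encode()).decode()

assert cache_key("Build (amd_debug)") == "QnVpbGQgKGFtZF9kZWJ1Zyk="
assert base64.b64decode("U3RyZXNzIHRlc3QgKGFtZF90c2FuKQ==").decode() == "Stress test (amd_tsan)"
```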
integration_tests_amd_asan_db_disk_old_analyzer_4_6: - runs-on: [self-hosted, amd-medium] + runs-on: [self-hosted, altinity-on-demand, altinity-func-tester] needs: [config_workflow, dockers_build_amd, dockers_build_arm, build_amd_asan] if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKGFtZF9hc2FuLCBkYiBkaXNrLCBvbGQgYW5hbHl6ZXIsIDQvNik=') }} name: "Integration tests (amd_asan, db disk, old analyzer, 4/6)" @@ -1109,6 +1332,13 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Integration tests (amd_asan, db disk, old analyzer, 4/6)" + - name: Prepare env script run: | rm -rf ./ci/tmp @@ -1136,7 +1366,7 @@ jobs: fi integration_tests_amd_asan_db_disk_old_analyzer_5_6: - runs-on: [self-hosted, amd-medium] + runs-on: [self-hosted, altinity-on-demand, altinity-func-tester] needs: [config_workflow, dockers_build_amd, dockers_build_arm, build_amd_asan] if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKGFtZF9hc2FuLCBkYiBkaXNrLCBvbGQgYW5hbHl6ZXIsIDUvNik=') }} name: "Integration tests (amd_asan, db disk, old analyzer, 5/6)" @@ -1149,6 +1379,13 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Integration tests (amd_asan, db disk, old analyzer, 5/6)" + - name: Prepare env script run: | rm -rf ./ci/tmp @@ -1176,7 +1413,7 @@ jobs: fi integration_tests_amd_asan_db_disk_old_analyzer_6_6: - runs-on: [self-hosted, amd-medium] + runs-on: [self-hosted, altinity-on-demand, altinity-func-tester] needs: [config_workflow, dockers_build_amd, dockers_build_arm, build_amd_asan] if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKGFtZF9hc2FuLCBkYiBkaXNrLCBvbGQgYW5hbHl6ZXIsIDYvNik=') }} name: "Integration tests (amd_asan, db disk, old analyzer, 6/6)" @@ -1189,6 +1426,13 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Integration tests (amd_asan, db disk, old analyzer, 6/6)" + - name: Prepare env script run: | rm -rf ./ci/tmp @@ -1216,7 +1460,7 @@ jobs: fi integration_tests_amd_tsan_1_6: - runs-on: [self-hosted, amd-medium] + runs-on: [self-hosted, altinity-on-demand, altinity-func-tester] needs: [config_workflow, dockers_build_amd, dockers_build_arm, build_amd_tsan] if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKGFtZF90c2FuLCAxLzYp') }} name: "Integration tests (amd_tsan, 1/6)" @@ -1229,6 +1473,13 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} + - name: Setup + uses: ./.github/actions/runner_setup + - name: 
Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Integration tests (amd_tsan, 1/6)" + - name: Prepare env script run: | rm -rf ./ci/tmp @@ -1256,7 +1507,7 @@ jobs: fi integration_tests_amd_tsan_2_6: - runs-on: [self-hosted, amd-medium] + runs-on: [self-hosted, altinity-on-demand, altinity-func-tester] needs: [config_workflow, dockers_build_amd, dockers_build_arm, build_amd_tsan] if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKGFtZF90c2FuLCAyLzYp') }} name: "Integration tests (amd_tsan, 2/6)" @@ -1269,6 +1520,13 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Integration tests (amd_tsan, 2/6)" + - name: Prepare env script run: | rm -rf ./ci/tmp @@ -1296,7 +1554,7 @@ jobs: fi integration_tests_amd_tsan_3_6: - runs-on: [self-hosted, amd-medium] + runs-on: [self-hosted, altinity-on-demand, altinity-func-tester] needs: [config_workflow, dockers_build_amd, dockers_build_arm, build_amd_tsan] if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKGFtZF90c2FuLCAzLzYp') }} name: "Integration tests (amd_tsan, 3/6)" @@ -1309,6 +1567,13 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Integration tests (amd_tsan, 3/6)" + - name: Prepare env script run: | rm -rf ./ci/tmp @@ -1336,7 +1601,7 @@ jobs: fi integration_tests_amd_tsan_4_6: - runs-on: [self-hosted, amd-medium] + runs-on: [self-hosted, altinity-on-demand, altinity-func-tester] needs: [config_workflow, dockers_build_amd, dockers_build_arm, build_amd_tsan] if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKGFtZF90c2FuLCA0LzYp') }} name: "Integration tests (amd_tsan, 4/6)" @@ -1349,6 +1614,13 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Integration tests (amd_tsan, 4/6)" + - name: Prepare env script run: | rm -rf ./ci/tmp @@ -1376,7 +1648,7 @@ jobs: fi integration_tests_amd_tsan_5_6: - runs-on: [self-hosted, amd-medium] + runs-on: [self-hosted, altinity-on-demand, altinity-func-tester] needs: [config_workflow, dockers_build_amd, dockers_build_arm, build_amd_tsan] if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKGFtZF90c2FuLCA1LzYp') }} name: "Integration tests (amd_tsan, 5/6)" @@ -1389,6 +1661,13 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Integration tests (amd_tsan, 
5/6)" + - name: Prepare env script run: | rm -rf ./ci/tmp @@ -1416,7 +1695,7 @@ jobs: fi integration_tests_amd_tsan_6_6: - runs-on: [self-hosted, amd-medium] + runs-on: [self-hosted, altinity-on-demand, altinity-func-tester] needs: [config_workflow, dockers_build_amd, dockers_build_arm, build_amd_tsan] if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKGFtZF90c2FuLCA2LzYp') }} name: "Integration tests (amd_tsan, 6/6)" @@ -1429,6 +1708,13 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Integration tests (amd_tsan, 6/6)" + - name: Prepare env script run: | rm -rf ./ci/tmp @@ -1456,7 +1742,7 @@ jobs: fi stress_test_amd_debug: - runs-on: [self-hosted, amd-medium] + runs-on: [self-hosted, altinity-on-demand, altinity-func-tester] needs: [config_workflow, dockers_build_amd, dockers_build_arm, build_amd_debug] if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'U3RyZXNzIHRlc3QgKGFtZF9kZWJ1Zyk=') }} name: "Stress test (amd_debug)" @@ -1469,6 +1755,13 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Stress test (amd_debug)" + - name: Prepare env script run: | rm -rf ./ci/tmp @@ -1496,7 +1789,7 @@ jobs: fi stress_test_amd_tsan: - runs-on: [self-hosted, amd-medium] + runs-on: [self-hosted, altinity-on-demand, altinity-func-tester] needs: [config_workflow, dockers_build_amd, dockers_build_arm, build_amd_tsan] if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'U3RyZXNzIHRlc3QgKGFtZF90c2FuKQ==') }} name: "Stress test (amd_tsan)" @@ -1509,6 +1802,13 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Stress test (amd_tsan)" + - name: Prepare env script run: | rm -rf ./ci/tmp @@ -1536,7 +1836,7 @@ jobs: fi stress_test_arm_asan: - runs-on: [self-hosted, arm-medium] + runs-on: [self-hosted, altinity-on-demand, altinity-func-tester-aarch64] needs: [config_workflow, dockers_build_amd, dockers_build_arm, build_arm_asan] if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'U3RyZXNzIHRlc3QgKGFybV9hc2FuKQ==') }} name: "Stress test (arm_asan)" @@ -1549,6 +1849,13 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Stress test (arm_asan)" + - name: Prepare env script run: | rm -rf ./ci/tmp @@ -1576,7 +1883,7 @@ jobs: fi stress_test_arm_asan_s3: - runs-on: [self-hosted, arm-medium] + runs-on: [self-hosted, altinity-on-demand, altinity-func-tester-aarch64] 
needs: [config_workflow, dockers_build_amd, dockers_build_arm, build_arm_asan] if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'U3RyZXNzIHRlc3QgKGFybV9hc2FuLCBzMyk=') }} name: "Stress test (arm_asan, s3)" @@ -1589,6 +1896,13 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Stress test (arm_asan, s3)" + - name: Prepare env script run: | rm -rf ./ci/tmp @@ -1616,7 +1930,7 @@ jobs: fi stress_test_amd_ubsan: - runs-on: [self-hosted, amd-medium] + runs-on: [self-hosted, altinity-on-demand, altinity-func-tester] needs: [config_workflow, dockers_build_amd, dockers_build_arm, build_amd_ubsan] if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'U3RyZXNzIHRlc3QgKGFtZF91YnNhbik=') }} name: "Stress test (amd_ubsan)" @@ -1629,6 +1943,13 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Stress test (amd_ubsan)" + - name: Prepare env script run: | rm -rf ./ci/tmp @@ -1656,7 +1977,7 @@ jobs: fi stress_test_amd_msan: - runs-on: [self-hosted, amd-medium] + runs-on: [self-hosted, altinity-on-demand, altinity-func-tester] needs: [config_workflow, dockers_build_amd, dockers_build_arm, build_amd_msan] if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'U3RyZXNzIHRlc3QgKGFtZF9tc2FuKQ==') }} name: "Stress test (amd_msan)" @@ -1669,6 +1990,13 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Stress test (amd_msan)" + - name: Prepare env script run: | rm -rf ./ci/tmp @@ -1696,7 +2024,7 @@ jobs: fi finish_workflow: - runs-on: [self-hosted, style-checker-aarch64] + runs-on: [self-hosted, altinity-on-demand, altinity-style-checker-aarch64] needs: [config_workflow, dockers_build_amd, dockers_build_arm, build_amd_debug, build_amd_asan, build_amd_tsan, build_amd_msan, build_amd_ubsan, build_arm_asan, build_amd_release, build_arm_release, build_amd_darwin, build_arm_darwin, docker_server_image, docker_keeper_image, install_packages_amd_release, install_packages_arm_release, stateless_tests_amd_asan_distributed_plan_parallel_1_2, stateless_tests_amd_asan_distributed_plan_parallel_2_2, stateless_tests_amd_asan_db_disk_distributed_plan_sequential, integration_tests_amd_asan_db_disk_1_4, integration_tests_amd_asan_db_disk_2_4, integration_tests_amd_asan_db_disk_3_4, integration_tests_amd_asan_db_disk_4_4, integration_tests_amd_asan_db_disk_old_analyzer_1_6, integration_tests_amd_asan_db_disk_old_analyzer_2_6, integration_tests_amd_asan_db_disk_old_analyzer_3_6, integration_tests_amd_asan_db_disk_old_analyzer_4_6, integration_tests_amd_asan_db_disk_old_analyzer_5_6, integration_tests_amd_asan_db_disk_old_analyzer_6_6, integration_tests_amd_tsan_1_6, integration_tests_amd_tsan_2_6, 
integration_tests_amd_tsan_3_6, integration_tests_amd_tsan_4_6, integration_tests_amd_tsan_5_6, integration_tests_amd_tsan_6_6, stress_test_amd_debug, stress_test_amd_tsan, stress_test_arm_asan, stress_test_arm_asan_s3, stress_test_amd_ubsan, stress_test_amd_msan] if: ${{ always() }} name: "Finish Workflow" @@ -1709,6 +2037,13 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Finish Workflow" + - name: Prepare env script run: | rm -rf ./ci/tmp diff --git a/.github/workflows/release_builds.yml b/.github/workflows/release_builds.yml new file mode 100644 index 000000000000..2355e4181dfe --- /dev/null +++ b/.github/workflows/release_builds.yml @@ -0,0 +1,1317 @@ +# generated by praktika + +name: Release Builds +on: + workflow_dispatch: + inputs: + +env: + PYTHONUNBUFFERED: 1 + CHECKOUT_REF: "" + DOCKER_PASSWORD: ${{ secrets.DOCKER_PASSWORD }} + CLICKHOUSE_TEST_STAT_URL: ${{ secrets.CLICKHOUSE_TEST_STAT_URL }} + CLICKHOUSE_TEST_STAT_LOGIN: ${{ secrets.CLICKHOUSE_TEST_STAT_LOGIN }} + CLICKHOUSE_TEST_STAT_PASSWORD: ${{ secrets.CLICKHOUSE_TEST_STAT_PASSWORD }} + AWS_ACCESS_KEY_ID: ${{ secrets.AWS_ACCESS_KEY_ID }} + AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_SECRET_ACCESS_KEY }} + AWS_DEFAULT_REGION: ${{ secrets.AWS_DEFAULT_REGION }} + CHECKS_DATABASE_HOST: ${{ secrets.CHECKS_DATABASE_HOST }} + AZURE_STORAGE_KEY: ${{ secrets.AZURE_STORAGE_KEY }} + AZURE_ACCOUNT_NAME: ${{ secrets.AZURE_ACCOUNT_NAME }} + AZURE_CONTAINER_NAME: ${{ secrets.AZURE_CONTAINER_NAME }} + AZURE_STORAGE_ACCOUNT_URL: "https://${{ secrets.AZURE_ACCOUNT_NAME }}.blob.core.windows.net/" + ROBOT_TOKEN: ${{ secrets.ROBOT_TOKEN }} + GH_TOKEN: ${{ github.token }} + + +jobs: + + config_workflow: + runs-on: [self-hosted, altinity-on-demand, altinity-style-checker-aarch64] + needs: [] + name: "Config Workflow" + outputs: + data: ${{ steps.run.outputs.DATA }} + pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} + steps: + - name: Checkout code + uses: actions/checkout@v4 + with: + ref: ${{ env.CHECKOUT_REF }} + + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Config Workflow" + + - name: Note report location to summary + env: + PR_NUMBER: ${{ github.event.pull_request.number || 0 }} + COMMIT_SHA: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }} + run: | + if [ "$PR_NUMBER" -eq 0 ]; then + PREFIX="REFs/$GITHUB_REF_NAME/$COMMIT_SHA" + else + PREFIX="PRs/$PR_NUMBER/$COMMIT_SHA" + fi + REPORT_LINK=https://s3.amazonaws.com/altinity-build-artifacts/$PREFIX/$GITHUB_RUN_ID/ci_run_report.html + echo "Workflow Run Report: [View Report]($REPORT_LINK)" >> $GITHUB_STEP_SUMMARY + + - name: Prepare env script + run: | + rm -rf ./ci/tmp + mkdir -p ./ci/tmp + cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF' + export PYTHONPATH=./ci:.: + cat > ./ci/tmp/workflow_inputs.json << 'EOF' + ${{ toJson(github.event.inputs) }} + EOF + cat > ./ci/tmp/workflow_job.json << 'EOF' + ${{ toJson(job) }} + EOF + cat > ./ci/tmp/workflow_status.json << 'EOF' + ${{ toJson(needs) }} + EOF + ENV_SETUP_SCRIPT_EOF + + - name: Run + id: run + run: | + . 
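Each job's "Prepare env script" step serializes three pieces of GitHub context to disk through quoted heredocs, so praktika can read them without any shell interpolation of the JSON. A sketch of the consuming side (paths come from the step; the exact payload schemas are praktika's and are assumed here):

```python
import json

# Read the context files written by the "Prepare env script" step.
with open("./ci/tmp/workflow_inputs.json") as f:
    inputs = json.load(f)   # github.event.inputs: workflow_dispatch parameters
with open("./ci/tmp/workflow_status.json") as f:
    needs = json.load(f)    # per-dependency map of results and outputs
with open("./ci/tmp/workflow_job.json") as f:
    job = json.load(f)      # the current job's context object
```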
./ci/tmp/praktika_setup_env.sh + set -o pipefail + if command -v ts &> /dev/null; then + python3 -m praktika run 'Config Workflow' --workflow "Release Builds" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log + else + python3 -m praktika run 'Config Workflow' --workflow "Release Builds" --ci |& tee ./ci/tmp/job.log + fi + + dockers_build_amd: + runs-on: [self-hosted, altinity-on-demand, altinity-style-checker] + needs: [config_workflow] + name: "Dockers Build (amd)" + outputs: + data: ${{ steps.run.outputs.DATA }} + pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} + steps: + - name: Checkout code + uses: actions/checkout@v4 + with: + ref: ${{ env.CHECKOUT_REF }} + + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Dockers Build (amd)" + + - name: Prepare env script + run: | + rm -rf ./ci/tmp + mkdir -p ./ci/tmp + cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF' + export PYTHONPATH=./ci:.: + cat > ./ci/tmp/workflow_inputs.json << 'EOF' + ${{ toJson(github.event.inputs) }} + EOF + cat > ./ci/tmp/workflow_job.json << 'EOF' + ${{ toJson(job) }} + EOF + cat > ./ci/tmp/workflow_status.json << 'EOF' + ${{ toJson(needs) }} + EOF + ENV_SETUP_SCRIPT_EOF + + - name: Run + id: run + run: | + . ./ci/tmp/praktika_setup_env.sh + set -o pipefail + if command -v ts &> /dev/null; then + python3 -m praktika run 'Dockers Build (amd)' --workflow "Release Builds" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log + else + python3 -m praktika run 'Dockers Build (amd)' --workflow "Release Builds" --ci |& tee ./ci/tmp/job.log + fi + + dockers_build_arm: + runs-on: [self-hosted, altinity-on-demand, altinity-style-checker-aarch64] + needs: [config_workflow] + name: "Dockers Build (arm)" + outputs: + data: ${{ steps.run.outputs.DATA }} + pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} + steps: + - name: Checkout code + uses: actions/checkout@v4 + with: + ref: ${{ env.CHECKOUT_REF }} + + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Dockers Build (arm)" + + - name: Prepare env script + run: | + rm -rf ./ci/tmp + mkdir -p ./ci/tmp + cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF' + export PYTHONPATH=./ci:.: + cat > ./ci/tmp/workflow_inputs.json << 'EOF' + ${{ toJson(github.event.inputs) }} + EOF + cat > ./ci/tmp/workflow_job.json << 'EOF' + ${{ toJson(job) }} + EOF + cat > ./ci/tmp/workflow_status.json << 'EOF' + ${{ toJson(needs) }} + EOF + ENV_SETUP_SCRIPT_EOF + + - name: Run + id: run + run: | + . 
./ci/tmp/praktika_setup_env.sh + set -o pipefail + if command -v ts &> /dev/null; then + python3 -m praktika run 'Dockers Build (arm)' --workflow "Release Builds" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log + else + python3 -m praktika run 'Dockers Build (arm)' --workflow "Release Builds" --ci |& tee ./ci/tmp/job.log + fi + + build_amd_debug: + runs-on: [self-hosted, altinity-on-demand, altinity-func-tester-aarch64] + needs: [config_workflow, dockers_build_amd, dockers_build_arm, build_amd_binary, build_arm_binary] + name: "Build (amd_debug)" + outputs: + data: ${{ steps.run.outputs.DATA }} + pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} + steps: + - name: Checkout code + uses: actions/checkout@v4 + with: + ref: ${{ env.CHECKOUT_REF }} + + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Build (amd_debug)" + + - name: Prepare env script + run: | + rm -rf ./ci/tmp + mkdir -p ./ci/tmp + cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF' + export PYTHONPATH=./ci:.: + cat > ./ci/tmp/workflow_inputs.json << 'EOF' + ${{ toJson(github.event.inputs) }} + EOF + cat > ./ci/tmp/workflow_job.json << 'EOF' + ${{ toJson(job) }} + EOF + cat > ./ci/tmp/workflow_status.json << 'EOF' + ${{ toJson(needs) }} + EOF + ENV_SETUP_SCRIPT_EOF + + - name: Run + id: run + run: | + . ./ci/tmp/praktika_setup_env.sh + set -o pipefail + if command -v ts &> /dev/null; then + python3 -m praktika run 'Build (amd_debug)' --workflow "Release Builds" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log + else + python3 -m praktika run 'Build (amd_debug)' --workflow "Release Builds" --ci |& tee ./ci/tmp/job.log + fi + + build_amd_asan: + runs-on: [self-hosted, altinity-on-demand, altinity-func-tester-aarch64] + needs: [config_workflow, dockers_build_amd, dockers_build_arm, build_amd_binary, build_arm_binary] + name: "Build (amd_asan)" + outputs: + data: ${{ steps.run.outputs.DATA }} + pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} + steps: + - name: Checkout code + uses: actions/checkout@v4 + with: + ref: ${{ env.CHECKOUT_REF }} + + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Build (amd_asan)" + + - name: Prepare env script + run: | + rm -rf ./ci/tmp + mkdir -p ./ci/tmp + cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF' + export PYTHONPATH=./ci:.: + cat > ./ci/tmp/workflow_inputs.json << 'EOF' + ${{ toJson(github.event.inputs) }} + EOF + cat > ./ci/tmp/workflow_job.json << 'EOF' + ${{ toJson(job) }} + EOF + cat > ./ci/tmp/workflow_status.json << 'EOF' + ${{ toJson(needs) }} + EOF + ENV_SETUP_SCRIPT_EOF + + - name: Run + id: run + run: | + . 
./ci/tmp/praktika_setup_env.sh + set -o pipefail + if command -v ts &> /dev/null; then + python3 -m praktika run 'Build (amd_asan)' --workflow "Release Builds" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log + else + python3 -m praktika run 'Build (amd_asan)' --workflow "Release Builds" --ci |& tee ./ci/tmp/job.log + fi + + build_amd_tsan: + runs-on: [self-hosted, altinity-on-demand, altinity-func-tester-aarch64] + needs: [config_workflow, dockers_build_amd, dockers_build_arm, build_amd_binary, build_arm_binary] + name: "Build (amd_tsan)" + outputs: + data: ${{ steps.run.outputs.DATA }} + pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} + steps: + - name: Checkout code + uses: actions/checkout@v4 + with: + ref: ${{ env.CHECKOUT_REF }} + + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Build (amd_tsan)" + + - name: Prepare env script + run: | + rm -rf ./ci/tmp + mkdir -p ./ci/tmp + cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF' + export PYTHONPATH=./ci:.: + cat > ./ci/tmp/workflow_inputs.json << 'EOF' + ${{ toJson(github.event.inputs) }} + EOF + cat > ./ci/tmp/workflow_job.json << 'EOF' + ${{ toJson(job) }} + EOF + cat > ./ci/tmp/workflow_status.json << 'EOF' + ${{ toJson(needs) }} + EOF + ENV_SETUP_SCRIPT_EOF + + - name: Run + id: run + run: | + . ./ci/tmp/praktika_setup_env.sh + set -o pipefail + if command -v ts &> /dev/null; then + python3 -m praktika run 'Build (amd_tsan)' --workflow "Release Builds" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log + else + python3 -m praktika run 'Build (amd_tsan)' --workflow "Release Builds" --ci |& tee ./ci/tmp/job.log + fi + + build_amd_msan: + runs-on: [self-hosted, altinity-on-demand, altinity-func-tester-aarch64] + needs: [config_workflow, dockers_build_amd, dockers_build_arm, build_amd_binary, build_arm_binary] + name: "Build (amd_msan)" + outputs: + data: ${{ steps.run.outputs.DATA }} + pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} + steps: + - name: Checkout code + uses: actions/checkout@v4 + with: + ref: ${{ env.CHECKOUT_REF }} + + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Build (amd_msan)" + + - name: Prepare env script + run: | + rm -rf ./ci/tmp + mkdir -p ./ci/tmp + cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF' + export PYTHONPATH=./ci:.: + cat > ./ci/tmp/workflow_inputs.json << 'EOF' + ${{ toJson(github.event.inputs) }} + EOF + cat > ./ci/tmp/workflow_job.json << 'EOF' + ${{ toJson(job) }} + EOF + cat > ./ci/tmp/workflow_status.json << 'EOF' + ${{ toJson(needs) }} + EOF + ENV_SETUP_SCRIPT_EOF + + - name: Run + id: run + run: | + . 
./ci/tmp/praktika_setup_env.sh + set -o pipefail + if command -v ts &> /dev/null; then + python3 -m praktika run 'Build (amd_msan)' --workflow "Release Builds" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log + else + python3 -m praktika run 'Build (amd_msan)' --workflow "Release Builds" --ci |& tee ./ci/tmp/job.log + fi + + build_amd_ubsan: + runs-on: [self-hosted, altinity-on-demand, altinity-func-tester-aarch64] + needs: [config_workflow, dockers_build_amd, dockers_build_arm, build_amd_binary, build_arm_binary] + name: "Build (amd_ubsan)" + outputs: + data: ${{ steps.run.outputs.DATA }} + pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} + steps: + - name: Checkout code + uses: actions/checkout@v4 + with: + ref: ${{ env.CHECKOUT_REF }} + + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Build (amd_ubsan)" + + - name: Prepare env script + run: | + rm -rf ./ci/tmp + mkdir -p ./ci/tmp + cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF' + export PYTHONPATH=./ci:.: + cat > ./ci/tmp/workflow_inputs.json << 'EOF' + ${{ toJson(github.event.inputs) }} + EOF + cat > ./ci/tmp/workflow_job.json << 'EOF' + ${{ toJson(job) }} + EOF + cat > ./ci/tmp/workflow_status.json << 'EOF' + ${{ toJson(needs) }} + EOF + ENV_SETUP_SCRIPT_EOF + + - name: Run + id: run + run: | + . ./ci/tmp/praktika_setup_env.sh + set -o pipefail + if command -v ts &> /dev/null; then + python3 -m praktika run 'Build (amd_ubsan)' --workflow "Release Builds" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log + else + python3 -m praktika run 'Build (amd_ubsan)' --workflow "Release Builds" --ci |& tee ./ci/tmp/job.log + fi + + build_amd_binary: + runs-on: [self-hosted, altinity-on-demand, altinity-func-tester-aarch64] + needs: [config_workflow, dockers_build_amd, dockers_build_arm] + name: "Build (amd_binary)" + outputs: + data: ${{ steps.run.outputs.DATA }} + pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} + steps: + - name: Checkout code + uses: actions/checkout@v4 + with: + ref: ${{ env.CHECKOUT_REF }} + + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Build (amd_binary)" + + - name: Prepare env script + run: | + rm -rf ./ci/tmp + mkdir -p ./ci/tmp + cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF' + export PYTHONPATH=./ci:.: + cat > ./ci/tmp/workflow_inputs.json << 'EOF' + ${{ toJson(github.event.inputs) }} + EOF + cat > ./ci/tmp/workflow_job.json << 'EOF' + ${{ toJson(job) }} + EOF + cat > ./ci/tmp/workflow_status.json << 'EOF' + ${{ toJson(needs) }} + EOF + ENV_SETUP_SCRIPT_EOF + + - name: Run + id: run + run: | + . 
./ci/tmp/praktika_setup_env.sh + set -o pipefail + if command -v ts &> /dev/null; then + python3 -m praktika run 'Build (amd_binary)' --workflow "Release Builds" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log + else + python3 -m praktika run 'Build (amd_binary)' --workflow "Release Builds" --ci |& tee ./ci/tmp/job.log + fi + + build_arm_asan: + runs-on: [self-hosted, altinity-on-demand, altinity-func-tester-aarch64] + needs: [config_workflow, dockers_build_amd, dockers_build_arm, build_amd_binary, build_arm_binary] + name: "Build (arm_asan)" + outputs: + data: ${{ steps.run.outputs.DATA }} + pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} + steps: + - name: Checkout code + uses: actions/checkout@v4 + with: + ref: ${{ env.CHECKOUT_REF }} + + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Build (arm_asan)" + + - name: Prepare env script + run: | + rm -rf ./ci/tmp + mkdir -p ./ci/tmp + cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF' + export PYTHONPATH=./ci:.: + cat > ./ci/tmp/workflow_inputs.json << 'EOF' + ${{ toJson(github.event.inputs) }} + EOF + cat > ./ci/tmp/workflow_job.json << 'EOF' + ${{ toJson(job) }} + EOF + cat > ./ci/tmp/workflow_status.json << 'EOF' + ${{ toJson(needs) }} + EOF + ENV_SETUP_SCRIPT_EOF + + - name: Run + id: run + run: | + . ./ci/tmp/praktika_setup_env.sh + set -o pipefail + if command -v ts &> /dev/null; then + python3 -m praktika run 'Build (arm_asan)' --workflow "Release Builds" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log + else + python3 -m praktika run 'Build (arm_asan)' --workflow "Release Builds" --ci |& tee ./ci/tmp/job.log + fi + + build_arm_binary: + runs-on: [self-hosted, altinity-on-demand, altinity-func-tester-aarch64] + needs: [config_workflow, dockers_build_amd, dockers_build_arm] + name: "Build (arm_binary)" + outputs: + data: ${{ steps.run.outputs.DATA }} + pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} + steps: + - name: Checkout code + uses: actions/checkout@v4 + with: + ref: ${{ env.CHECKOUT_REF }} + + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Build (arm_binary)" + + - name: Prepare env script + run: | + rm -rf ./ci/tmp + mkdir -p ./ci/tmp + cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF' + export PYTHONPATH=./ci:.: + cat > ./ci/tmp/workflow_inputs.json << 'EOF' + ${{ toJson(github.event.inputs) }} + EOF + cat > ./ci/tmp/workflow_job.json << 'EOF' + ${{ toJson(job) }} + EOF + cat > ./ci/tmp/workflow_status.json << 'EOF' + ${{ toJson(needs) }} + EOF + ENV_SETUP_SCRIPT_EOF + + - name: Run + id: run + run: | + . 
./ci/tmp/praktika_setup_env.sh + set -o pipefail + if command -v ts &> /dev/null; then + python3 -m praktika run 'Build (arm_binary)' --workflow "Release Builds" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log + else + python3 -m praktika run 'Build (arm_binary)' --workflow "Release Builds" --ci |& tee ./ci/tmp/job.log + fi + + build_amd_release: + runs-on: [self-hosted, altinity-on-demand, altinity-func-tester-aarch64] + needs: [config_workflow, dockers_build_amd, dockers_build_arm] + name: "Build (amd_release)" + outputs: + data: ${{ steps.run.outputs.DATA }} + pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} + steps: + - name: Checkout code + uses: actions/checkout@v4 + with: + ref: ${{ env.CHECKOUT_REF }} + + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Build (amd_release)" + + - name: Prepare env script + run: | + rm -rf ./ci/tmp + mkdir -p ./ci/tmp + cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF' + export PYTHONPATH=./ci:.: + cat > ./ci/tmp/workflow_inputs.json << 'EOF' + ${{ toJson(github.event.inputs) }} + EOF + cat > ./ci/tmp/workflow_job.json << 'EOF' + ${{ toJson(job) }} + EOF + cat > ./ci/tmp/workflow_status.json << 'EOF' + ${{ toJson(needs) }} + EOF + ENV_SETUP_SCRIPT_EOF + + - name: Run + id: run + run: | + . ./ci/tmp/praktika_setup_env.sh + set -o pipefail + if command -v ts &> /dev/null; then + python3 -m praktika run 'Build (amd_release)' --workflow "Release Builds" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log + else + python3 -m praktika run 'Build (amd_release)' --workflow "Release Builds" --ci |& tee ./ci/tmp/job.log + fi + + build_arm_release: + runs-on: [self-hosted, altinity-on-demand, altinity-builder] + needs: [config_workflow, dockers_build_amd, dockers_build_arm] + name: "Build (arm_release)" + outputs: + data: ${{ steps.run.outputs.DATA }} + pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} + steps: + - name: Checkout code + uses: actions/checkout@v4 + with: + ref: ${{ env.CHECKOUT_REF }} + + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Build (arm_release)" + + - name: Prepare env script + run: | + rm -rf ./ci/tmp + mkdir -p ./ci/tmp + cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF' + export PYTHONPATH=./ci:.: + cat > ./ci/tmp/workflow_inputs.json << 'EOF' + ${{ toJson(github.event.inputs) }} + EOF + cat > ./ci/tmp/workflow_job.json << 'EOF' + ${{ toJson(job) }} + EOF + cat > ./ci/tmp/workflow_status.json << 'EOF' + ${{ toJson(needs) }} + EOF + ENV_SETUP_SCRIPT_EOF + + - name: Run + id: run + run: | + . 
./ci/tmp/praktika_setup_env.sh + set -o pipefail + if command -v ts &> /dev/null; then + python3 -m praktika run 'Build (arm_release)' --workflow "Release Builds" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log + else + python3 -m praktika run 'Build (arm_release)' --workflow "Release Builds" --ci |& tee ./ci/tmp/job.log + fi + + docker_server_image: + runs-on: [self-hosted, altinity-on-demand, altinity-style-checker] + needs: [config_workflow, dockers_build_amd, dockers_build_arm, build_amd_release, build_arm_release] + name: "Docker server image" + outputs: + data: ${{ steps.run.outputs.DATA }} + pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} + steps: + - name: Checkout code + uses: actions/checkout@v4 + with: + ref: ${{ env.CHECKOUT_REF }} + + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Docker server image" + + - name: Prepare env script + run: | + rm -rf ./ci/tmp + mkdir -p ./ci/tmp + cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF' + export PYTHONPATH=./ci:.: + cat > ./ci/tmp/workflow_inputs.json << 'EOF' + ${{ toJson(github.event.inputs) }} + EOF + cat > ./ci/tmp/workflow_job.json << 'EOF' + ${{ toJson(job) }} + EOF + cat > ./ci/tmp/workflow_status.json << 'EOF' + ${{ toJson(needs) }} + EOF + ENV_SETUP_SCRIPT_EOF + + - name: Run + id: run + run: | + . ./ci/tmp/praktika_setup_env.sh + set -o pipefail + if command -v ts &> /dev/null; then + python3 -m praktika run 'Docker server image' --workflow "Release Builds" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log + else + python3 -m praktika run 'Docker server image' --workflow "Release Builds" --ci |& tee ./ci/tmp/job.log + fi + + docker_keeper_image: + runs-on: [self-hosted, altinity-on-demand, altinity-style-checker] + needs: [config_workflow, dockers_build_amd, dockers_build_arm, build_amd_release, build_arm_release] + name: "Docker keeper image" + outputs: + data: ${{ steps.run.outputs.DATA }} + pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} + steps: + - name: Checkout code + uses: actions/checkout@v4 + with: + ref: ${{ env.CHECKOUT_REF }} + + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Docker keeper image" + + - name: Prepare env script + run: | + rm -rf ./ci/tmp + mkdir -p ./ci/tmp + cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF' + export PYTHONPATH=./ci:.: + cat > ./ci/tmp/workflow_inputs.json << 'EOF' + ${{ toJson(github.event.inputs) }} + EOF + cat > ./ci/tmp/workflow_job.json << 'EOF' + ${{ toJson(job) }} + EOF + cat > ./ci/tmp/workflow_status.json << 'EOF' + ${{ toJson(needs) }} + EOF + ENV_SETUP_SCRIPT_EOF + + - name: Run + id: run + run: | + . 
./ci/tmp/praktika_setup_env.sh + set -o pipefail + if command -v ts &> /dev/null; then + python3 -m praktika run 'Docker keeper image' --workflow "Release Builds" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log + else + python3 -m praktika run 'Docker keeper image' --workflow "Release Builds" --ci |& tee ./ci/tmp/job.log + fi + + install_packages_amd_release: + runs-on: [self-hosted, altinity-on-demand, altinity-func-tester] + needs: [config_workflow, dockers_build_amd, dockers_build_arm, build_amd_release] + name: "Install packages (amd_release)" + outputs: + data: ${{ steps.run.outputs.DATA }} + pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} + steps: + - name: Checkout code + uses: actions/checkout@v4 + with: + ref: ${{ env.CHECKOUT_REF }} + + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Install packages (amd_release)" + + - name: Prepare env script + run: | + rm -rf ./ci/tmp + mkdir -p ./ci/tmp + cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF' + export PYTHONPATH=./ci:.: + cat > ./ci/tmp/workflow_inputs.json << 'EOF' + ${{ toJson(github.event.inputs) }} + EOF + cat > ./ci/tmp/workflow_job.json << 'EOF' + ${{ toJson(job) }} + EOF + cat > ./ci/tmp/workflow_status.json << 'EOF' + ${{ toJson(needs) }} + EOF + ENV_SETUP_SCRIPT_EOF + + - name: Run + id: run + run: | + . ./ci/tmp/praktika_setup_env.sh + set -o pipefail + if command -v ts &> /dev/null; then + python3 -m praktika run 'Install packages (amd_release)' --workflow "Release Builds" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log + else + python3 -m praktika run 'Install packages (amd_release)' --workflow "Release Builds" --ci |& tee ./ci/tmp/job.log + fi + + install_packages_arm_release: + runs-on: [self-hosted, altinity-on-demand, altinity-func-tester-aarch64] + needs: [config_workflow, dockers_build_amd, dockers_build_arm, build_arm_release] + name: "Install packages (arm_release)" + outputs: + data: ${{ steps.run.outputs.DATA }} + pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} + steps: + - name: Checkout code + uses: actions/checkout@v4 + with: + ref: ${{ env.CHECKOUT_REF }} + + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Install packages (arm_release)" + + - name: Prepare env script + run: | + rm -rf ./ci/tmp + mkdir -p ./ci/tmp + cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF' + export PYTHONPATH=./ci:.: + cat > ./ci/tmp/workflow_inputs.json << 'EOF' + ${{ toJson(github.event.inputs) }} + EOF + cat > ./ci/tmp/workflow_job.json << 'EOF' + ${{ toJson(job) }} + EOF + cat > ./ci/tmp/workflow_status.json << 'EOF' + ${{ toJson(needs) }} + EOF + ENV_SETUP_SCRIPT_EOF + + - name: Run + id: run + run: | + . 
./ci/tmp/praktika_setup_env.sh + set -o pipefail + if command -v ts &> /dev/null; then + python3 -m praktika run 'Install packages (arm_release)' --workflow "Release Builds" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log + else + python3 -m praktika run 'Install packages (arm_release)' --workflow "Release Builds" --ci |& tee ./ci/tmp/job.log + fi + + stateless_tests_amd_binary_old_analyzer_s3_storage_databasereplicated_parallel: + runs-on: [self-hosted, altinity-on-demand, altinity-func-tester] + needs: [config_workflow, dockers_build_amd, dockers_build_arm, build_amd_binary] + name: "Stateless tests (amd_binary, old analyzer, s3 storage, DatabaseReplicated, parallel)" + outputs: + data: ${{ steps.run.outputs.DATA }} + pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} + steps: + - name: Checkout code + uses: actions/checkout@v4 + with: + ref: ${{ env.CHECKOUT_REF }} + + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Stateless tests (amd_binary, old analyzer, s3 storage, DatabaseReplicated, parallel)" + + - name: Prepare env script + run: | + rm -rf ./ci/tmp + mkdir -p ./ci/tmp + cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF' + export PYTHONPATH=./ci:.: + cat > ./ci/tmp/workflow_inputs.json << 'EOF' + ${{ toJson(github.event.inputs) }} + EOF + cat > ./ci/tmp/workflow_job.json << 'EOF' + ${{ toJson(job) }} + EOF + cat > ./ci/tmp/workflow_status.json << 'EOF' + ${{ toJson(needs) }} + EOF + ENV_SETUP_SCRIPT_EOF + + - name: Run + id: run + run: | + . ./ci/tmp/praktika_setup_env.sh + set -o pipefail + if command -v ts &> /dev/null; then + python3 -m praktika run 'Stateless tests (amd_binary, old analyzer, s3 storage, DatabaseReplicated, parallel)' --workflow "Release Builds" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log + else + python3 -m praktika run 'Stateless tests (amd_binary, old analyzer, s3 storage, DatabaseReplicated, parallel)' --workflow "Release Builds" --ci |& tee ./ci/tmp/job.log + fi + + stateless_tests_amd_binary_old_analyzer_s3_storage_databasereplicated_sequential: + runs-on: [self-hosted, altinity-on-demand, altinity-func-tester] + needs: [config_workflow, dockers_build_amd, dockers_build_arm, build_amd_binary] + name: "Stateless tests (amd_binary, old analyzer, s3 storage, DatabaseReplicated, sequential)" + outputs: + data: ${{ steps.run.outputs.DATA }} + pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} + steps: + - name: Checkout code + uses: actions/checkout@v4 + with: + ref: ${{ env.CHECKOUT_REF }} + + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Stateless tests (amd_binary, old analyzer, s3 storage, DatabaseReplicated, sequential)" + + - name: Prepare env script + run: | + rm -rf ./ci/tmp + mkdir -p ./ci/tmp + cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF' + export PYTHONPATH=./ci:.: + cat > ./ci/tmp/workflow_inputs.json << 'EOF' + ${{ toJson(github.event.inputs) }} + EOF + cat > ./ci/tmp/workflow_job.json << 'EOF' + ${{ toJson(job) }} + EOF + cat > ./ci/tmp/workflow_status.json << 'EOF' + ${{ toJson(needs) }} + EOF + ENV_SETUP_SCRIPT_EOF + + - name: Run + id: run + run: | + . 
./ci/tmp/praktika_setup_env.sh + set -o pipefail + if command -v ts &> /dev/null; then + python3 -m praktika run 'Stateless tests (amd_binary, old analyzer, s3 storage, DatabaseReplicated, sequential)' --workflow "Release Builds" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log + else + python3 -m praktika run 'Stateless tests (amd_binary, old analyzer, s3 storage, DatabaseReplicated, sequential)' --workflow "Release Builds" --ci |& tee ./ci/tmp/job.log + fi + + stateless_tests_amd_binary_parallelreplicas_s3_storage_parallel: + runs-on: [self-hosted, altinity-on-demand, altinity-func-tester] + needs: [config_workflow, dockers_build_amd, dockers_build_arm, build_amd_binary] + name: "Stateless tests (amd_binary, ParallelReplicas, s3 storage, parallel)" + outputs: + data: ${{ steps.run.outputs.DATA }} + pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} + steps: + - name: Checkout code + uses: actions/checkout@v4 + with: + ref: ${{ env.CHECKOUT_REF }} + + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Stateless tests (amd_binary, ParallelReplicas, s3 storage, parallel)" + + - name: Prepare env script + run: | + rm -rf ./ci/tmp + mkdir -p ./ci/tmp + cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF' + export PYTHONPATH=./ci:.: + cat > ./ci/tmp/workflow_inputs.json << 'EOF' + ${{ toJson(github.event.inputs) }} + EOF + cat > ./ci/tmp/workflow_job.json << 'EOF' + ${{ toJson(job) }} + EOF + cat > ./ci/tmp/workflow_status.json << 'EOF' + ${{ toJson(needs) }} + EOF + ENV_SETUP_SCRIPT_EOF + + - name: Run + id: run + run: | + . ./ci/tmp/praktika_setup_env.sh + set -o pipefail + if command -v ts &> /dev/null; then + python3 -m praktika run 'Stateless tests (amd_binary, ParallelReplicas, s3 storage, parallel)' --workflow "Release Builds" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log + else + python3 -m praktika run 'Stateless tests (amd_binary, ParallelReplicas, s3 storage, parallel)' --workflow "Release Builds" --ci |& tee ./ci/tmp/job.log + fi + + stateless_tests_amd_binary_parallelreplicas_s3_storage_sequential: + runs-on: [self-hosted, altinity-on-demand, altinity-func-tester] + needs: [config_workflow, dockers_build_amd, dockers_build_arm, build_amd_binary] + name: "Stateless tests (amd_binary, ParallelReplicas, s3 storage, sequential)" + outputs: + data: ${{ steps.run.outputs.DATA }} + pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} + steps: + - name: Checkout code + uses: actions/checkout@v4 + with: + ref: ${{ env.CHECKOUT_REF }} + + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Stateless tests (amd_binary, ParallelReplicas, s3 storage, sequential)" + + - name: Prepare env script + run: | + rm -rf ./ci/tmp + mkdir -p ./ci/tmp + cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF' + export PYTHONPATH=./ci:.: + cat > ./ci/tmp/workflow_inputs.json << 'EOF' + ${{ toJson(github.event.inputs) }} + EOF + cat > ./ci/tmp/workflow_job.json << 'EOF' + ${{ toJson(job) }} + EOF + cat > ./ci/tmp/workflow_status.json << 'EOF' + ${{ toJson(needs) }} + EOF + ENV_SETUP_SCRIPT_EOF + + - name: Run + id: run + run: | + . 
./ci/tmp/praktika_setup_env.sh + set -o pipefail + if command -v ts &> /dev/null; then + python3 -m praktika run 'Stateless tests (amd_binary, ParallelReplicas, s3 storage, sequential)' --workflow "Release Builds" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log + else + python3 -m praktika run 'Stateless tests (amd_binary, ParallelReplicas, s3 storage, sequential)' --workflow "Release Builds" --ci |& tee ./ci/tmp/job.log + fi + + stateless_tests_arm_binary_parallel: + runs-on: [self-hosted, altinity-on-demand, altinity-func-tester-aarch64] + needs: [config_workflow, dockers_build_amd, dockers_build_arm, build_arm_binary] + name: "Stateless tests (arm_binary, parallel)" + outputs: + data: ${{ steps.run.outputs.DATA }} + pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} + steps: + - name: Checkout code + uses: actions/checkout@v4 + with: + ref: ${{ env.CHECKOUT_REF }} + + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Stateless tests (arm_binary, parallel)" + + - name: Prepare env script + run: | + rm -rf ./ci/tmp + mkdir -p ./ci/tmp + cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF' + export PYTHONPATH=./ci:.: + cat > ./ci/tmp/workflow_inputs.json << 'EOF' + ${{ toJson(github.event.inputs) }} + EOF + cat > ./ci/tmp/workflow_job.json << 'EOF' + ${{ toJson(job) }} + EOF + cat > ./ci/tmp/workflow_status.json << 'EOF' + ${{ toJson(needs) }} + EOF + ENV_SETUP_SCRIPT_EOF + + - name: Run + id: run + run: | + . ./ci/tmp/praktika_setup_env.sh + set -o pipefail + if command -v ts &> /dev/null; then + python3 -m praktika run 'Stateless tests (arm_binary, parallel)' --workflow "Release Builds" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log + else + python3 -m praktika run 'Stateless tests (arm_binary, parallel)' --workflow "Release Builds" --ci |& tee ./ci/tmp/job.log + fi + + stateless_tests_arm_binary_sequential: + runs-on: [self-hosted, altinity-on-demand, altinity-func-tester-aarch64] + needs: [config_workflow, dockers_build_amd, dockers_build_arm, build_arm_binary] + name: "Stateless tests (arm_binary, sequential)" + outputs: + data: ${{ steps.run.outputs.DATA }} + pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} + steps: + - name: Checkout code + uses: actions/checkout@v4 + with: + ref: ${{ env.CHECKOUT_REF }} + + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Stateless tests (arm_binary, sequential)" + + - name: Prepare env script + run: | + rm -rf ./ci/tmp + mkdir -p ./ci/tmp + cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF' + export PYTHONPATH=./ci:.: + cat > ./ci/tmp/workflow_inputs.json << 'EOF' + ${{ toJson(github.event.inputs) }} + EOF + cat > ./ci/tmp/workflow_job.json << 'EOF' + ${{ toJson(job) }} + EOF + cat > ./ci/tmp/workflow_status.json << 'EOF' + ${{ toJson(needs) }} + EOF + ENV_SETUP_SCRIPT_EOF + + - name: Run + id: run + run: | + . 
./ci/tmp/praktika_setup_env.sh + set -o pipefail + if command -v ts &> /dev/null; then + python3 -m praktika run 'Stateless tests (arm_binary, sequential)' --workflow "Release Builds" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log + else + python3 -m praktika run 'Stateless tests (arm_binary, sequential)' --workflow "Release Builds" --ci |& tee ./ci/tmp/job.log + fi + + finish_workflow: + runs-on: [self-hosted, altinity-on-demand, altinity-style-checker-aarch64] + needs: [config_workflow, dockers_build_amd, dockers_build_arm, build_amd_debug, build_amd_asan, build_amd_tsan, build_amd_msan, build_amd_ubsan, build_amd_binary, build_arm_asan, build_arm_binary, build_amd_release, build_arm_release, docker_server_image, docker_keeper_image, install_packages_amd_release, install_packages_arm_release, stateless_tests_amd_binary_old_analyzer_s3_storage_databasereplicated_parallel, stateless_tests_amd_binary_old_analyzer_s3_storage_databasereplicated_sequential, stateless_tests_amd_binary_parallelreplicas_s3_storage_parallel, stateless_tests_amd_binary_parallelreplicas_s3_storage_sequential, stateless_tests_arm_binary_parallel, stateless_tests_arm_binary_sequential] + if: ${{ always() }} + name: "Finish Workflow" + outputs: + data: ${{ steps.run.outputs.DATA }} + pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} + steps: + - name: Checkout code + uses: actions/checkout@v4 + with: + ref: ${{ env.CHECKOUT_REF }} + + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Finish Workflow" + + - name: Prepare env script + run: | + rm -rf ./ci/tmp + mkdir -p ./ci/tmp + cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF' + export PYTHONPATH=./ci:.: + cat > ./ci/tmp/workflow_inputs.json << 'EOF' + ${{ toJson(github.event.inputs) }} + EOF + cat > ./ci/tmp/workflow_job.json << 'EOF' + ${{ toJson(job) }} + EOF + cat > ./ci/tmp/workflow_status.json << 'EOF' + ${{ toJson(needs) }} + EOF + ENV_SETUP_SCRIPT_EOF + + - name: Run + id: run + run: | + . 
./ci/tmp/praktika_setup_env.sh + set -o pipefail + if command -v ts &> /dev/null; then + python3 -m praktika run 'Finish Workflow' --workflow "Release Builds" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log + else + python3 -m praktika run 'Finish Workflow' --workflow "Release Builds" --ci |& tee ./ci/tmp/job.log + fi + +########################################################################################## +##################################### ALTINITY JOBS ###################################### +########################################################################################## + + GrypeScanServer: + needs: [config_workflow, docker_server_image] + if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'RG9ja2VyIHNlcnZlciBpbWFnZQ==') }} + strategy: + fail-fast: false + matrix: + suffix: ['', '-alpine'] + uses: ./.github/workflows/grype_scan.yml + secrets: inherit + with: + docker_image: altinityinfra/clickhouse-server + version: ${{ fromJson(needs.config_workflow.outputs.data).custom_data.version.string }} + tag-suffix: ${{ matrix.suffix }} + GrypeScanKeeper: + needs: [config_workflow, docker_keeper_image] + if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'RG9ja2VyIGtlZXBlciBpbWFnZQ==') }} + uses: ./.github/workflows/grype_scan.yml + secrets: inherit + with: + docker_image: altinityinfra/clickhouse-keeper + version: ${{ fromJson(needs.config_workflow.outputs.data).custom_data.version.string }} + + SignRelease: + needs: [config_workflow, build_amd_release] + if: ${{ !failure() && !cancelled() }} + uses: ./.github/workflows/reusable_sign.yml + secrets: inherit + with: + test_name: Sign release + runner_type: altinity-style-checker + data: ${{ needs.config_workflow.outputs.data }} + SignAarch64: + needs: [config_workflow, build_arm_release] + if: ${{ !failure() && !cancelled() }} + uses: ./.github/workflows/reusable_sign.yml + secrets: inherit + with: + test_name: Sign aarch64 + runner_type: altinity-style-checker-aarch64 + data: ${{ needs.config_workflow.outputs.data }} + + FinishCIReport: + if: ${{ !cancelled() }} + needs: + - config_workflow + - dockers_build_amd + - dockers_build_arm + - build_amd_debug + - build_amd_asan + - build_amd_tsan + - build_amd_msan + - build_amd_ubsan + - build_amd_binary + - build_arm_asan + - build_arm_binary + - build_amd_release + - build_arm_release + - docker_server_image + - docker_keeper_image + - install_packages_amd_release + - install_packages_arm_release + - stateless_tests_amd_binary_old_analyzer_s3_storage_databasereplicated_parallel + - stateless_tests_amd_binary_old_analyzer_s3_storage_databasereplicated_sequential + - stateless_tests_amd_binary_parallelreplicas_s3_storage_parallel + - stateless_tests_amd_binary_parallelreplicas_s3_storage_sequential + - stateless_tests_arm_binary_parallel + - stateless_tests_arm_binary_sequential + - finish_workflow + - GrypeScanServer + - GrypeScanKeeper + - SignRelease + - SignAarch64 + runs-on: [self-hosted, altinity-on-demand, altinity-style-checker-aarch64] + steps: + - name: Check out repository code + uses: Altinity/checkout@19599efdf36c4f3f30eb55d5bb388896faea69f6 + with: + clear-repository: true + - name: Finalize workflow report + if: ${{ !cancelled() }} + uses: ./.github/actions/create_workflow_report + with: + workflow_config: ${{ fromJson(needs.config_workflow.outputs.data).workflow_config }} + final: true +
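The GrypeScan jobs above consult cache_success_base64, which appears to carry the names of jobs whose results were restored from the CI cache, base64-encoded; the scan runs only when the image job actually executed. The encoded literals can be checked with a quick shell snippet (illustrative, not part of the patch):

    # The if: conditions above compare against these base64-encoded job names.
    echo -n 'Docker server image' | base64    # RG9ja2VyIHNlcnZlciBpbWFnZQ==
    echo -n 'Docker keeper image' | base64    # RG9ja2VyIGtlZXBlciBpbWFnZQ==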
+ SourceUpload: + needs: [config_workflow, build_amd_release] + if: ${{ !failure() && !cancelled() }} + runs-on: [self-hosted, altinity-on-demand, altinity-style-checker-aarch64] + env: + COMMIT_SHA: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }} + PR_NUMBER: ${{ github.event.pull_request.number || 0 }} + VERSION: ${{ fromJson(needs.config_workflow.outputs.data).custom_data.version.string }} + steps: + - name: Check out repository code + uses: Altinity/checkout@19599efdf36c4f3f30eb55d5bb388896faea69f6 + with: + clear-repository: true + ref: ${{ fromJson(needs.config_workflow.outputs.data).git_ref }} + submodules: true + fetch-depth: 0 + filter: tree:0 + - name: Install aws cli + uses: unfor19/install-aws-cli-action@v1 + with: + version: 2 + arch: arm64 + - name: Create source tar + run: | + cd .. && tar czf $RUNNER_TEMP/build_source.src.tar.gz ClickHouse/ + - name: Upload source tar + run: | + if [ "$PR_NUMBER" -eq 0 ]; then + S3_PATH="REFs/$GITHUB_REF_NAME/$COMMIT_SHA/build_amd_release" + else + S3_PATH="PRs/$PR_NUMBER/$COMMIT_SHA/build_amd_release" + fi + + aws s3 cp $RUNNER_TEMP/build_source.src.tar.gz s3://altinity-build-artifacts/$S3_PATH/clickhouse-$VERSION.src.tar.gz diff --git a/.github/workflows/repo-sanity-checks.yml b/.github/workflows/repo-sanity-checks.yml new file mode 100644 index 000000000000..ec50a056b730 --- /dev/null +++ b/.github/workflows/repo-sanity-checks.yml @@ -0,0 +1,150 @@ +name: Repository Sanity Checks + +on: + workflow_dispatch: # Manual trigger only + + workflow_call: + +jobs: + sanity-checks: + runs-on: [self-hosted, altinity-on-demand, altinity-style-checker] + strategy: + fail-fast: false # Continue with other combinations if one fails + matrix: + include: + # Production packages + - env: prod + type: deb + base: ubuntu:22.04 + repo_url: https://builds.altinity.cloud/apt-repo + - env: prod + type: rpm + base: centos:8 + repo_url: https://builds.altinity.cloud/yum-repo + # FIPS Production packages + - env: prod-fips + type: deb + base: ubuntu:22.04 + repo_url: https://builds.altinity.cloud/fips-apt-repo + - env: prod-fips + type: rpm + base: centos:8 + repo_url: https://builds.altinity.cloud/fips-yum-repo + # Staging packages + - env: staging + type: deb + base: ubuntu:22.04 + repo_url: https://builds.staging.altinity.cloud/apt-repo + - env: staging + type: rpm + base: centos:8 + repo_url: https://builds.staging.altinity.cloud/yum-repo + # FIPS Staging packages + - env: staging-fips + type: deb + base: ubuntu:22.04 + repo_url: https://builds.staging.altinity.cloud/fips-apt-repo + - env: staging-fips + type: rpm + base: centos:8 + repo_url: https://builds.staging.altinity.cloud/fips-yum-repo + # Hotfix packages + - env: hotfix + type: deb + base: ubuntu:22.04 + repo_url: https://builds.altinity.cloud/hotfix-apt-repo + - env: hotfix + type: rpm + base: centos:8 + repo_url: https://builds.altinity.cloud/hotfix-yum-repo + # Antalya experimental packages + - env: antalya + type: deb + base: ubuntu:22.04 + repo_url: https://builds.altinity.cloud/antalya-apt-repo + - env: antalya + type: rpm + base: centos:8 + repo_url: https://builds.altinity.cloud/antalya-yum-repo + # Hotfix staging packages + - env: hotfix-staging + type: deb + base: ubuntu:22.04 + repo_url: https://builds.staging.altinity.cloud/hotfix-apt-repo + - env: hotfix-staging + type: rpm + base: centos:8 + repo_url: https://builds.staging.altinity.cloud/hotfix-yum-repo + # Antalya experimental staging packages + - env: antalya-staging + type: deb + base: ubuntu:22.04 + repo_url: 
https://builds.staging.altinity.cloud/antalya-apt-repo + - env: antalya-staging + type: rpm + base: centos:8 + repo_url: https://builds.staging.altinity.cloud/antalya-yum-repo + + steps: + - name: Run sanity check + run: | + cat << 'EOF' > sanity.sh + #!/bin/bash + set -e -x + + # Package installation commands based on type + if [ "${{ matrix.type }}" = "deb" ]; then + export DEBIAN_FRONTEND=noninteractive + apt-get update && apt-get install -y apt-transport-https ca-certificates curl gnupg2 dialog sudo + mkdir -p /usr/share/keyrings + curl -s "${REPO_URL}/pubkey.gpg" | gpg --dearmor > /usr/share/keyrings/altinity-archive-keyring.gpg + echo "deb [signed-by=/usr/share/keyrings/altinity-archive-keyring.gpg] ${REPO_URL} stable main" > /etc/apt/sources.list.d/altinity.list + apt-get update + apt-get install -y clickhouse-server clickhouse-client + else + sed -i 's/mirrorlist/#mirrorlist/g' /etc/yum.repos.d/CentOS-* + sed -i 's|#baseurl=http://mirror.centos.org|baseurl=http://vault.centos.org|g' /etc/yum.repos.d/CentOS-* + yum install -y curl gnupg2 sudo + if [[ "${{ matrix.env }}" == *"staging"* ]]; then + curl "${REPO_URL}/altinity-staging.repo" -o /etc/yum.repos.d/altinity-staging.repo + else + curl "${REPO_URL}/altinity.repo" -o /etc/yum.repos.d/altinity.repo + fi + yum install -y clickhouse-server clickhouse-client + fi + + # Ensure correct ownership + chown -R clickhouse /var/lib/clickhouse/ + chown -R clickhouse /var/log/clickhouse-server/ + + # Check server version + server_version=$(clickhouse-server --version) + echo "$server_version" | grep "altinity" || FAILED_SERVER=true + + # Start server and test + sudo -u clickhouse clickhouse-server --config-file /etc/clickhouse-server/config.xml --daemon + sleep 10 + clickhouse-client -q 'SELECT 1' + + # Check client version + client_version=$(clickhouse-client --version) + echo "$client_version" | grep "altinity" || FAILED_CLIENT=true + + # Report results + if [ "$FAILED_SERVER" = true ]; then + echo "::error::Server check failed - Version: $server_version" + exit 1 + elif [ "$FAILED_CLIENT" = true ]; then + echo "::error::Client check failed - Version: $client_version" + exit 1 + else + echo "All checks passed successfully!" 
+ fi + EOF + + chmod +x sanity.sh + docker run --rm \ + -v $(pwd)/sanity.sh:/sanity.sh \ + -e REPO_URL="${{ matrix.repo_url }}" \ + ${{ matrix.base }} \ + /sanity.sh diff --git a/.github/workflows/reusable_sign.yml b/.github/workflows/reusable_sign.yml new file mode 100644 index 000000000000..7bfed2758359 --- /dev/null +++ b/.github/workflows/reusable_sign.yml @@ -0,0 +1,166 @@ +name: Signing workflow +'on': + workflow_call: + inputs: + test_name: + description: the value of test type from tests/ci/ci_config.py, ends up as $CHECK_NAME ENV + required: true + type: string + runner_type: + description: the label of runner to use + required: true + type: string + run_command: + description: the command to launch the check + default: "" + required: false + type: string + checkout_depth: + description: the value of the git shallow checkout + required: false + type: number + default: 1 + submodules: + description: if the submodules should be checked out + required: false + type: boolean + default: false + additional_envs: + description: additional ENV variables to setup the job + type: string + data: + description: ci data + type: string + required: true + working-directory: + description: sets custom working directory + type: string + default: "$GITHUB_WORKSPACE/tests/ci" + secrets: + secret_envs: + description: if given, it's passed to the environments + required: false + AWS_SECRET_ACCESS_KEY: + description: the secret access key to the aws param store. + required: true + AWS_ACCESS_KEY_ID: + description: the access key id to the aws param store. + required: true + GPG_BINARY_SIGNING_KEY: + description: gpg signing key for packages. + required: true + GPG_BINARY_SIGNING_PASSPHRASE: + description: gpg signing key passphrase. + required: true + +env: + # Force the stdout and stderr streams to be unbuffered + PYTHONUNBUFFERED: 1 + CHECK_NAME: ${{inputs.test_name}} + AWS_ACCESS_KEY_ID: ${{ secrets.AWS_ACCESS_KEY_ID }} + AWS_DEFAULT_REGION: ${{ secrets.AWS_DEFAULT_REGION }} + AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_SECRET_ACCESS_KEY }} + CLICKHOUSE_TEST_STAT_LOGIN: ${{ secrets.CLICKHOUSE_TEST_STAT_LOGIN }} + CLICKHOUSE_TEST_STAT_PASSWORD: ${{ secrets.CLICKHOUSE_TEST_STAT_PASSWORD }} + CLICKHOUSE_TEST_STAT_URL: ${{ secrets.CLICKHOUSE_TEST_STAT_URL }} + DOCKER_PASSWORD: ${{ secrets.DOCKER_PASSWORD }} + ROBOT_TOKEN: ${{ secrets.ROBOT_TOKEN }} + +jobs: + runner_labels_setup: + name: Compute proper runner labels for the rest of the jobs + runs-on: ubuntu-latest + outputs: + runner_labels: ${{ steps.setVariables.outputs.runner_labels }} + steps: + - id: setVariables + name: Prepare runner_labels variables for the later steps + run: | + + # Prepend self-hosted + input="self-hosted, altinity-on-demand, ${input}" + + # Remove all whitespace + input="$(echo ${input} | tr -d [:space:])" + # Make something like a JSON array from comma-separated list + input="[ '${input//\,/\'\, \'}' ]" + + echo "runner_labels=$input" >> ${GITHUB_OUTPUT} + env: + input: ${{ inputs.runner_type }}
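The setVariables step above turns the comma-separated runner label list into the JSON-style array that fromJson() consumes later. A standalone re-run of the same logic, with the step's shell lines copied verbatim (the sample input value is illustrative):

    #!/bin/bash
    input="altinity-style-checker"                     # stands in for inputs.runner_type
    input="self-hosted, altinity-on-demand, ${input}"  # prepend mandatory labels
    input="$(echo ${input} | tr -d [:space:])"         # remove all whitespace
    input="[ '${input//\,/\'\, \'}' ]"                 # quote elements, wrap in brackets
    echo "$input"
    # expected: [ 'self-hosted', 'altinity-on-demand', 'altinity-style-checker' ]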
+ + Test: + needs: [runner_labels_setup] + runs-on: ${{ fromJson(needs.runner_labels_setup.outputs.runner_labels) }} + name: ${{inputs.test_name}}${{ fromJson(inputs.data).jobs_data.jobs_params[inputs.test_name].num_batches > 1 && format('-{0}',matrix.batch) || '' }} + env: + GITHUB_JOB_OVERRIDDEN: ${{inputs.test_name}}${{ fromJson(inputs.data).jobs_data.jobs_params[inputs.test_name].num_batches > 1 && format('-{0}',matrix.batch) || '' }} + strategy: + fail-fast: false # we always wait for entire matrix + matrix: + batch: ${{ fromJson(inputs.data).jobs_data.jobs_params[inputs.test_name].batches }} + steps: + - name: Check out repository code + uses: Altinity/checkout@19599efdf36c4f3f30eb55d5bb388896faea69f6 + with: + clear-repository: true + ref: ${{ fromJson(inputs.data).git_ref }} + submodules: ${{inputs.submodules}} + fetch-depth: ${{inputs.checkout_depth}} + filter: tree:0 + - name: Set build envs + run: | + cat >> "$GITHUB_ENV" << 'EOF' + CHECK_NAME=${{ inputs.test_name }} + ${{inputs.additional_envs}} + ${{secrets.secret_envs}} + DOCKER_TAG<<DOCKER_JSON + ${{ toJson(fromJson(inputs.data).docker_data.images) }} + DOCKER_JSON + EOF + - name: Setup batch + if: ${{ fromJson(inputs.data).jobs_data.jobs_params[inputs.test_name].num_batches > 1 }} + run: | + cat >> "$GITHUB_ENV" << 'EOF' + RUN_BY_HASH_NUM=${{matrix.batch}} + RUN_BY_HASH_TOTAL=${{ fromJson(inputs.data).jobs_data.jobs_params[inputs.test_name].num_batches }} + EOF + - name: Pre run + run: | + python3 "$GITHUB_WORKSPACE/tests/ci/ci.py" --infile ${{ toJson(inputs.data) }} --pre --job-name '${{inputs.test_name}}' + - name: Sign release + env: + GPG_BINARY_SIGNING_KEY: ${{ secrets.GPG_BINARY_SIGNING_KEY }} + GPG_BINARY_SIGNING_PASSPHRASE: ${{ secrets.GPG_BINARY_SIGNING_PASSPHRASE }} + run: | + cd "${{ inputs.working-directory }}" + python3 "$GITHUB_WORKSPACE/tests/ci/ci.py" \ + --infile ${{ toJson(inputs.data) }} \ + --job-name '${{inputs.test_name}}' \ + --run \ + --force \ + --run-command '''python3 sign_release.py''' + - name: Post run + if: ${{ !cancelled() }} + run: | + python3 "$GITHUB_WORKSPACE/tests/ci/ci.py" --infile ${{ toJson(inputs.data) }} --post --job-name '${{inputs.test_name}}' + - name: Mark as done + if: ${{ !cancelled() }} + run: | + python3 "$GITHUB_WORKSPACE/tests/ci/ci.py" --infile ${{ toJson(inputs.data) }} --mark-success --job-name '${{inputs.test_name}}' --batch ${{matrix.batch}} + - name: Upload signed hashes + uses: actions/upload-artifact@v4 + with: + name: ${{inputs.test_name}} signed-hashes + path: ${{ env.TEMP_PATH }}/*.gpg + - name: Clean + if: always() + uses: ./.github/actions/clean diff --git a/.github/workflows/scheduled_runs.yml b/.github/workflows/scheduled_runs.yml new file mode 100644 index 000000000000..9069ea7685f2 --- /dev/null +++ b/.github/workflows/scheduled_runs.yml @@ -0,0 +1,55 @@ +name: Scheduled Altinity Stable Builds + +on: + schedule: + - cron: '0 0 * * 6' #Weekly run for stable versions + - cron: '0 0 * * *' #Daily run for antalya versions + # Make sure that any changes to this file are actually tested with PRs + pull_request: + types: + - synchronize + - reopened + - opened + paths: + - '**/scheduled_runs.yml' + +jobs: + DailyRuns: + strategy: + fail-fast: false + matrix: + branch: + - antalya + name: ${{ matrix.branch }} + if: github.event.schedule != '0 0 * * 6' + runs-on: ubuntu-latest + steps: + - name: Run ${{ matrix.branch }} workflow + run: | + curl -L \ + -X POST \ + -H "Accept: application/vnd.github+json" \ + -H "Authorization: Bearer ${{ secrets.TOKEN }}" \ + -H "X-GitHub-Api-Version: 2022-11-28" \ + https://api.github.com/repos/Altinity/ClickHouse/actions/workflows/release_branches.yml/dispatches \ + -d '{"ref":"${{ matrix.branch }}"}' + + WeeklyRuns: + strategy: + fail-fast: false + matrix: + branch: + - customizations/24.8.14 + name: ${{ matrix.branch }} + if: github.event.schedule != '0 0 * * *' + runs-on: ubuntu-latest + steps: + - name: Run ${{ matrix.branch }} workflow + run: | + curl -L \ + -X POST \ + -H "Accept: application/vnd.github+json" \ + -H "Authorization: Bearer ${{ secrets.TOKEN }}" \ + -H "X-GitHub-Api-Version: 2022-11-28" \ + https://api.github.com/repos/Altinity/ClickHouse/actions/workflows/release_branches.yml/dispatches \ + -d '{"ref":"${{ matrix.branch }}"}'
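For ad-hoc testing, the dispatch calls above can also be issued with the gh CLI instead of raw curl (a sketch; assumes gh is authenticated with a token that is allowed to dispatch workflows in Altinity/ClickHouse):

    # Equivalent of the DailyRuns curl call: dispatch release_branches.yml on the antalya branch.
    gh workflow run release_branches.yml --repo Altinity/ClickHouse --ref antalya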
diff --git a/.github/workflows/sign_and_release.yml b/.github/workflows/sign_and_release.yml new file mode 100644 index 000000000000..f5a48dee97f5 --- /dev/null +++ b/.github/workflows/sign_and_release.yml @@ -0,0 +1,567 @@ +name: Sign and Release packages + +on: + workflow_dispatch: + inputs: + workflow_url: + description: 'The URL to the workflow run that produced the packages' + required: true + release_environment: + description: 'The environment to release to. "staging" or "production"' + required: true + default: 'staging' + package_version: + description: 'The version of the package to release' + required: true + type: string + GPG_PASSPHRASE: + description: 'GPG passphrase for signing (required for production releases)' + required: false + type: string + +env: + ARTIFACT_NAME: build_report_package_release + AWS_REGION: us-east-1 + SRC_BUCKET: altinity-build-artifacts + S3_STORAGE_BUCKET: altinity-test-reports + +jobs: + extract-package-info: + runs-on: [altinity-style-checker-aarch64, altinity-on-demand] + outputs: + docker_version: ${{ env.DOCKER_VERSION }}-${{ env.PACKAGE_VERSION }} + commit_hash: ${{ env.COMMIT_HASH }} + folder_time: ${{ env.FOLDER_TIME }} + needs_binary_processing: ${{ env.NEEDS_BINARY_PROCESSING }} + package_version: ${{ env.PACKAGE_VERSION }} + src_dir: ${{ env.SRC_DIR }} + test_results_src: ${{ env.TEST_RESULTS_SRC }} + altinity_build_feature: ${{ env.ALTINITY_BUILD_FEATURE }} + repo_prefix: ${{ env.REPO_PREFIX }} + src_url: ${{ env.SRC_URL }} + dest_url: ${{ env.DEST_URL }} + steps: + - name: Validate inputs + run: | + if [ -z "${{ inputs.workflow_url }}" ]; then + echo "Error: workflow_url is required" + exit 1 + fi + if [ -z "${{ inputs.package_version }}" ]; then + echo "Error: package_version is required" + exit 1 + fi + if [ "${{ inputs.release_environment }}" != "staging" ] && [ "${{ inputs.release_environment }}" != "production" ]; then + echo "Error: release_environment must be either 'staging' or 'production'" + exit 1 + fi + + - name: Download artifact "${{ env.ARTIFACT_NAME }}" + run: | + run_id=$(echo "${{ inputs.workflow_url }}" | grep -oE '[0-9]+$') + + # Get artifact ID + artifact_id=$(curl -s "https://api.github.com/repos/Altinity/ClickHouse/actions/runs/$run_id/artifacts" \ + | jq '.artifacts[] | select(.name == "'"${{ env.ARTIFACT_NAME }}"'") | .id') + + # Download artifact + curl -L -H "Authorization: Bearer ${{ secrets.GITHUB_TOKEN }}" \ + -o "${{ env.ARTIFACT_NAME }}" \ + "https://api.github.com/repos/Altinity/ClickHouse/actions/artifacts/$artifact_id/zip" + + - name: Unzip Artifact + run: | + unzip -o "${{ env.ARTIFACT_NAME }}" -d "artifact" + + - name: Extract and Parse JSON File + run: | + cd artifact + JSON_FILE=$(ls | grep "build_report.*package_release\.json" | head -n 1) + if [ -z "$JSON_FILE" ]; then + echo "Error: No JSON file matching the pattern was found" + exit 1 + fi + echo "Found JSON file: ${JSON_FILE}" + + # Extract client URL + CLIENT_URL=$(jq -r '.build_urls[] | select(test("clickhouse-client-.*-amd64.tgz$"))' "$JSON_FILE") + if [ -z "$CLIENT_URL" ]; then + echo "Error: No matching client URL found in JSON" + exit 1 + fi + echo "Found client URL: ${CLIENT_URL}" + echo "CLIENT_URL=$CLIENT_URL" >> $GITHUB_ENV + + - name: Extract and Validate Package Information + run: | + # Define regex patterns + PR_REGEX="PRs/([^/]+)/([^/]+)/([^/]+)/clickhouse-client-([^-]+)-amd64.tgz" + NONPR_REGEX="s3.amazonaws.com/([^/]+)/([^/]+)/([^/]+)/([^/]+)/clickhouse-client-([^-]+)-amd64.tgz" + + # Extract information based on URL pattern + 
if [[ "$CLIENT_URL" =~ $PR_REGEX ]]; then + echo "Matched PR pattern" + PR_NUMBER="${BASH_REMATCH[1]}" + COMMIT_HASH="${BASH_REMATCH[2]}" + PACKAGE_TYPE="${BASH_REMATCH[3]}" + PACKAGE_VERSION="${BASH_REMATCH[4]}" + DOCKER_VERSION="${PR_NUMBER}" + TEST_RESULTS_SRC="${PR_NUMBER}" + SRC_DIR="PRs/${PR_NUMBER}" + elif [[ "$CLIENT_URL" =~ $NONPR_REGEX ]]; then + echo "Matched non-PR pattern" + BRANCH="${BASH_REMATCH[2]}" + COMMIT_HASH="${BASH_REMATCH[3]}" + PACKAGE_TYPE="${BASH_REMATCH[4]}" + PACKAGE_VERSION="${BASH_REMATCH[5]}" + DOCKER_VERSION="0" + TEST_RESULTS_SRC="0" + SRC_DIR="${BRANCH}" + else + echo "Error: The client URL did not match any expected pattern" + exit 1 + fi + + # Verify package version + if [ "$PACKAGE_VERSION" != "${{ inputs.package_version }}" ]; then + echo "Error: Extracted package version ($PACKAGE_VERSION) does not match input package version (${{ inputs.package_version }})" + exit 1 + fi + + # Extract major version and determine binary processing need + MAJOR_VERSION=$(echo "$PACKAGE_VERSION" | cut -d. -f1) + NEEDS_BINARY_PROCESSING=$([ "$MAJOR_VERSION" -ge 24 ] && echo "true" || echo "false") + + # Extract feature and set repo prefix + ALTINITY_BUILD_FEATURE=$(echo "$PACKAGE_VERSION" | rev | cut -d. -f1 | rev) + case "$ALTINITY_BUILD_FEATURE" in + "altinityhotfix") REPO_PREFIX="hotfix-" ;; + "altinityfips") REPO_PREFIX="fips-" ;; + "altinityantalya") REPO_PREFIX="antalya-" ;; + "altinitystable"|"altinitytest") REPO_PREFIX="" ;; + *) + echo "Error: Build feature not supported: ${ALTINITY_BUILD_FEATURE}" + exit 1 + ;; + esac + + # Generate folder time + FOLDER_TIME=$(date -u +"%Y-%m-%dT%H-%M-%S.%3N") + + # Set all environment variables at once + { + echo "COMMIT_HASH=${COMMIT_HASH}" + echo "DOCKER_VERSION=${DOCKER_VERSION}" + echo "FOLDER_TIME=${FOLDER_TIME}" + echo "NEEDS_BINARY_PROCESSING=${NEEDS_BINARY_PROCESSING}" + echo "PACKAGE_VERSION=${PACKAGE_VERSION}" + echo "SRC_DIR=${SRC_DIR}" + echo "TEST_RESULTS_SRC=${TEST_RESULTS_SRC}" + echo "ALTINITY_BUILD_FEATURE=${ALTINITY_BUILD_FEATURE}" + echo "REPO_PREFIX=${REPO_PREFIX}" + echo "SRC_URL=s3://${SRC_BUCKET}/${SRC_DIR}/${COMMIT_HASH}" + echo "DEST_URL=s3://${S3_STORAGE_BUCKET}/builds/stable/v${PACKAGE_VERSION}/${FOLDER_TIME}" + } >> $GITHUB_ENV + + - name: Display Extracted Information + run: | + echo "Extracted information:" + echo "altinity_build_feature: ${ALTINITY_BUILD_FEATURE}" + echo "commit_hash: ${COMMIT_HASH}" + echo "docker_version: ${DOCKER_VERSION}" + echo "folder_time: ${FOLDER_TIME}" + echo "needs_binary_processing: ${NEEDS_BINARY_PROCESSING}" + echo "package_version: ${PACKAGE_VERSION}" + echo "repo_prefix: ${REPO_PREFIX}" + echo "src_bucket: ${SRC_BUCKET}" + echo "src_dir: ${SRC_DIR}" + echo "test_results_src: ${TEST_RESULTS_SRC}" + echo "src_url: ${SRC_URL}" + echo "dest_url: ${DEST_URL}" + + - name: Install aws cli + if: ${{ env.NEEDS_BINARY_PROCESSING == 'true' }} + uses: unfor19/install-aws-cli-action@v1 + with: + version: 2 + arch: arm64 + + - name: Process ARM binary + if: ${{ env.NEEDS_BINARY_PROCESSING == 'true' }} + env: + AWS_ACCESS_KEY_ID: ${{ secrets.AWS_ACCESS_KEY_ID }} + AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_SECRET_ACCESS_KEY }} + run: | + echo "Downloading clickhouse binary..." + if ! aws s3 cp "${SRC_URL}/package_aarch64/clickhouse" clickhouse; then + echo "Failed to download clickhouse binary" + exit 1 + fi + chmod +x clickhouse + + echo "Running clickhouse binary..." + ./clickhouse -q'q' + + echo "Stripping the binary..." 
+ strip clickhouse -o clickhouse-stripped + + echo "Uploading processed binaries..." + if ! aws s3 cp clickhouse "${SRC_URL}/package_aarch64/arm-bin/non-self-extracting/"; then + echo "Failed to upload clickhouse binary" + exit 1 + fi + if ! aws s3 cp clickhouse-stripped "${SRC_URL}/package_aarch64/arm-bin/non-self-extracting/"; then + echo "Failed to upload stripped clickhouse binary" + exit 1 + fi + + copy-packages: + needs: extract-package-info + runs-on: [altinity-func-tester, altinity-on-demand] + env: + AWS_ACCESS_KEY_ID: ${{ secrets.AWS_ACCESS_KEY_ID }} + AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_SECRET_ACCESS_KEY }} + SRC_URL: ${{ needs.extract-package-info.outputs.src_url }} + DEST_URL: ${{ needs.extract-package-info.outputs.dest_url }} + NEEDS_BINARY_PROCESSING: ${{ needs.extract-package-info.outputs.needs_binary_processing }} + steps: + - name: Install aws cli + uses: unfor19/install-aws-cli-action@v1 + with: + version: 2 + arch: amd64 + + # - name: Download signed hash artifacts + # run: | + # run_id=$(echo "${{ inputs.workflow_url }}" | grep -oE '[0-9]+$') + # mkdir -p signed-hashes/amd64 signed-hashes/arm64 + + # # Download AMD64 hashes + # artifact_id=$(curl -s \ + # -H "Authorization: Bearer ${{ secrets.GITHUB_TOKEN }}" \ + # -H "Accept: application/vnd.github.v3+json" \ + # "https://api.github.com/repos/Altinity/ClickHouse/actions/runs/$run_id/artifacts?per_page=1000" \ + # | jq -r --arg NAME "Sign release signed-hashes" '.artifacts[] | select(.name == $NAME) | .id') + # if [ -z "$artifact_id" ] || [ "$artifact_id" == "null" ]; then + # echo "Error: Could not find artifact 'Sign release signed-hashes' for run $run_id" + # exit 1 + # fi + # if ! curl -L \ + # -H "Authorization: Bearer ${{ secrets.GITHUB_TOKEN }}" \ + # -H "Accept: application/vnd.github.v3+json" \ + # -o "signed-hashes/amd64/hashes.zip" \ + # "https://api.github.com/repos/Altinity/ClickHouse/actions/artifacts/$artifact_id/zip"; then + # echo "Error: Failed to download AMD64 hashes" + # exit 1 + # fi + # unzip -o "signed-hashes/amd64/hashes.zip" -d signed-hashes/amd64 + + # # Download ARM64 hashes + # artifact_id=$(curl -s \ + # -H "Authorization: Bearer ${{ secrets.GITHUB_TOKEN }}" \ + # -H "Accept: application/vnd.github.v3+json" \ + # "https://api.github.com/repos/Altinity/ClickHouse/actions/runs/$run_id/artifacts?per_page=1000" \ + # | jq -r --arg NAME "Sign aarch64 signed-hashes" '.artifacts[] | select(.name == $NAME) | .id') + # if [ -z "$artifact_id" ] || [ "$artifact_id" == "null" ]; then + # echo "Error: Could not find artifact 'Sign aarch64 signed-hashes' for run $run_id" + # exit 1 + # fi + # if ! curl -L \ + # -H "Authorization: Bearer ${{ secrets.GITHUB_TOKEN }}" \ + # -H "Accept: application/vnd.github.v3+json" \ + # -o "signed-hashes/arm64/hashes.zip" \ + # "https://api.github.com/repos/Altinity/ClickHouse/actions/artifacts/$artifact_id/zip"; then + # echo "Error: Failed to download ARM64 hashes" + # exit 1 + # fi + # unzip -o "signed-hashes/arm64/hashes.zip" -d signed-hashes/arm64 + + # - name: Download packages for verification + # run: | + # # Create temporary directories for downloaded packages + # mkdir -p /tmp/arm_packages /tmp/amd_packages + + # # Download ARM packages + # echo "Downloading ARM packages for verification..." + # if ! aws s3 sync "${SRC_URL}/package_aarch64/" /tmp/arm_packages; then + # echo "Failed to download ARM packages" + # exit 1 + # fi + + # # Download AMD packages + # echo "Downloading AMD packages for verification..." + # if ! 
aws s3 sync "${SRC_URL}/package_release/" /tmp/amd_packages; then + # echo "Failed to download AMD packages" + # exit 1 + # fi + + # - name: Verify ARM packages + # run: | + # cd signed-hashes/arm64 + # # Verify all files + # find /tmp/arm_packages -type f | while read -r file; do + # if [ -f "$file" ]; then + # file_name=$(basename "$file") + # echo "Verifying $file_name..." + + # if ! gpg --verify "$file_name.sha256.gpg" 2>/dev/null; then + # echo "GPG verification failed for $file_name" + # exit 1 + # fi + # if ! sha256sum -c "$file_name.sha256.gpg" 2>/dev/null; then + # echo "SHA256 verification failed for $file_name" + # exit 1 + # fi + # fi + # done + + # - name: Verify AMD packages + # run: | + # cd signed-hashes/amd64 + # # Verify all files + # find /tmp/amd_packages -type f | while read -r file; do + # if [ -f "$file" ]; then + # file_name=$(basename "$file") + # echo "Verifying $file_name..." + + # if ! gpg --verify "$file_name.sha256.gpg" 2>/dev/null; then + # echo "GPG verification failed for $file_name" + # exit 1 + # fi + # if ! sha256sum -c "$file_name.sha256.gpg" 2>/dev/null; then + # echo "SHA256 verification failed for $file_name" + # exit 1 + # fi + # fi + # done + + - name: Move verified packages to destination + run: | + # Move ARM packages + echo "Moving verified ARM packages to destination..." + if ! aws s3 cp "${SRC_URL}/package_aarch64/" "${DEST_URL}/packages/ARM_PACKAGES/" --recursive; then + echo "Failed to move ARM packages to destination" + exit 1 + fi + + # Move AMD packages + echo "Moving verified AMD packages to destination..." + if ! aws s3 cp "${SRC_URL}/package_release/" "${DEST_URL}/packages/AMD_PACKAGES/" --recursive; then + echo "Failed to move AMD packages to destination" + exit 1 + fi + + # Clean up temporary directories + rm -rf /tmp/arm_packages /tmp/amd_packages + + - name: Separate ARM binary + run: | + aws s3 mv "${DEST_URL}/packages/ARM_PACKAGES/clickhouse" "${DEST_URL}/packages/ARM_PACKAGES/arm-bin/clickhouse" + aws s3 mv "${DEST_URL}/packages/ARM_PACKAGES/clickhouse-stripped" "${DEST_URL}/packages/ARM_PACKAGES/arm-bin/clickhouse-stripped" + + - name: Separate AMD binary + run: | + aws s3 mv "${DEST_URL}/packages/AMD_PACKAGES/clickhouse" "${DEST_URL}/packages/AMD_PACKAGES/amd-bin/clickhouse" + aws s3 mv "${DEST_URL}/packages/AMD_PACKAGES/clickhouse-stripped" "${DEST_URL}/packages/AMD_PACKAGES/amd-bin/clickhouse-stripped" + + - name: Process AMD binary + if: ${{ env.NEEDS_BINARY_PROCESSING == 'true' }} + run: | + echo "Downloading clickhouse binary..." + if ! aws s3 cp "${DEST_URL}/packages/AMD_PACKAGES/amd-bin/clickhouse" clickhouse; then + echo "Failed to download clickhouse binary" + exit 1 + fi + chmod +x clickhouse + + echo "Running clickhouse binary..." + ./clickhouse -q'q' + + echo "Stripping the binary..." + strip clickhouse -o clickhouse-stripped + + echo "Uploading processed binaries..." + if ! aws s3 cp clickhouse "${DEST_URL}/packages/AMD_PACKAGES/amd-bin/non-self-extracting/"; then + echo "Failed to upload clickhouse binary" + exit 1 + fi + if ! 
aws s3 cp clickhouse-stripped "${DEST_URL}/packages/AMD_PACKAGES/amd-bin/non-self-extracting/"; then + echo "Failed to upload stripped clickhouse binary" + exit 1 + fi + + copy-test-results: + needs: extract-package-info + runs-on: [altinity-style-checker-aarch64, altinity-on-demand] + env: + AWS_ACCESS_KEY_ID: ${{ secrets.AWS_ACCESS_KEY_ID }} + AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_SECRET_ACCESS_KEY }} + TEST_RESULTS_SRC: ${{ needs.extract-package-info.outputs.test_results_src }} + COMMIT_HASH: ${{ needs.extract-package-info.outputs.commit_hash }} + DEST_URL: ${{ needs.extract-package-info.outputs.dest_url }} + steps: + - name: Install aws cli + uses: unfor19/install-aws-cli-action@v1 + with: + version: 2 + arch: arm64 + - name: Copy test results to S3 + run: | + # Copy test results + echo "Copying test results..." + if ! aws s3 sync "s3://${SRC_BUCKET}/${TEST_RESULTS_SRC}/${COMMIT_HASH}" \ + "${DEST_URL}/test_results/"; then + echo "Failed to copy test results" + exit 1 + fi + + # publish-docker: + # needs: extract-package-info + # strategy: + # matrix: + # image_type: [server, keeper] + # variant: ['', '-alpine'] + # uses: ./.github/workflows/docker_publish.yml + # with: + # docker_image: altinityinfra/clickhouse-${{ matrix.image_type }}:${{ needs.extract-package-info.outputs.docker_version }}${{ matrix.variant }} + # release_environment: ${{ inputs.release_environment }} + # upload_artifacts: false + # s3_upload_path: "${{ needs.extract-package-info.outputs.dest_url }}/docker_images/${{ matrix.image_type }}${{ matrix.variant }}/" + # secrets: inherit + + sign-and-publish: + needs: [extract-package-info, copy-packages] + runs-on: arc-runners-clickhouse-signer + env: + GPG_PASSPHRASE: ${{ inputs.release_environment == 'production' && inputs.GPG_PASSPHRASE || secrets.GPG_PASSPHRASE }} + REPO_DNS_NAME: ${{ inputs.release_environment == 'production' && 'builds.altinity.cloud' || 'builds.staging.altinity.cloud' }} + REPO_NAME: ${{ inputs.release_environment == 'production' && 'altinity' || 'altinity-staging' }} + REPO_SUBTITLE: ${{ inputs.release_environment == 'production' && 'Stable Builds' || 'Staging Builds' }} + PACKAGE_VERSION: ${{ needs.extract-package-info.outputs.package_version }} + FOLDER_TIME: ${{ needs.extract-package-info.outputs.folder_time }} + REPO_PREFIX: ${{ needs.extract-package-info.outputs.repo_prefix }} + NEEDS_BINARY_PROCESSING: ${{ needs.extract-package-info.outputs.needs_binary_processing }} + DEST_URL: ${{ needs.extract-package-info.outputs.dest_url }} + RELEASE_ENVIRONMENT: ${{ inputs.release_environment }} + steps: + - name: Install aws cli + uses: unfor19/install-aws-cli-action@v1 + with: + version: 2 + arch: arm64 + + - name: Checkout repository + uses: actions/checkout@v4 + with: + repository: Altinity/ClickHouse + ref: antalya + path: ClickHouse + + - name: Download packages + run: | + if ! aws s3 cp "${DEST_URL}/packages/ARM_PACKAGES/" /home/runner/.cache/tmp/packages --recursive; then + echo "Failed to download ARM packages" + exit 1 + fi + if ! aws s3 cp "${DEST_URL}/packages/AMD_PACKAGES/" /home/runner/.cache/tmp/packages --recursive; then + echo "Failed to download AMD packages" + exit 1 + fi + env: + AWS_ACCESS_KEY_ID: ${{ secrets.AWS_ACCESS_KEY_ID }} + AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_SECRET_ACCESS_KEY }} + + - name: Setup GPG + run: | + if [ -z "${GPG_PASSPHRASE}" ] + then + echo "GPG_PASSPHRASE is not set" + exit 1 + fi + + - name: Process GPG key + run: | + echo "Processing GPG key..." + if ! 
aws secretsmanager get-secret-value --secret-id arn:aws:secretsmanager:us-east-1:446527654354:secret:altinity_staging_gpg-Rqbe8S --query SecretString --output text | sed -e "s/^'//" -e "s/'$//" | jq -r '.altinity_staging_gpg | @base64d' | gpg --batch --import; then + echo "Failed to import GPG key" + exit 1 + fi + gpg --list-secret-keys --with-keygrip + gpgconf --kill gpg-agent + gpg-agent --daemon --allow-preset-passphrase + if ! aws ssm get-parameter --name /gitlab-runner/key-encrypting-key --with-decryption --query Parameter.Value --output text | sudo tee /root/.key-encrypting-key >/dev/null; then + echo "Failed to get key encrypting key" + exit 1 + fi + GPG_KEY_NAME=$(gpg --list-secret-keys | grep uid | head --lines 1 | tr -s " " | cut -d " " -f 4-) + GPG_KEY_ID=$(gpg --list-secret-keys --with-keygrip "${GPG_KEY_NAME}" | grep Keygrip | head --lines 1 | tr -s " " | cut -d " " -f 4) + echo "$GPG_PASSPHRASE" | base64 -d | sudo openssl enc -d -aes-256-cbc -pbkdf2 -pass file:/root/.key-encrypting-key -in - -out - | /usr/lib/gnupg/gpg-preset-passphrase --preset $GPG_KEY_ID + + - name: Run Ansible playbook + run: | + echo "Running Ansible playbook for signing and publishing..." + echo "ansible-playbook -i ClickHouse/tests/ci/release/packaging/ansible/inventory/localhost.yml -e aws_region=$AWS_REGION -e gpg_key_id=\"$GPG_KEY_ID\" -e gpg_key_name=\"$GPG_KEY_NAME\" -e local_repo_path="/home/runner/.cache/${{ inputs.release_environment }}" -e pkgver=\"${PACKAGE_VERSION}\" -e release_environment=$RELEASE_ENVIRONMENT -e repo_dns_name=$REPO_DNS_NAME -e repo_name=$REPO_NAME -e repo_prefix=\"$REPO_PREFIX\" -e repo_subtitle=\"$REPO_SUBTITLE\" -e s3_pkgs_bucket=$S3_STORAGE_BUCKET -e s3_pkgs_path=\"builds/stable/v${PACKAGE_VERSION}/${FOLDER_TIME}\" -e repo_path=\"/home/runner/.cache/${{ inputs.release_environment }}\" ClickHouse/tests/ci/release/packaging/ansible/sign-and-release.yml " + if ! ansible-playbook -i ClickHouse/tests/ci/release/packaging/ansible/inventory/localhost.yml \ + -e aws_region=$AWS_REGION \ + -e gpg_key_id="$GPG_KEY_ID" \ + -e gpg_key_name="$GPG_KEY_NAME" \ + -e local_repo_path="/home/runner/.cache/${{ inputs.release_environment }}" \ + -e pkgver="${PACKAGE_VERSION}" \ + -e release_environment=$RELEASE_ENVIRONMENT \ + -e repo_dns_name=$REPO_DNS_NAME \ + -e repo_name=$REPO_NAME \ + -e repo_prefix="$REPO_PREFIX" \ + -e repo_subtitle="$REPO_SUBTITLE" \ + -e s3_pkgs_bucket=$S3_STORAGE_BUCKET \ + -e s3_pkgs_path="builds/stable/v${PACKAGE_VERSION}/${FOLDER_TIME}" \ + ClickHouse/tests/ci/release/packaging/ansible/sign-and-release.yml; then + echo "Ansible playbook failed" + exit 1 + fi + gpgconf --kill gpg-agent + ls -hal + + - name: Cleanup temporary files + if: always() + run: | + echo "Cleaning up temporary files..." 
+ rm -rf /home/runner/.cache/tmp/packages || true + + repo-sanity-check: + needs: sign-and-publish + uses: Altinity/ClickHouse/.github/workflows/repo-sanity-checks.yml@antalya + + copy-to-released: + needs: [extract-package-info, sign-and-publish] + if: ${{ inputs.release_environment == 'production' }} + runs-on: [altinity-style-checker-aarch64, altinity-on-demand] + env: + AWS_ACCESS_KEY_ID: ${{ secrets.AWS_ACCESS_KEY_ID }} + AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_SECRET_ACCESS_KEY }} + PACKAGE_VERSION: ${{ needs.extract-package-info.outputs.package_version }} + DEST_URL: ${{ needs.extract-package-info.outputs.dest_url }} + steps: + - name: Install aws cli + uses: unfor19/install-aws-cli-action@v1 + with: + version: 2 + arch: arm64 + + - name: Copy to released directory + run: | + echo "Copying to released directory..." + echo "Source: ${DEST_URL}/" + echo "Destination: s3://${S3_STORAGE_BUCKET}/builds/released/v${PACKAGE_VERSION}/" + + if ! aws s3 sync "${DEST_URL}/" "s3://${S3_STORAGE_BUCKET}/builds/released/v${PACKAGE_VERSION}/" --no-progress; then + echo "Failed to copy to released directory" + exit 1 + fi + + echo "Verifying copy operation..." + if ! aws s3 ls "s3://${S3_STORAGE_BUCKET}/builds/released/v${PACKAGE_VERSION}/" | grep -q "packages"; then + echo "Error: Packages directory not found in destination" + exit 1 + fi diff --git a/.github/workflows/vectorsearchstress.yml b/.github/workflows/vectorsearchstress.yml index 9efa31333d21..ebf1544f7fbd 100644 --- a/.github/workflows/vectorsearchstress.yml +++ b/.github/workflows/vectorsearchstress.yml @@ -16,7 +16,7 @@ env: jobs: config_workflow: - runs-on: [self-hosted, style-checker-aarch64] + runs-on: [self-hosted, altinity-on-demand, altinity-style-checker-aarch64] needs: [] name: "Config Workflow" outputs: @@ -28,6 +28,26 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Config Workflow" + + - name: Note report location to summary + env: + PR_NUMBER: ${{ github.event.pull_request.number || 0 }} + COMMIT_SHA: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }} + run: | + if [ "$PR_NUMBER" -eq 0 ]; then + PREFIX="REFs/$GITHUB_REF_NAME/$COMMIT_SHA" + else + PREFIX="PRs/$PR_NUMBER/$COMMIT_SHA" + fi + REPORT_LINK=https://s3.amazonaws.com/altinity-build-artifacts/$PREFIX/$GITHUB_RUN_ID/ci_run_report.html + echo "Workflow Run Report: [View Report]($REPORT_LINK)" >> $GITHUB_STEP_SUMMARY + - name: Prepare env script run: | rm -rf ./ci/tmp @@ -55,7 +75,7 @@ jobs: fi dockers_build_amd: - runs-on: [self-hosted, style-checker] + runs-on: [self-hosted, altinity-on-demand, altinity-style-checker] needs: [config_workflow] name: "Dockers Build (amd)" outputs: @@ -67,6 +87,13 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Dockers Build (amd)" + - name: Prepare env script run: | rm -rf ./ci/tmp @@ -94,7 +121,7 @@ jobs: fi dockers_build_arm: - runs-on: [self-hosted, style-checker-aarch64] + runs-on: [self-hosted, altinity-on-demand, altinity-style-checker-aarch64] needs: [config_workflow] name: "Dockers Build (arm)" outputs: @@ -106,6 +133,13 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: 
"Dockers Build (arm)" + - name: Prepare env script run: | rm -rf ./ci/tmp @@ -133,7 +167,7 @@ jobs: fi vector_search_stress: - runs-on: [self-hosted, arm-medium] + runs-on: [self-hosted, altinity-on-demand, altinity-func-tester-aarch64] needs: [config_workflow, dockers_build_amd, dockers_build_arm] name: "Vector Search Stress" outputs: @@ -145,6 +179,13 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Vector Search Stress" + - name: Prepare env script run: | rm -rf ./ci/tmp @@ -172,7 +213,7 @@ jobs: fi finish_workflow: - runs-on: [self-hosted, style-checker-aarch64] + runs-on: [self-hosted, altinity-on-demand, altinity-style-checker-aarch64] needs: [config_workflow, dockers_build_amd, dockers_build_arm, vector_search_stress] if: ${{ always() }} name: "Finish Workflow" @@ -185,6 +226,13 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Finish Workflow" + - name: Prepare env script run: | rm -rf ./ci/tmp diff --git a/ci/defs/defs.py b/ci/defs/defs.py index 561f7daa53cb..8adc85dc249d 100644 --- a/ci/defs/defs.py +++ b/ci/defs/defs.py @@ -1,36 +1,59 @@ from praktika import Artifact, Docker, Job, Secret from praktika.utils import MetaClasses, Utils +from settings import altinity_overrides # i.e. "ClickHouse/ci/tmp" TEMP_DIR = f"{Utils.cwd()}/ci/tmp" # == _Settings.TEMP_DIR != env_helper.TEMP_PATH -SYNC = "CH Inc sync" +SYNC = "Altinity sync" -S3_BUCKET_NAME = "clickhouse-builds" -S3_REPORT_BUCKET_NAME = "clickhouse-test-reports" -S3_BUCKET_HTTP_ENDPOINT = "clickhouse-builds.s3.amazonaws.com" -S3_REPORT_BUCKET_HTTP_ENDPOINT = "s3.amazonaws.com/clickhouse-test-reports" +S3_BUCKET_NAME = altinity_overrides.S3_BUCKET_NAME +S3_REPORT_BUCKET_NAME = altinity_overrides.S3_REPORT_BUCKET_NAME +S3_BUCKET_HTTP_ENDPOINT = altinity_overrides.S3_BUCKET_HTTP_ENDPOINT +S3_REPORT_BUCKET_HTTP_ENDPOINT = altinity_overrides.S3_REPORT_BUCKET_HTTP_ENDPOINT class RunnerLabels: CI_SERVICES = "ci_services" CI_SERVICES_EBS = "ci_services_ebs" - FUNC_TESTER_AMD = ["self-hosted", "amd-medium"] - FUNC_TESTER_ARM = ["self-hosted", "arm-medium"] - AMD_LARGE = ["self-hosted", "amd-large"] - ARM_LARGE = ["self-hosted", "arm-large"] - AMD_MEDIUM = ["self-hosted", "amd-medium"] - ARM_MEDIUM = ["self-hosted", "arm-medium"] - AMD_MEDIUM_CPU = ["self-hosted", "amd-medium-cpu"] - ARM_MEDIUM_CPU = ["self-hosted", "arm-medium-cpu"] - AMD_MEDIUM_MEM = ["self-hosted", "amd-medium-mem"] - ARM_MEDIUM_MEM = ["self-hosted", "arm-medium-mem"] - AMD_SMALL = ["self-hosted", "amd-small"] - ARM_SMALL = ["self-hosted", "arm-small"] - AMD_SMALL_MEM = ["self-hosted", "amd-small-mem"] - ARM_SMALL_MEM = ["self-hosted", "arm-small-mem"] - STYLE_CHECK_AMD = ["self-hosted", "style-checker"] - STYLE_CHECK_ARM = ["self-hosted", "style-checker-aarch64"] + BUILDER_AMD = ["self-hosted", "altinity-on-demand", "altinity-builder"] + BUILDER_ARM = ["self-hosted", "altinity-on-demand", "altinity-builder"] + FUNC_TESTER_AMD = ["self-hosted", "altinity-on-demand", "altinity-func-tester"] + FUNC_TESTER_ARM = [ + "self-hosted", + "altinity-on-demand", + "altinity-func-tester-aarch64", + ] + AMD_LARGE = ["self-hosted", "altinity-on-demand", "altinity-func-tester"] + ARM_LARGE = ["self-hosted", "altinity-on-demand", "altinity-func-tester-aarch64"] + AMD_MEDIUM = ["self-hosted", "altinity-on-demand", 
"altinity-func-tester"] + ARM_MEDIUM = ["self-hosted", "altinity-on-demand", "altinity-func-tester-aarch64"] + AMD_MEDIUM_CPU = ["self-hosted", "altinity-on-demand", "altinity-func-tester"] + ARM_MEDIUM_CPU = [ + "self-hosted", + "altinity-on-demand", + "altinity-func-tester-aarch64", + ] + AMD_MEDIUM_MEM = ["self-hosted", "altinity-on-demand", "altinity-func-tester"] + ARM_MEDIUM_MEM = [ + "self-hosted", + "altinity-on-demand", + "altinity-func-tester-aarch64", + ] + AMD_SMALL = ["self-hosted", "altinity-on-demand", "altinity-style-checker"] + ARM_SMALL = ["self-hosted", "altinity-on-demand", "altinity-style-checker-aarch64"] + AMD_SMALL_MEM = ["self-hosted", "altinity-on-demand", "altinity-style-checker"] + ARM_SMALL_MEM = [ + "self-hosted", + "altinity-on-demand", + "altinity-style-checker-aarch64", + ] + STYLE_CHECK_AMD = ["self-hosted", "altinity-on-demand", "altinity-style-checker"] + STYLE_CHECK_ARM = [ + "self-hosted", + "altinity-on-demand", + "altinity-style-checker-aarch64", + ] class CIFiles: @@ -38,7 +61,7 @@ class CIFiles: UNIT_TESTS_BIN = f"{TEMP_DIR}/build/src/unit_tests_dbms" -BASE_BRANCH = "master" +BASE_BRANCH = altinity_overrides.MAIN_BRANCH azure_secret = Secret.Config( name="azure_connection_string", @@ -53,237 +76,242 @@ class CIFiles: SECRETS = [ Secret.Config( - name="dockerhub_robot_password", - type=Secret.Type.AWS_SSM_PARAMETER, + name=altinity_overrides.DOCKERHUB_SECRET, + type=Secret.Type.GH_SECRET, ), Secret.Config( - name="clickhouse-test-stat-url", - type=Secret.Type.AWS_SSM_PARAMETER, - region="us-east-1", + name=altinity_overrides.SECRET_CI_DB_URL, + type=Secret.Type.GH_SECRET, ), Secret.Config( - name="clickhouse-test-stat-login", - type=Secret.Type.AWS_SSM_PARAMETER, - region="us-east-1", + name=altinity_overrides.SECRET_CI_DB_USER, + type=Secret.Type.GH_SECRET, ), Secret.Config( - name="clickhouse-test-stat-password", - type=Secret.Type.AWS_SSM_PARAMETER, - region="us-east-1", + name=altinity_overrides.SECRET_CI_DB_PASSWORD, + type=Secret.Type.GH_SECRET, ), - azure_secret, + # azure_secret, chcache_secret, + # Secret.Config( + # name="woolenwolf_gh_app.clickhouse-app-id", + # type=Secret.Type.AWS_SSM_SECRET, + # ), + # Secret.Config( + # name="woolenwolf_gh_app.clickhouse-app-key", + # type=Secret.Type.AWS_SSM_SECRET, + # ), Secret.Config( - name="woolenwolf_gh_app.clickhouse-app-id", - type=Secret.Type.AWS_SSM_SECRET, + name="AWS_ACCESS_KEY_ID", + type=Secret.Type.GH_SECRET, ), Secret.Config( - name="woolenwolf_gh_app.clickhouse-app-key", - type=Secret.Type.AWS_SSM_SECRET, + name="AWS_SECRET_ACCESS_KEY", + type=Secret.Type.GH_SECRET, ), ] DOCKERS = [ Docker.Config( - name="clickhouse/style-test", + name="altinityinfra/style-test", path="./ci/docker/style-test", platforms=Docker.Platforms.arm_amd, depends_on=[], ), Docker.Config( - name="clickhouse/fasttest", + name="altinityinfra/fasttest", path="./ci/docker/fasttest", platforms=Docker.Platforms.arm_amd, depends_on=[], ), Docker.Config( - name="clickhouse/binary-builder", + name="altinityinfra/binary-builder", path="./ci/docker/binary-builder", platforms=Docker.Platforms.arm_amd, - depends_on=["clickhouse/fasttest"], + depends_on=["altinityinfra/fasttest"], ), Docker.Config( - name="clickhouse/stateless-test", + name="altinityinfra/stateless-test", path="./ci/docker/stateless-test", platforms=Docker.Platforms.arm_amd, - depends_on=["clickhouse/test-base"], + depends_on=["altinityinfra/test-base"], ), Docker.Config( - name="clickhouse/cctools", + name="altinityinfra/cctools", 
path="./ci/docker/cctools", platforms=Docker.Platforms.arm_amd, - depends_on=["clickhouse/fasttest"], + depends_on=["altinityinfra/fasttest"], ), Docker.Config( - name="clickhouse/test-base", + name="altinityinfra/test-base", path="./ci/docker/test-base", platforms=Docker.Platforms.arm_amd, depends_on=[], ), Docker.Config( - name="clickhouse/stress-test", + name="altinityinfra/stress-test", path="./ci/docker/stress-test", platforms=Docker.Platforms.arm_amd, - depends_on=["clickhouse/stateless-test"], + depends_on=["altinityinfra/stateless-test"], ), Docker.Config( - name="clickhouse/fuzzer", + name="altinityinfra/fuzzer", path="./ci/docker/fuzzer", platforms=Docker.Platforms.arm_amd, - depends_on=["clickhouse/test-base"], + depends_on=["altinityinfra/test-base"], ), Docker.Config( - name="clickhouse/performance-comparison", + name="altinityinfra/performance-comparison", path="./ci/docker/performance-comparison", platforms=Docker.Platforms.arm_amd, - depends_on=["clickhouse/test-base"], + depends_on=["altinityinfra/test-base"], ), Docker.Config( - name="clickhouse/keeper-jepsen-test", + name="altinityinfra/keeper-jepsen-test", path="./ci/docker/keeper-jepsen-test", platforms=Docker.Platforms.arm_amd, - depends_on=["clickhouse/test-base"], + depends_on=["altinityinfra/test-base"], ), Docker.Config( - name="clickhouse/server-jepsen-test", + name="altinityinfra/server-jepsen-test", path="./ci/docker/server-jepsen-test", platforms=Docker.Platforms.arm_amd, - depends_on=["clickhouse/test-base"], + depends_on=["altinityinfra/test-base"], ), Docker.Config( - name="clickhouse/integration-test", + name="altinityinfra/integration-test", path="./ci/docker/integration/base", platforms=Docker.Platforms.arm_amd, - depends_on=["clickhouse/test-base"], + depends_on=["altinityinfra/test-base"], ), Docker.Config( - name="clickhouse/integration-tests-runner", + name="altinityinfra/integration-tests-runner", path="./ci/docker/integration/runner", platforms=Docker.Platforms.arm_amd, - depends_on=["clickhouse/test-base"], + depends_on=["altinityinfra/test-base"], ), Docker.Config( - name="clickhouse/integration-test-with-unity-catalog", + name="altinityinfra/integration-test-with-unity-catalog", path="./ci/docker/integration/clickhouse_with_unity_catalog", platforms=Docker.Platforms.arm_amd, depends_on=[], ), Docker.Config( - name="clickhouse/integration-test-with-hms", + name="altinityinfra/integration-test-with-hms", path="./ci/docker/integration/clickhouse_with_hms_catalog", platforms=Docker.Platforms.arm_amd, depends_on=[], ), Docker.Config( - name="clickhouse/integration-helper", + name="altinityinfra/integration-helper", path="./ci/docker/integration/helper_container", platforms=Docker.Platforms.arm_amd, depends_on=[], ), Docker.Config( - name="clickhouse/kerberos-kdc", + name="altinityinfra/kerberos-kdc", path="./ci/docker/integration/kerberos_kdc", platforms=[Docker.Platforms.AMD], depends_on=[], ), Docker.Config( - name="clickhouse/test-mysql80", + name="altinityinfra/test-mysql80", path="./ci/docker/integration/mysql80", platforms=Docker.Platforms.arm_amd, depends_on=[], ), Docker.Config( - name="clickhouse/test-mysql57", + name="altinityinfra/test-mysql57", path="./ci/docker/integration/mysql57", platforms=Docker.Platforms.AMD, depends_on=[], ), Docker.Config( - name="clickhouse/mysql-golang-client", + name="altinityinfra/mysql-golang-client", path="./ci/docker/integration/mysql_golang_client", platforms=Docker.Platforms.arm_amd, depends_on=[], ), Docker.Config( - name="clickhouse/mysql-java-client", 
+ name="altinityinfra/mysql-java-client", path="./ci/docker/integration/mysql_java_client", platforms=Docker.Platforms.arm_amd, depends_on=[], ), Docker.Config( - name="clickhouse/mysql-js-client", + name="altinityinfra/mysql-js-client", path="./ci/docker/integration/mysql_js_client", platforms=Docker.Platforms.arm_amd, depends_on=[], ), Docker.Config( - name="clickhouse/arrowflight-server-test", + name="altinityinfra/arrowflight-server-test", path="./ci/docker/integration/arrowflight", platforms=Docker.Platforms.arm_amd, depends_on=[], ), Docker.Config( - name="clickhouse/dotnet-client", + name="altinityinfra/dotnet-client", path="./ci/docker/integration/dotnet_client", platforms=Docker.Platforms.arm_amd, depends_on=[], ), Docker.Config( - name="clickhouse/mysql-php-client", + name="altinityinfra/mysql-php-client", path="./ci/docker/integration/mysql_php_client", platforms=Docker.Platforms.arm_amd, depends_on=[], ), Docker.Config( - name="clickhouse/nginx-dav", + name="altinityinfra/nginx-dav", path="./ci/docker/integration/nginx_dav", platforms=Docker.Platforms.arm_amd, depends_on=[], ), Docker.Config( - name="clickhouse/postgresql-java-client", + name="altinityinfra/postgresql-java-client", path="./ci/docker/integration/postgresql_java_client", platforms=Docker.Platforms.arm_amd, depends_on=[], ), Docker.Config( - name="clickhouse/python-bottle", + name="altinityinfra/python-bottle", path="./ci/docker/integration/resolver", platforms=Docker.Platforms.arm_amd, depends_on=[], ), Docker.Config( - name="clickhouse/s3-proxy", + name="altinityinfra/s3-proxy", path="./ci/docker/integration/s3_proxy", platforms=Docker.Platforms.arm_amd, depends_on=[], ), + # Docker.Config( + # name="clickhouse/docs-builder", + # path="./ci/docker/docs-builder", + # platforms=Docker.Platforms.arm_amd, + # depends_on=[], + # ), Docker.Config( - name="clickhouse/docs-builder", - path="./ci/docker/docs-builder", - platforms=Docker.Platforms.arm_amd, - depends_on=[], - ), - Docker.Config( - name="clickhouse/install-deb-test", + name="altinityinfra/install-deb-test", path="./ci/docker/install/deb", platforms=Docker.Platforms.arm_amd, depends_on=[], ), Docker.Config( - name="clickhouse/install-rpm-test", + name="altinityinfra/install-rpm-test", path="./ci/docker/install/rpm", platforms=Docker.Platforms.arm_amd, depends_on=[], ), Docker.Config( - name="clickhouse/sqlancer-test", + name="altinityinfra/sqlancer-test", path="./ci/docker/sqlancer-test", platforms=Docker.Platforms.arm_amd, depends_on=[], ), Docker.Config( - name="clickhouse/mysql_dotnet_client", + name="altinityinfra/mysql_dotnet_client", path="./ci/docker/integration/mysql_dotnet_client", platforms=Docker.Platforms.arm_amd, depends_on=[], @@ -364,12 +392,14 @@ class ToolSet: class ArtifactNames: CH_AMD_DEBUG = "CH_AMD_DEBUG" CH_AMD_RELEASE = "CH_AMD_RELEASE" + CH_AMD_RELEASE_STRIPPED = "CH_AMD_RELEASE_STRIPPED" CH_AMD_ASAN = "CH_AMD_ASAN" CH_AMD_TSAN = "CH_AMD_TSAN" CH_AMD_MSAN = "CH_AMD_MSAN" CH_AMD_UBSAN = "CH_AMD_UBSAN" CH_AMD_BINARY = "CH_AMD_BINARY" CH_ARM_RELEASE = "CH_ARM_RELEASE" + CH_ARM_RELEASE_STRIPPED = "CH_ARM_RELEASE_STRIPPED" CH_ARM_ASAN = "CH_ARM_ASAN" CH_ARM_TSAN = "CH_ARM_TSAN" @@ -444,6 +474,16 @@ class ArtifactConfigs: ArtifactNames.CH_LOONGARCH64, ] ) + clickhouse_stripped_binaries = Artifact.Config( + name="...", + type=Artifact.Type.S3, + path=f"{TEMP_DIR}/build/programs/self-extracting/clickhouse-stripped", + ).parametrize( + names=[ + ArtifactNames.CH_AMD_RELEASE_STRIPPED, + ArtifactNames.CH_ARM_RELEASE_STRIPPED, + ] + ) 
clickhouse_debians = Artifact.Config( name="*", type=Artifact.Type.S3, diff --git a/ci/defs/job_configs.py b/ci/defs/job_configs.py index 88534171a805..52be688303a7 100644 --- a/ci/defs/job_configs.py +++ b/ci/defs/job_configs.py @@ -5,10 +5,14 @@ LIMITED_MEM = Utils.physical_memory() - 2 * 1024**3 +# NOTE (strtgbb): We use ZRAM, so it's okay to use more memory than is physically available +LIMITED_MEM = LIMITED_MEM * 2 + BINARY_DOCKER_COMMAND = ( - "clickhouse/binary-builder+--network=host+" + "altinityinfra/binary-builder+--network=host+" f"--memory={Utils.physical_memory() * 95 // 100}+" f"--memory-reservation={Utils.physical_memory() * 9 // 10}" + '+--env=AWS_ACCESS_KEY_ID="$AWS_ACCESS_KEY_ID"+--env=AWS_SECRET_ACCESS_KEY="$AWS_SECRET_ACCESS_KEY"' ) if Utils.is_arm(): @@ -28,7 +32,7 @@ "./programs", "./rust", "./ci/jobs/build_clickhouse.py", - "./ci/jobs/scripts/job_hooks/build_profile_hook.py", + # "./ci/jobs/scripts/job_hooks/build_profile_hook.py", "./utils/list-licenses", "./utils/self-extracting-executable", ], @@ -52,7 +56,7 @@ # some tests can be flaky due to very slow disks - use tmpfs for temporary ClickHouse files # --cap-add=SYS_PTRACE and --privileged for gdb in docker # --root/--privileged/--cgroupns=host is required for clickhouse-test --memory-limit - run_in_docker=f"clickhouse/stateless-test+--memory={LIMITED_MEM}+--cgroupns=host+--cap-add=SYS_PTRACE+--privileged+--security-opt seccomp=unconfined+--tmpfs /tmp/clickhouse:mode=1777+--volume=./ci/tmp/var/lib/clickhouse:/var/lib/clickhouse+--volume=./ci/tmp/etc/clickhouse-client:/etc/clickhouse-client+--volume=./ci/tmp/etc/clickhouse-server:/etc/clickhouse-server+--volume=./ci/tmp/etc/clickhouse-server1:/etc/clickhouse-server1+--volume=./ci/tmp/etc/clickhouse-server2:/etc/clickhouse-server2+--volume=./ci/tmp/var/log:/var/log+root", + run_in_docker=f"altinityinfra/stateless-test+--memory={LIMITED_MEM}+--cgroupns=host+--cap-add=SYS_PTRACE+--privileged+--security-opt seccomp=unconfined+--tmpfs /tmp/clickhouse:mode=1777+--volume=./ci/tmp/var/lib/clickhouse:/var/lib/clickhouse+--volume=./ci/tmp/etc/clickhouse-client:/etc/clickhouse-client+--volume=./ci/tmp/etc/clickhouse-server:/etc/clickhouse-server+--volume=./ci/tmp/etc/clickhouse-server1:/etc/clickhouse-server1+--volume=./ci/tmp/etc/clickhouse-server2:/etc/clickhouse-server2+--volume=./ci/tmp/var/log:/var/log+root+--env=AZURE_STORAGE_KEY=$AZURE_STORAGE_KEY+--env=AZURE_ACCOUNT_NAME=$AZURE_ACCOUNT_NAME+--env=AZURE_CONTAINER_NAME=$AZURE_CONTAINER_NAME+--env=AZURE_STORAGE_ACCOUNT_URL=$AZURE_STORAGE_ACCOUNT_URL", digest_config=Job.CacheDigestConfig( include_paths=[ "./ci/jobs/functional_tests.py", @@ -65,6 +69,7 @@ "./tests/config", "./tests/*.txt", "./ci/docker/stateless-test", + "./tests/broken_tests.yaml", ], ), result_name_for_cidb="Tests", @@ -104,13 +109,14 @@ "./ci/jobs/scripts/docker_in_docker.sh", ], ), - run_in_docker=f"clickhouse/integration-tests-runner+root+--memory={LIMITED_MEM}+--privileged+--dns-search='.'+--security-opt seccomp=unconfined+--cap-add=SYS_PTRACE+{docker_sock_mount}+--volume=clickhouse_integration_tests_volume:/var/lib/docker+--cgroupns=host", + run_in_docker=f"altinityinfra/integration-tests-runner+root+--memory={LIMITED_MEM}+--privileged+--dns-search='.'+--security-opt seccomp=unconfined+--cap-add=SYS_PTRACE+{docker_sock_mount}+--volume=clickhouse_integration_tests_volume:/var/lib/docker+--cgroupns=host", ) BINARY_DOCKER_COMMAND = ( - "clickhouse/binary-builder+--network=host+" + "altinityinfra/binary-builder+--network=host+" 
f"--memory={Utils.physical_memory() * 95 // 100}+" f"--memory-reservation={Utils.physical_memory() * 9 // 10}" + '+--env=AWS_ACCESS_KEY_ID="$AWS_ACCESS_KEY_ID"+--env=AWS_SECRET_ACCESS_KEY="$AWS_SECRET_ACCESS_KEY"' ) @@ -119,7 +125,7 @@ class JobConfigs: name=JobNames.STYLE_CHECK, runs_on=RunnerLabels.STYLE_CHECK_ARM, command="python3 ./ci/jobs/check_style.py", - run_in_docker="clickhouse/style-test", + run_in_docker="altinityinfra/style-test", enable_commit_status=True, ) pr_body = Job.Config( @@ -135,7 +141,7 @@ class JobConfigs: command="python3 ./ci/jobs/fast_test.py", # --network=host required for ec2 metadata http endpoint to work # --root/--privileged/--cgroupns=host is required for clickhouse-test --memory-limit - run_in_docker="clickhouse/fasttest+--network=host+--privileged+--cgroupns=host+--volume=./ci/tmp/var/lib/clickhouse:/var/lib/clickhouse+--volume=./ci/tmp/etc/clickhouse-client:/etc/clickhouse-client+--volume=./ci/tmp/etc/clickhouse-server:/etc/clickhouse-server+--volume=./ci/tmp/var/log:/var/log+root", + run_in_docker="altinityinfra/fasttest+--network=host+--privileged+--cgroupns=host+--volume=./ci/tmp/var/lib/clickhouse:/var/lib/clickhouse+--volume=./ci/tmp/etc/clickhouse-client:/etc/clickhouse-client+--volume=./ci/tmp/etc/clickhouse-server:/etc/clickhouse-server+--volume=./ci/tmp/var/log:/var/log+root+--env=AWS_ACCESS_KEY_ID=$AWS_ACCESS_KEY_ID+--env=AWS_SECRET_ACCESS_KEY=$AWS_SECRET_ACCESS_KEY", digest_config=Job.CacheDigestConfig( include_paths=[ "./ci/jobs/fast_test.py", @@ -172,7 +178,7 @@ class JobConfigs: build_jobs = common_build_job_config.set_post_hooks( post_hooks=[ "python3 ./ci/jobs/scripts/job_hooks/build_master_head_hook.py", - "python3 ./ci/jobs/scripts/job_hooks/build_profile_hook.py", + # "python3 ./ci/jobs/scripts/job_hooks/build_profile_hook.py", ], ).parametrize( Job.ParamSet( @@ -247,13 +253,14 @@ class JobConfigs: release_build_jobs = common_build_job_config.set_post_hooks( post_hooks=[ "python3 ./ci/jobs/scripts/job_hooks/build_master_head_hook.py", - "python3 ./ci/jobs/scripts/job_hooks/build_profile_hook.py", + # "python3 ./ci/jobs/scripts/job_hooks/build_profile_hook.py", ], ).parametrize( Job.ParamSet( parameter=BuildTypes.AMD_RELEASE, provides=[ ArtifactNames.CH_AMD_RELEASE, + ArtifactNames.CH_AMD_RELEASE_STRIPPED, ArtifactNames.DEB_AMD_RELEASE, ArtifactNames.RPM_AMD_RELEASE, ArtifactNames.TGZ_AMD_RELEASE, @@ -265,17 +272,18 @@ class JobConfigs: parameter=BuildTypes.ARM_RELEASE, provides=[ ArtifactNames.CH_ARM_RELEASE, + ArtifactNames.CH_ARM_RELEASE_STRIPPED, ArtifactNames.DEB_ARM_RELEASE, ArtifactNames.RPM_ARM_RELEASE, ArtifactNames.TGZ_ARM_RELEASE, ], - runs_on=RunnerLabels.ARM_LARGE, + runs_on=RunnerLabels.BUILDER_ARM, ), ) extra_validation_build_jobs = common_build_job_config.set_post_hooks( post_hooks=[ "python3 ./ci/jobs/scripts/job_hooks/build_master_head_hook.py", - "python3 ./ci/jobs/scripts/job_hooks/build_profile_hook.py", + # "python3 ./ci/jobs/scripts/job_hooks/build_profile_hook.py", ], ).parametrize( Job.ParamSet( @@ -363,7 +371,7 @@ class JobConfigs: ).parametrize( Job.ParamSet( parameter="amd_release", - runs_on=RunnerLabels.STYLE_CHECK_AMD, + runs_on=RunnerLabels.FUNC_TESTER_AMD, requires=[ ArtifactNames.DEB_AMD_RELEASE, ArtifactNames.CH_AMD_RELEASE, @@ -373,7 +381,7 @@ class JobConfigs: ), Job.ParamSet( parameter="arm_release", - runs_on=RunnerLabels.STYLE_CHECK_ARM, + runs_on=RunnerLabels.FUNC_TESTER_ARM, requires=[ ArtifactNames.DEB_ARM_RELEASE, ArtifactNames.CH_ARM_RELEASE, @@ -396,7 +404,7 @@ class JobConfigs: 
).parametrize( Job.ParamSet( parameter="amd_release", - runs_on=RunnerLabels.STYLE_CHECK_AMD, + runs_on=RunnerLabels.FUNC_TESTER_AMD, requires=[ ArtifactNames.DEB_AMD_RELEASE, ArtifactNames.RPM_AMD_RELEASE, @@ -406,7 +414,7 @@ class JobConfigs: ), Job.ParamSet( parameter="arm_release", - runs_on=RunnerLabels.STYLE_CHECK_ARM, + runs_on=RunnerLabels.FUNC_TESTER_ARM, requires=[ ArtifactNames.DEB_ARM_RELEASE, ArtifactNames.RPM_ARM_RELEASE, @@ -418,14 +426,14 @@ class JobConfigs: stateless_tests_flaky_pr_jobs = common_ft_job_config.parametrize( Job.ParamSet( parameter="amd_asan, flaky check", - runs_on=RunnerLabels.AMD_MEDIUM, + runs_on=RunnerLabels.FUNC_TESTER_AMD, requires=[ArtifactNames.CH_AMD_ASAN], ), ) stateless_tests_targeted_pr_jobs = common_ft_job_config.parametrize( Job.ParamSet( parameter="arm_asan, targeted", - runs_on=RunnerLabels.ARM_MEDIUM, + runs_on=RunnerLabels.FUNC_TESTER_ARM, requires=[ArtifactNames.CH_ARM_ASAN], ), ) @@ -435,7 +443,7 @@ class JobConfigs: runs_on=RunnerLabels.FUNC_TESTER_ARM, command="python3 ./ci/jobs/functional_tests.py --options BugfixValidation", # some tests can be flaky due to very slow disks - use tmpfs for temporary ClickHouse files - run_in_docker="clickhouse/stateless-test+--network=host+--privileged+--cgroupns=host+root+--security-opt seccomp=unconfined+--tmpfs /tmp/clickhouse:mode=1777", + run_in_docker="altinityinfra/stateless-test+--network=host+--privileged+--cgroupns=host+root+--security-opt seccomp=unconfined+--tmpfs /tmp/clickhouse:mode=1777", digest_config=Job.CacheDigestConfig( include_paths=[ "./ci/jobs/functional_tests.py", @@ -463,7 +471,7 @@ class JobConfigs: *[ Job.ParamSet( parameter=f"amd_asan, distributed plan, parallel, {batch}/{total_batches}", - runs_on=RunnerLabels.AMD_MEDIUM_CPU, + runs_on=RunnerLabels.FUNC_TESTER_AMD, requires=[ArtifactNames.CH_AMD_ASAN], ) for total_batches in (2,) @@ -471,53 +479,53 @@ class JobConfigs: ], Job.ParamSet( parameter="amd_asan, db disk, distributed plan, sequential", - runs_on=RunnerLabels.AMD_SMALL_MEM, + runs_on=RunnerLabels.FUNC_TESTER_AMD, requires=[ArtifactNames.CH_AMD_ASAN], ), Job.ParamSet( parameter="amd_binary, old analyzer, s3 storage, DatabaseReplicated, parallel", - runs_on=RunnerLabels.AMD_MEDIUM, # large machine - no boost, why? + runs_on=RunnerLabels.FUNC_TESTER_AMD, # large machine - no boost, why? requires=[ArtifactNames.CH_AMD_BINARY], ), Job.ParamSet( parameter="amd_binary, old analyzer, s3 storage, DatabaseReplicated, sequential", - runs_on=RunnerLabels.AMD_SMALL, + runs_on=RunnerLabels.FUNC_TESTER_AMD, requires=[ArtifactNames.CH_AMD_BINARY], ), Job.ParamSet( parameter="amd_binary, ParallelReplicas, s3 storage, parallel", - runs_on=RunnerLabels.AMD_MEDIUM, # large machine - no boost, why? + runs_on=RunnerLabels.FUNC_TESTER_AMD, # large machine - no boost, why? requires=[ArtifactNames.CH_AMD_BINARY], ), Job.ParamSet( parameter="amd_binary, ParallelReplicas, s3 storage, sequential", - runs_on=RunnerLabels.AMD_SMALL, + runs_on=RunnerLabels.FUNC_TESTER_AMD, requires=[ArtifactNames.CH_AMD_BINARY], ), Job.ParamSet( parameter="amd_debug, AsyncInsert, s3 storage, parallel", - runs_on=RunnerLabels.AMD_MEDIUM, # large machine - no boost, why? + runs_on=RunnerLabels.FUNC_TESTER_AMD, # large machine - no boost, why? 
             requires=[ArtifactNames.CH_AMD_DEBUG],
         ),
         Job.ParamSet(
             parameter="amd_debug, AsyncInsert, s3 storage, sequential",
-            runs_on=RunnerLabels.AMD_SMALL,
+            runs_on=RunnerLabels.FUNC_TESTER_AMD,
             requires=[ArtifactNames.CH_AMD_DEBUG],
         ),
         Job.ParamSet(
             parameter="amd_debug, parallel",
-            runs_on=RunnerLabels.AMD_MEDIUM_CPU,
+            runs_on=RunnerLabels.FUNC_TESTER_AMD,
             requires=[ArtifactNames.CH_AMD_DEBUG],
         ),
         Job.ParamSet(
             parameter="amd_debug, sequential",
-            runs_on=RunnerLabels.AMD_SMALL,
+            runs_on=RunnerLabels.FUNC_TESTER_AMD,
             requires=[ArtifactNames.CH_AMD_DEBUG],
         ),
         *[
             Job.ParamSet(
                 parameter=f"amd_tsan, parallel, {batch}/{total_batches}",
-                runs_on=RunnerLabels.AMD_LARGE,
+                runs_on=RunnerLabels.FUNC_TESTER_AMD,
                 requires=[ArtifactNames.CH_AMD_TSAN],
             )
             for total_batches in (2,)
@@ -526,7 +534,7 @@ class JobConfigs:
         *[
             Job.ParamSet(
                 parameter=f"amd_tsan, sequential, {batch}/{total_batches}",
-                runs_on=RunnerLabels.AMD_SMALL,
+                runs_on=RunnerLabels.FUNC_TESTER_AMD,
                 requires=[ArtifactNames.CH_AMD_TSAN],
             )
             for total_batches in (2,)
@@ -535,7 +543,7 @@ class JobConfigs:
         *[
             Job.ParamSet(
                 parameter=f"amd_msan, parallel, {batch}/{total_batches}",
-                runs_on=RunnerLabels.AMD_LARGE,
+                runs_on=RunnerLabels.FUNC_TESTER_AMD,
                 requires=[ArtifactNames.CH_AMD_MSAN],
             )
             for total_batches in (2,)
@@ -544,7 +552,7 @@ class JobConfigs:
         *[
             Job.ParamSet(
                 parameter=f"amd_msan, sequential, {batch}/{total_batches}",
-                runs_on=RunnerLabels.AMD_SMALL_MEM,
+                runs_on=RunnerLabels.FUNC_TESTER_AMD,
                 requires=[ArtifactNames.CH_AMD_MSAN],
             )
             for total_batches in (2,)
@@ -552,28 +560,28 @@ class JobConfigs:
         ],
         Job.ParamSet(
             parameter="amd_ubsan, parallel",
-            runs_on=RunnerLabels.AMD_SMALL_MEM,  # it runs much faster than many job, no need larger machine
+            runs_on=RunnerLabels.FUNC_TESTER_AMD,  # it runs much faster than many other jobs; no need for a larger machine
             requires=[ArtifactNames.CH_AMD_UBSAN],
         ),
         Job.ParamSet(
             parameter="amd_ubsan, sequential",
-            runs_on=RunnerLabels.AMD_SMALL_MEM,
+            runs_on=RunnerLabels.FUNC_TESTER_AMD,
             requires=[ArtifactNames.CH_AMD_UBSAN],
         ),
         Job.ParamSet(
             parameter="amd_debug, distributed plan, s3 storage, parallel",
-            runs_on=RunnerLabels.AMD_MEDIUM,  # large machine - no boost, why?
+            runs_on=RunnerLabels.FUNC_TESTER_AMD,  # large machine - no boost, why?
requires=[ArtifactNames.CH_AMD_DEBUG], ), Job.ParamSet( parameter="amd_debug, distributed plan, s3 storage, sequential", - runs_on=RunnerLabels.AMD_SMALL, + runs_on=RunnerLabels.FUNC_TESTER_AMD, requires=[ArtifactNames.CH_AMD_DEBUG], ), *[ Job.ParamSet( parameter=f"amd_tsan, s3 storage, parallel, {batch}/{total_batches}", - runs_on=RunnerLabels.AMD_MEDIUM, + runs_on=RunnerLabels.FUNC_TESTER_AMD, requires=[ArtifactNames.CH_AMD_TSAN], ) for total_batches in (2,) @@ -582,7 +590,7 @@ class JobConfigs: *[ Job.ParamSet( parameter=f"amd_tsan, s3 storage, sequential, {batch}/{total_batches}", - runs_on=RunnerLabels.AMD_SMALL_MEM, + runs_on=RunnerLabels.FUNC_TESTER_AMD, requires=[ArtifactNames.CH_AMD_TSAN], ) for total_batches in (2,) @@ -590,12 +598,12 @@ class JobConfigs: ], Job.ParamSet( parameter="arm_binary, parallel", - runs_on=RunnerLabels.ARM_MEDIUM_CPU, + runs_on=RunnerLabels.FUNC_TESTER_ARM, requires=[ArtifactNames.CH_ARM_BINARY], ), Job.ParamSet( parameter="arm_binary, sequential", - runs_on=RunnerLabels.ARM_SMALL, + runs_on=RunnerLabels.FUNC_TESTER_ARM, requires=[ArtifactNames.CH_ARM_BINARY], ), ) @@ -603,7 +611,7 @@ class JobConfigs: *[ Job.ParamSet( parameter=f"{BuildTypes.AMD_COVERAGE}, {batch}/{total_batches}", - runs_on=RunnerLabels.AMD_SMALL, + runs_on=RunnerLabels.FUNC_TESTER_AMD, requires=[ArtifactNames.CH_COV_BIN], ) for total_batches in (8,) @@ -614,19 +622,19 @@ class JobConfigs: common_ft_job_config.set_allow_merge_on_failure(True).parametrize( Job.ParamSet( parameter="arm_asan, azure, parallel", - runs_on=RunnerLabels.ARM_MEDIUM, + runs_on=RunnerLabels.FUNC_TESTER_ARM, requires=[ArtifactNames.CH_ARM_ASAN], ), Job.ParamSet( parameter="arm_asan, azure, sequential", - runs_on=RunnerLabels.ARM_SMALL_MEM, + runs_on=RunnerLabels.FUNC_TESTER_ARM, requires=[ArtifactNames.CH_ARM_ASAN], ), ) ) bugfix_validation_it_job = ( common_integration_test_job_config.set_name(JobNames.BUGFIX_VALIDATE_IT) - .set_runs_on(RunnerLabels.AMD_SMALL_MEM) + .set_runs_on(RunnerLabels.FUNC_TESTER_AMD) .set_command( "python3 ./ci/jobs/integration_test_job.py --options BugfixValidation" ) @@ -635,7 +643,7 @@ class JobConfigs: name=JobNames.UNITTEST, runs_on=[], # from parametrize() command=f"python3 ./ci/jobs/unit_tests_job.py", - run_in_docker="clickhouse/fasttest+--privileged", + run_in_docker="altinityinfra/fasttest+--privileged", digest_config=Job.CacheDigestConfig( include_paths=["./ci/jobs/unit_tests_job.py"], ), @@ -920,7 +928,7 @@ class JobConfigs: runs_on=["#from param"], command='python3 ./ci/jobs/performance_tests.py --test-options "{PARAMETER}"', # TODO: switch to stateless-test image - run_in_docker="clickhouse/performance-comparison", + run_in_docker="altinityinfra/performance-comparison", digest_config=Job.CacheDigestConfig( include_paths=[ "./tests/performance/", @@ -956,7 +964,7 @@ class JobConfigs: runs_on=["#from param"], command='python3 ./ci/jobs/performance_tests.py --test-options "{PARAMETER}"', # TODO: switch to stateless-test image - run_in_docker="clickhouse/performance-comparison", + run_in_docker="altinityinfra/performance-comparison", digest_config=Job.CacheDigestConfig( include_paths=[ "./tests/performance/", @@ -989,7 +997,7 @@ class JobConfigs: "./ci/jobs/scripts/functional_tests/setup_log_cluster.sh", ], ), - run_in_docker="clickhouse/stateless-test+--shm-size=16g+--network=host", + run_in_docker="altinityinfra/stateless-test+--shm-size=16g+--network=host", ).parametrize( Job.ParamSet( parameter=BuildTypes.AMD_RELEASE, @@ -1015,7 +1023,7 @@ class JobConfigs: 
"./src/Functions", ], ), - run_in_docker="clickhouse/docs-builder", + run_in_docker="altinityinfra/docs-builder", requires=[JobNames.STYLE_CHECK, ArtifactNames.CH_ARM_BINARY], ) docker_server = Job.Config( @@ -1027,6 +1035,8 @@ class JobConfigs: "./ci/jobs/docker_server.py", "./docker/server", "./docker/keeper", + ".github/grype", + ".github/workflows/grype_scan.yml", ], ), requires=["Build (amd_release)", "Build (arm_release)"], @@ -1041,6 +1051,8 @@ class JobConfigs: "./ci/jobs/docker_server.py", "./docker/server", "./docker/keeper", + ".github/grype", + ".github/workflows/grype_scan.yml", ], ), requires=["Build (amd_release)", "Build (arm_release)"], @@ -1053,7 +1065,7 @@ class JobConfigs: digest_config=Job.CacheDigestConfig( include_paths=["./ci/jobs/sqlancer_job.sh", "./ci/docker/sqlancer-test"], ), - run_in_docker="clickhouse/sqlancer-test", + run_in_docker="altinityinfra/sqlancer-test", timeout=3600, ).parametrize( Job.ParamSet( @@ -1072,7 +1084,7 @@ class JobConfigs: ], ), requires=[ArtifactNames.CH_ARM_RELEASE], - run_in_docker="clickhouse/stateless-test", + run_in_docker="altinityinfra/stateless-test", timeout=10800, ) jepsen_keeper = Job.Config( @@ -1096,6 +1108,6 @@ class JobConfigs: vector_search_stress_job = Job.Config( name="Vector Search Stress", runs_on=RunnerLabels.ARM_MEDIUM, - run_in_docker="clickhouse/performance-comparison", + run_in_docker="altinityinfra/performance-comparison", command="python3 ./ci/jobs/vector_search_stress_tests.py", ) diff --git a/ci/docker/binary-builder/Dockerfile b/ci/docker/binary-builder/Dockerfile index ee655c2f2056..bb6132e00bba 100644 --- a/ci/docker/binary-builder/Dockerfile +++ b/ci/docker/binary-builder/Dockerfile @@ -1,6 +1,6 @@ -# docker build -t clickhouse/binary-builder . +# docker build -t altinityinfra/binary-builder . ARG FROM_TAG -FROM clickhouse/fasttest:$FROM_TAG +FROM altinityinfra/fasttest:$FROM_TAG ARG TARGETARCH ENV CC=clang-${LLVM_VERSION} ENV CXX=clang++-${LLVM_VERSION} @@ -8,10 +8,10 @@ ENV CXX=clang++-${LLVM_VERSION} #non-functional change # If the cctools is updated, then first build it in the CI, then update here in a different commit -COPY --from=clickhouse/cctools:859fb360308eb8ac47ad /cctools /cctools +COPY --from=altinityinfra/cctools:33434f0a9e7e979bc907 /cctools /cctools # We need OpenSSL FIPS in permissive mode for build on MasterCI -COPY --from=clickhouse/cctools:859fb360308eb8ac47ad \ +COPY --from=altinityinfra/cctools:33434f0a9e7e979bc907 \ /opt/openssl-fips/openssl.cnf \ /opt/openssl-fips/fipsmodule.cnf \ /opt/openssl-fips/fips.so \ diff --git a/ci/docker/cctools/Dockerfile b/ci/docker/cctools/Dockerfile index 3a77e61187ab..b3bfab7a805c 100644 --- a/ci/docker/cctools/Dockerfile +++ b/ci/docker/cctools/Dockerfile @@ -1,10 +1,10 @@ -# docker build -t clickhouse/cctools . +# docker build -t altinityinfra/cctools . # This is a hack to significantly reduce the build time of the clickhouse/binary-builder # It's based on the assumption that we don't care of the cctools version so much # It even does not depend on the clickhouse/fasttest in the `docker/images.json` ARG FROM_TAG=latest -FROM clickhouse/fasttest:$FROM_TAG AS builder +FROM altinityinfra/fasttest:$FROM_TAG AS builder ENV CC=clang-${LLVM_VERSION} ENV CXX=clang++-${LLVM_VERSION} diff --git a/ci/docker/docs-builder/Dockerfile b/ci/docker/docs-builder/Dockerfile index 8835fe3a7070..59793ae16b7d 100644 --- a/ci/docker/docs-builder/Dockerfile +++ b/ci/docker/docs-builder/Dockerfile @@ -1,4 +1,4 @@ -# docker build -t clickhouse/docs-builder . 
+# docker build -t altinityinfra/docs-builder . FROM node:20-bookworm-slim RUN apt-get update && \ diff --git a/ci/docker/fasttest/Dockerfile b/ci/docker/fasttest/Dockerfile index d0c7f30ee314..389a93419558 100644 --- a/ci/docker/fasttest/Dockerfile +++ b/ci/docker/fasttest/Dockerfile @@ -1,4 +1,4 @@ -# docker build -t clickhouse/fasttest . +# docker build -t altinityinfra/fasttest . FROM ubuntu:22.04 # ARG for quick switch to a given ubuntu mirror @@ -110,7 +110,7 @@ RUN ARCH=$(uname -m) && \ rustup target add riscv64gc-unknown-linux-gnu # Note, libmpfr6 is also a requirement for gdb -COPY --from=clickhouse/cctools:859fb360308eb8ac47ad /opt/gdb /opt/gdb +COPY --from=altinityinfra/cctools:33434f0a9e7e979bc907 /opt/gdb /opt/gdb # Give suid to gdb to grant it attach permissions RUN chmod u+s /opt/gdb/bin/gdb ENV PATH="/opt/gdb/bin:${PATH}" diff --git a/ci/docker/fuzzer/Dockerfile b/ci/docker/fuzzer/Dockerfile index 303dfc59fb5f..4aba4473b696 100644 --- a/ci/docker/fuzzer/Dockerfile +++ b/ci/docker/fuzzer/Dockerfile @@ -1,7 +1,7 @@ # rebuild in #33610 -# docker build -t clickhouse/fuzzer . +# docker build -t altinityinfra/fuzzer . ARG FROM_TAG=latest -FROM clickhouse/test-base:$FROM_TAG +FROM altinityinfra/test-base:$FROM_TAG ENV LANG=C.UTF-8 @@ -33,4 +33,4 @@ CMD set -o pipefail \ && cd /workspace \ && timeout --verbose --signal 9 1h /run-fuzzer.sh 2>&1 | ts "$(printf '%%Y-%%m-%%d %%H:%%M:%%S\t')" | tee main.log -# docker run --network=host --volume :/workspace -e PR_TO_TEST=<> -e SHA_TO_TEST=<> clickhouse/fuzzer +# docker run --network=host --volume :/workspace -e PR_TO_TEST=<> -e SHA_TO_TEST=<> altinityinfra/fuzzer diff --git a/ci/docker/integration/arrowflight/Dockerfile b/ci/docker/integration/arrowflight/Dockerfile index 10d5b4955d3c..0abb55f92110 100644 --- a/ci/docker/integration/arrowflight/Dockerfile +++ b/ci/docker/integration/arrowflight/Dockerfile @@ -1,4 +1,4 @@ -# docker build -t clickhouse/arrowflight-server-test . +# docker build -t altinityinfra/arrowflight-server-test . FROM python:3.9-slim ENV PYTHONDONTWRITEBYTECODE=1 diff --git a/ci/docker/integration/base/Dockerfile b/ci/docker/integration/base/Dockerfile index 29af698b293c..4260bf6c85dd 100644 --- a/ci/docker/integration/base/Dockerfile +++ b/ci/docker/integration/base/Dockerfile @@ -1,7 +1,7 @@ # rebuild in #33610 -# docker build -t clickhouse/integration-test . +# docker build -t altinityinfra/integration-test . ARG FROM_TAG=latest -FROM clickhouse/test-base:$FROM_TAG +FROM altinityinfra/test-base:$FROM_TAG SHELL ["/bin/bash", "-c"] @@ -73,10 +73,10 @@ maxClientCnxns=80' > /opt/zookeeper/conf/zoo.cfg && \ ENV TZ=Etc/UTC RUN ln -snf /usr/share/zoneinfo/$TZ /etc/localtime && echo $TZ > /etc/timezone -COPY --from=clickhouse/cctools:859fb360308eb8ac47ad /opt/gdb /opt/gdb +COPY --from=altinityinfra/cctools:7c093c1cde57d744be57 /opt/gdb /opt/gdb ENV PATH="/opt/gdb/bin:${PATH}" -COPY --from=clickhouse/cctools:859fb360308eb8ac47ad \ +COPY --from=altinityinfra/cctools:7c093c1cde57d744be57 \ /opt/openssl-fips/openssl.cnf \ /opt/openssl-fips/fipsmodule.cnf \ /opt/openssl-fips/fips.so \ diff --git a/ci/docker/integration/clickhouse_with_hms_catalog/Dockerfile b/ci/docker/integration/clickhouse_with_hms_catalog/Dockerfile index 40d107d0c28b..9337e28926a2 100644 --- a/ci/docker/integration/clickhouse_with_hms_catalog/Dockerfile +++ b/ci/docker/integration/clickhouse_with_hms_catalog/Dockerfile @@ -1,4 +1,4 @@ -# docker build -t clickhouse/integration-test-with-hms . +# docker build -t altinityinfra/integration-test-with-hms . 
ARG FROM_TAG=latest FROM openjdk:8-jre-slim AS build diff --git a/ci/docker/integration/clickhouse_with_unity_catalog/Dockerfile b/ci/docker/integration/clickhouse_with_unity_catalog/Dockerfile index 50da25ddc78e..855746c23200 100644 --- a/ci/docker/integration/clickhouse_with_unity_catalog/Dockerfile +++ b/ci/docker/integration/clickhouse_with_unity_catalog/Dockerfile @@ -1,4 +1,4 @@ -# docker build -t clickhouse/integration-test-with-unity-catalog . +# docker build -t altinityinfra/integration-test-with-unity-catalog . ARG FROM_TAG=latest FROM clickhouse/integration-test:$FROM_TAG diff --git a/ci/docker/integration/helper_container/Dockerfile b/ci/docker/integration/helper_container/Dockerfile index 1084d087e53b..81d658705836 100644 --- a/ci/docker/integration/helper_container/Dockerfile +++ b/ci/docker/integration/helper_container/Dockerfile @@ -1,4 +1,4 @@ -# docker build -t clickhouse/integration-helper . +# docker build -t altinityinfra/integration-helper . # Helper docker container to run iptables without sudo FROM alpine:3.18 diff --git a/ci/docker/integration/kerberos_kdc/Dockerfile b/ci/docker/integration/kerberos_kdc/Dockerfile index a203c33a3313..a7f989bf4a56 100644 --- a/ci/docker/integration/kerberos_kdc/Dockerfile +++ b/ci/docker/integration/kerberos_kdc/Dockerfile @@ -1,4 +1,4 @@ -# docker build -t clickhouse/kerberos-kdc . +# docker build -t altinityinfra/kerberos-kdc . FROM centos:6 RUN sed -i '/^mirrorlist/s/^/#/;/^#baseurl/{s/#//;s/mirror.centos.org\/centos\/$releasever/vault.centos.org\/6.10/}' /etc/yum.repos.d/*B* diff --git a/ci/docker/integration/mysql_dotnet_client/Dockerfile b/ci/docker/integration/mysql_dotnet_client/Dockerfile index 92d0c6ae585e..d1e38db65613 100644 --- a/ci/docker/integration/mysql_dotnet_client/Dockerfile +++ b/ci/docker/integration/mysql_dotnet_client/Dockerfile @@ -1,4 +1,4 @@ -# docker build -t clickhouse/mysql_dotnet_client . +# docker build -t altinityinfra/mysql_dotnet_client . FROM ubuntu:22.04 diff --git a/ci/docker/integration/mysql_golang_client/Dockerfile b/ci/docker/integration/mysql_golang_client/Dockerfile index 5281f786ae2d..52be68126e47 100644 --- a/ci/docker/integration/mysql_golang_client/Dockerfile +++ b/ci/docker/integration/mysql_golang_client/Dockerfile @@ -1,4 +1,4 @@ -# docker build -t clickhouse/mysql-golang-client . +# docker build -t altinityinfra/mysql-golang-client . # MySQL golang client docker container FROM golang:1.17 diff --git a/ci/docker/integration/mysql_java_client/Dockerfile b/ci/docker/integration/mysql_java_client/Dockerfile index 38fefac070e7..5826ee77d501 100644 --- a/ci/docker/integration/mysql_java_client/Dockerfile +++ b/ci/docker/integration/mysql_java_client/Dockerfile @@ -1,4 +1,4 @@ -# docker build -t clickhouse/mysql-java-client . +# docker build -t altinityinfra/mysql-java-client . # MySQL Java client docker container FROM openjdk:8-jdk-alpine diff --git a/ci/docker/integration/mysql_js_client/Dockerfile b/ci/docker/integration/mysql_js_client/Dockerfile index 4c9df10ace1c..2b821f243234 100644 --- a/ci/docker/integration/mysql_js_client/Dockerfile +++ b/ci/docker/integration/mysql_js_client/Dockerfile @@ -1,4 +1,4 @@ -# docker build -t clickhouse/mysql-js-client . +# docker build -t altinityinfra/mysql-js-client . 
# MySQL JavaScript client docker container FROM node:16.14.2 diff --git a/ci/docker/integration/mysql_php_client/Dockerfile b/ci/docker/integration/mysql_php_client/Dockerfile index 0e11ae023e63..b060e93f70a3 100644 --- a/ci/docker/integration/mysql_php_client/Dockerfile +++ b/ci/docker/integration/mysql_php_client/Dockerfile @@ -1,4 +1,4 @@ -# docker build -t clickhouse/mysql-php-client . +# docker build -t altinityinfra/mysql-php-client . # MySQL PHP client docker container FROM php:8-cli-alpine diff --git a/ci/docker/integration/postgresql_java_client/Dockerfile b/ci/docker/integration/postgresql_java_client/Dockerfile index c5583085ef37..5a7458cc1d2f 100644 --- a/ci/docker/integration/postgresql_java_client/Dockerfile +++ b/ci/docker/integration/postgresql_java_client/Dockerfile @@ -1,4 +1,4 @@ -# docker build -t clickhouse/postgresql-java-client . +# docker build -t altinityinfra/postgresql-java-client . # PostgreSQL Java client docker container FROM ubuntu:18.04 diff --git a/ci/docker/integration/resolver/Dockerfile b/ci/docker/integration/resolver/Dockerfile index 423faf835ae1..1f639bb2793d 100644 --- a/ci/docker/integration/resolver/Dockerfile +++ b/ci/docker/integration/resolver/Dockerfile @@ -1,4 +1,4 @@ -# docker build -t clickhouse/python-bottle . +# docker build -t altinityinfra/python-bottle . # Helper docker container to run python bottle apps # python cgi module is dropped in 3.13 - pin to 3.12 diff --git a/ci/docker/integration/runner/Dockerfile b/ci/docker/integration/runner/Dockerfile index 71ef68d04b59..607bff7d1ce6 100644 --- a/ci/docker/integration/runner/Dockerfile +++ b/ci/docker/integration/runner/Dockerfile @@ -1,6 +1,6 @@ -# docker build -t clickhouse/integration-tests-runner . +# docker build -t altinityinfra/integration-tests-runner . ARG FROM_TAG=latest -FROM clickhouse/test-base:$FROM_TAG +FROM altinityinfra/test-base:$FROM_TAG # ARG for quick switch to a given ubuntu mirror ARG apt_archive="http://archive.ubuntu.com" @@ -93,10 +93,10 @@ RUN set -x \ COPY modprobe.sh /usr/local/bin/modprobe COPY misc/ /misc/ -COPY --from=clickhouse/cctools:859fb360308eb8ac47ad /opt/gdb /opt/gdb +COPY --from=altinityinfra/cctools:33434f0a9e7e979bc907 /opt/gdb /opt/gdb ENV PATH="/opt/gdb/bin:${PATH}" -COPY --from=clickhouse/cctools:859fb360308eb8ac47ad \ +COPY --from=altinityinfra/cctools:33434f0a9e7e979bc907 \ /opt/openssl-fips/openssl.cnf \ /opt/openssl-fips/fipsmodule.cnf \ /opt/openssl-fips/fips.so \ diff --git a/ci/docker/integration/s3_proxy/Dockerfile b/ci/docker/integration/s3_proxy/Dockerfile index 5858218e4e4c..df8d8f00f216 100644 --- a/ci/docker/integration/s3_proxy/Dockerfile +++ b/ci/docker/integration/s3_proxy/Dockerfile @@ -1,4 +1,4 @@ -# docker build -t clickhouse/s3-proxy . +# docker build -t altinityinfra/s3-proxy . FROM nginx:alpine COPY run.sh /run.sh diff --git a/ci/docker/keeper-jepsen-test/Dockerfile b/ci/docker/keeper-jepsen-test/Dockerfile index 3c5d0a6ecb42..6633d81193d5 100644 --- a/ci/docker/keeper-jepsen-test/Dockerfile +++ b/ci/docker/keeper-jepsen-test/Dockerfile @@ -1,7 +1,7 @@ # rebuild in #33610 # docker build -t clickhouse/keeper-jepsen-test . 
ARG FROM_TAG=latest -FROM clickhouse/test-base:$FROM_TAG +FROM altinityinfra/test-base:$FROM_TAG ENV DEBIAN_FRONTEND=noninteractive ENV CLOJURE_VERSION=1.10.3.814 diff --git a/ci/docker/libfuzzer/Dockerfile b/ci/docker/libfuzzer/Dockerfile index 26201e81def2..0da78bee9a67 100644 --- a/ci/docker/libfuzzer/Dockerfile +++ b/ci/docker/libfuzzer/Dockerfile @@ -1,6 +1,6 @@ # docker build -t clickhouse/libfuzzer . ARG FROM_TAG=latest -FROM clickhouse/test-base:$FROM_TAG +FROM altinityinfra/test-base:$FROM_TAG # ARG for quick switch to a given ubuntu mirror ARG apt_archive="http://archive.ubuntu.com" diff --git a/ci/docker/performance-comparison/Dockerfile b/ci/docker/performance-comparison/Dockerfile index dce18539d200..8384e9087104 100644 --- a/ci/docker/performance-comparison/Dockerfile +++ b/ci/docker/performance-comparison/Dockerfile @@ -1,7 +1,7 @@ -# docker build -t clickhouse/performance-comparison . +# docker build -t altinityinfra/performance-comparison . ARG FROM_TAG=latest -FROM clickhouse/test-base:$FROM_TAG +FROM altinityinfra/test-base:$FROM_TAG RUN apt-get update \ && DEBIAN_FRONTEND=noninteractive apt-get install --yes --no-install-recommends \ @@ -40,7 +40,7 @@ RUN apt-get update \ COPY requirements.txt / RUN pip3 --no-cache-dir install -r requirements.txt -COPY --from=clickhouse/cctools:859fb360308eb8ac47ad /opt/gdb /opt/gdb +COPY --from=altinityinfra/cctools:7c093c1cde57d744be57 /opt/gdb /opt/gdb ENV PATH="/opt/gdb/bin:${PATH}" # aws cli to acquire secrets and params from ssm diff --git a/ci/docker/server-jepsen-test/Dockerfile b/ci/docker/server-jepsen-test/Dockerfile index fd70fc457020..54a4626e2892 100644 --- a/ci/docker/server-jepsen-test/Dockerfile +++ b/ci/docker/server-jepsen-test/Dockerfile @@ -1,7 +1,7 @@ # rebuild in #33610 # docker build -t clickhouse/server-jepsen-test . ARG FROM_TAG=latest -FROM clickhouse/test-base:$FROM_TAG +FROM altinityinfra/test-base:$FROM_TAG ENV DEBIAN_FRONTEND=noninteractive ENV CLOJURE_VERSION=1.10.3.814 diff --git a/ci/docker/sqlancer-test/Dockerfile b/ci/docker/sqlancer-test/Dockerfile index 2aa5aba9788d..3c5cea2ef7e0 100644 --- a/ci/docker/sqlancer-test/Dockerfile +++ b/ci/docker/sqlancer-test/Dockerfile @@ -1,4 +1,4 @@ -# docker build -t clickhouse/sqlancer-test . +# docker build -t altinityinfra/sqlancer-test . FROM ubuntu:22.04 # ARG for quick switch to a given ubuntu mirror diff --git a/ci/docker/stateless-test/Dockerfile b/ci/docker/stateless-test/Dockerfile index cfc747cf9826..450e3878b2a8 100644 --- a/ci/docker/stateless-test/Dockerfile +++ b/ci/docker/stateless-test/Dockerfile @@ -1,6 +1,6 @@ -# docker build -t clickhouse/stateless-test . +# docker build -t altinityinfra/stateless-test . 
ARG FROM_TAG=latest -FROM clickhouse/test-base:$FROM_TAG +FROM altinityinfra/test-base:$FROM_TAG ARG odbc_driver_url="https://github.com/ClickHouse/clickhouse-odbc/releases/download/v1.1.6.20200320/clickhouse-odbc-1.1.6-Linux.tar.gz" @@ -115,10 +115,10 @@ ENV PYTHONPATH=".:./ci" # A directory for cache RUN mkdir /dev/shm/clickhouse -COPY --from=clickhouse/cctools:859fb360308eb8ac47ad /opt/gdb /opt/gdb +COPY --from=altinityinfra/cctools:7c093c1cde57d744be57 /opt/gdb /opt/gdb ENV PATH="/opt/gdb/bin:${PATH}" -COPY --from=clickhouse/cctools:859fb360308eb8ac47ad \ +COPY --from=altinityinfra/cctools:7c093c1cde57d744be57 \ /opt/openssl-fips/openssl.cnf \ /opt/openssl-fips/fipsmodule.cnf \ /opt/openssl-fips/fips.so \ diff --git a/ci/docker/stress-test/Dockerfile b/ci/docker/stress-test/Dockerfile index 866480f27a8b..0b0a8fcba8e6 100644 --- a/ci/docker/stress-test/Dockerfile +++ b/ci/docker/stress-test/Dockerfile @@ -1,7 +1,7 @@ # rebuild in #33610 -# docker build -t clickhouse/stress-test . +# docker build -t altinityinfra/stress-test . ARG FROM_TAG=latest -FROM clickhouse/stateless-test:$FROM_TAG +FROM altinityinfra/stateless-test:$FROM_TAG RUN apt-get update -y \ && env DEBIAN_FRONTEND=noninteractive \ diff --git a/ci/docker/stress-test/README.md b/ci/docker/stress-test/README.md index fe73555fbd23..3d0fa2c9f467 100644 --- a/ci/docker/stress-test/README.md +++ b/ci/docker/stress-test/README.md @@ -6,7 +6,7 @@ Usage: ``` $ ls $HOME/someclickhouse clickhouse-client_18.14.9_all.deb clickhouse-common-static_18.14.9_amd64.deb clickhouse-server_18.14.9_all.deb -$ docker run --volume=$HOME/someclickhouse:/package_folder --volume=$HOME/test_output:/test_output clickhouse/stress-test +$ docker run --volume=$HOME/someclickhouse:/package_folder --volume=$HOME/test_output:/test_output altinityinfra/stress-test Selecting previously unselected package clickhouse-common-static. (Reading database ... 14442 files and directories currently installed.) ... diff --git a/ci/docker/style-test/Dockerfile b/ci/docker/style-test/Dockerfile index 9c83329c0db7..7bb4e502cc4b 100644 --- a/ci/docker/style-test/Dockerfile +++ b/ci/docker/style-test/Dockerfile @@ -1,4 +1,4 @@ -# docker build -t clickhouse/style-test . +# docker build -t altinityinfra/style-test . FROM ubuntu:22.04 RUN apt-get update && env DEBIAN_FRONTEND=noninteractive apt-get install --yes \ diff --git a/ci/docker/test-base/Dockerfile b/ci/docker/test-base/Dockerfile index 21f8bac5548a..2a5fc662c2ff 100644 --- a/ci/docker/test-base/Dockerfile +++ b/ci/docker/test-base/Dockerfile @@ -1,4 +1,4 @@ -# docker build -t clickhouse/test-base . +# docker build -t altinityinfra/test-base . 
FROM ubuntu:22.04 # ARG for quick switch to a given ubuntu mirror @@ -76,10 +76,10 @@ RUN apt-get update \ && rm -rf /var/lib/apt/lists/* /var/cache/debconf /tmp/* # Note, libmpfr6 is also a requirement for gdb -COPY --from=clickhouse/cctools:859fb360308eb8ac47ad /opt/gdb /opt/gdb +COPY --from=altinityinfra/cctools:33434f0a9e7e979bc907 /opt/gdb /opt/gdb ENV PATH="/opt/gdb/bin:${PATH}" -COPY --from=clickhouse/cctools:859fb360308eb8ac47ad \ +COPY --from=altinityinfra/cctools:33434f0a9e7e979bc907 \ /opt/openssl-fips/openssl.cnf \ /opt/openssl-fips/fipsmodule.cnf \ /opt/openssl-fips/fips.so \ diff --git a/ci/jobs/ast_fuzzer_job.py b/ci/jobs/ast_fuzzer_job.py index e36ac37be585..ccb8fb09bf9a 100644 --- a/ci/jobs/ast_fuzzer_job.py +++ b/ci/jobs/ast_fuzzer_job.py @@ -12,7 +12,7 @@ from ci.praktika.result import Result from ci.praktika.utils import Shell, Utils -IMAGE_NAME = "clickhouse/fuzzer" +IMAGE_NAME = "altinityinfra/fuzzer" # Maximum number of reproduce commands to display inline before writing to file MAX_INLINE_REPRODUCE_COMMANDS = 20 diff --git a/ci/jobs/build_clickhouse.py b/ci/jobs/build_clickhouse.py index 84bfd71aa516..a7948079b28b 100644 --- a/ci/jobs/build_clickhouse.py +++ b/ci/jobs/build_clickhouse.py @@ -117,12 +117,13 @@ def main(): os.environ["CTCACHE_S3_BUCKET"] = Settings.S3_ARTIFACT_PATH os.environ["CTCACHE_S3_FOLDER"] = "ccache/clang-tidy-cache" - os.environ["CH_HOSTNAME"] = ( - "https://build-cache.eu-west-1.aws.clickhouse-staging.com" - ) - os.environ["CH_USER"] = "ci_builder" - os.environ["CH_PASSWORD"] = chcache_secret.get_value() - os.environ["CH_USE_LOCAL_CACHE"] = "false" + # NOTE (strtgbb): Not used yet, but we should look into setting up the secrets for it + # os.environ["CH_HOSTNAME"] = ( + # "https://build-cache.eu-west-1.aws.clickhouse-staging.com" + # ) + # os.environ["CH_USER"] = "ci_builder" + # os.environ["CH_PASSWORD"] = chcache_secret.get_value() + # os.environ["CH_USE_LOCAL_CACHE"] = "false" if info.pr_number == 0: cmake_cmd += " -DCLICKHOUSE_OFFICIAL_BUILD=1" diff --git a/ci/jobs/clickbench.py b/ci/jobs/clickbench.py index a34f2ac27bdb..3ff161d40560 100644 --- a/ci/jobs/clickbench.py +++ b/ci/jobs/clickbench.py @@ -20,7 +20,7 @@ def install(): res = ch.install_clickbench_config() if info.is_local_run: return res - return res and ch.create_log_export_config() + return res # and ch.create_log_export_config() results.append( Result.from_commands_run(name="Install ClickHouse", command=install) @@ -34,7 +34,7 @@ def start(): res = ch.start_light() if info.is_local_run: return res - return res and ch.start_log_exports(check_start_time=stop_watch.start_time) + return res # and ch.start_log_exports(check_start_time=stop_watch.start_time) results.append( Result.from_commands_run( diff --git a/ci/jobs/fast_test.py b/ci/jobs/fast_test.py index 72d6b8142a4a..6266abf3b968 100644 --- a/ci/jobs/fast_test.py +++ b/ci/jobs/fast_test.py @@ -261,7 +261,9 @@ def main(): print(step_name) res = res and CH.run_fast_test(test=args.test or "") if res: - results.append(FTResultsProcessor(wd=Settings.OUTPUT_DIR).run()) + results.append( + FTResultsProcessor(wd=Settings.OUTPUT_DIR, test_options=["fast"]).run() + ) results[-1].set_timing(stopwatch=stop_watch_) else: results.append( diff --git a/ci/jobs/functional_tests.py b/ci/jobs/functional_tests.py index 60024d77884e..0c721f9f985c 100644 --- a/ci/jobs/functional_tests.py +++ b/ci/jobs/functional_tests.py @@ -1,5 +1,6 @@ import argparse import os +import re import random import subprocess from pathlib import Path @@ -123,6 +124,7 
@@ def run_tests( "azure": " --azure-blob-storage --no-random-settings --no-random-merge-tree-settings", # azurite is slow, with randomization it can be super slow "parallel": "--no-sequential", "sequential": "--no-parallel", + "amd_tsan": " --timeout 1200", # NOTE (strtgbb): tsan is slow, increase the timeout to avoid timeout errors "flaky check": "--flaky-check", "targeted": "--flaky-check", # to disable tests not compatible with the thread fuzzer } @@ -227,10 +229,12 @@ def main(): if not info.is_local_run: # TODO: find a way to work with Azure secret so it's ok for local tests as well, for now keep azure disabled - os.environ["AZURE_CONNECTION_STRING"] = Shell.get_output( - f"aws ssm get-parameter --region us-east-1 --name azure_connection_string --with-decryption --output text --query Parameter.Value", - verbose=True, - ) + # os.environ["AZURE_CONNECTION_STRING"] = Shell.get_output( + # f"aws ssm get-parameter --region us-east-1 --name azure_connection_string --with-decryption --output text --query Parameter.Value", + # verbose=True, + # ) + # NOTE(strtgbb): We pass azure credentials through the docker command, not SSM. + pass else: print("Disable azure for a local run") config_installs_args += " --no-azure" @@ -376,12 +380,13 @@ def main(): if res and JobStages.INSTALL_CLICKHOUSE in stages: - def configure_log_export(): - if not info.is_local_run: - print("prepare log export config") - return CH.create_log_export_config() - else: - print("skip log export config for local run") + # NOTE (strtgbb): Disable log export throughout this file, it depends on aws ssm, which we don't have configured + # def configure_log_export(): + # if not info.is_local_run: + # print("prepare log export config") + # return CH.create_log_export_config() + # else: + # print("skip log export config for local run") commands = [ f"rm -rf /etc/clickhouse-client/* /etc/clickhouse-server/*", @@ -410,8 +415,8 @@ def configure_log_export(): f"prof_active:true,prof_prefix:{temp_dir}/jemalloc_profiles/clickhouse.jemalloc" ) - if not is_coverage: - commands.append(configure_log_export) + # if not is_coverage: + # commands.append(configure_log_export) results.append( Result.from_commands_run(name="Install ClickHouse", command=commands) @@ -432,12 +437,13 @@ def start(): res = res and CH.start() res = res and CH.wait_ready() if res: - if not Info().is_local_run: - if not CH.start_log_exports(stop_watch.start_time): - info.add_workflow_report_message( - "WARNING: Failed to start log export" - ) - print("Failed to start log export") + # Note (strtgbb): We don't use this + # if not Info().is_local_run: + # if not CH.start_log_exports(stop_watch.start_time): + # info.add_workflow_report_message( + # "WARNING: Failed to start log export" + # ) + # print("Failed to start log export") if not CH.create_minio_log_tables(): info.add_workflow_report_message( "WARNING: Failed to create minio log tables" @@ -483,7 +489,7 @@ def start(): run_sets_cnt = rerun_count if is_targeted_check else 1 rerun_count = 1 if is_targeted_check else rerun_count - ft_res_processor = FTResultsProcessor(wd=temp_dir) + ft_res_processor = FTResultsProcessor(wd=temp_dir, test_options=test_options) # Track collected test results across multiple runs (only used when run_sets_cnt > 1) collected_test_results = [] @@ -563,7 +569,9 @@ def start(): ) ) elif failed_tests: - ft_res_processor = FTResultsProcessor(wd=temp_dir) + ft_res_processor = FTResultsProcessor( + wd=temp_dir, test_options=test_options + ) run_tests( batch_num=0, batch_total=0, diff --git 
a/ci/jobs/fuzzers_job.py b/ci/jobs/fuzzers_job.py index 666178c0da60..e699063fbf31 100644 --- a/ci/jobs/fuzzers_job.py +++ b/ci/jobs/fuzzers_job.py @@ -33,8 +33,9 @@ def start(): # TODO: attach gdb # and ch.attach_gdb() - if ch.create_log_export_config(): - ch.start_log_exports(check_start_time=stop_watch.start_time) + # Note (strtgbb): We don't use this + # if ch.create_log_export_config(): + # ch.start_log_exports(check_start_time=stop_watch.start_time) if res: print("AST Fuzzer") diff --git a/ci/jobs/install_check.py b/ci/jobs/install_check.py index dc4fc98a74b4..501755d6db42 100644 --- a/ci/jobs/install_check.py +++ b/ci/jobs/install_check.py @@ -6,8 +6,8 @@ from ci.praktika.result import Result from ci.praktika.utils import Shell, Utils -RPM_IMAGE = "clickhouse/install-rpm-test" -DEB_IMAGE = "clickhouse/install-deb-test" +RPM_IMAGE = "altinityinfra/install-rpm-test" +DEB_IMAGE = "altinityinfra/install-deb-test" REPO_PATH = Utils.cwd() TEMP_PATH = Path(f"{REPO_PATH}/ci/tmp/") @@ -23,7 +23,10 @@ def prepare_test_scripts(): # listening manually here. systemctl restart clickhouse-server clickhouse-client -q 'SELECT version()' -grep "$test_env" /proc/$(cat /var/run/clickhouse-server/clickhouse-server.pid)/environ""" +grep "$test_env" /proc/$(cat /var/run/clickhouse-server/clickhouse-server.pid)/environ +echo "Check Stacktrace" +output=$(clickhouse-local --stacktrace --query="SELECT throwIf(1,'throw')" 2>&1 >/dev/null || true) +echo "$output" | grep 'FunctionThrowIf::executeImpl'""" initd_via_systemd_test = r"""#!/bin/bash set -e trap "bash -ex /packages/preserve_logs.sh" ERR diff --git a/ci/jobs/scripts/clickhouse_proc.py b/ci/jobs/scripts/clickhouse_proc.py index 19e42c116cb1..7582c2dcac21 100644 --- a/ci/jobs/scripts/clickhouse_proc.py +++ b/ci/jobs/scripts/clickhouse_proc.py @@ -134,14 +134,16 @@ def start_minio(self, test_type): ) print(f"Started setup_minio.sh asynchronously with PID {self.minio_proc.pid}") - for _ in range(20): + print("Waiting for minio to start...") + for _ in range(10): res = Shell.check( "/mc ls clickminio/test | grep -q .", verbose=True, ) if res: + print("Minio started successfully") return True - time.sleep(1) + time.sleep(3) print("Failed to start minio") return False @@ -661,16 +663,17 @@ def run_fast_test(self, test=""): return exit_code == 0 def terminate(self): - if self.minio_proc: - # remove the webhook so it doesn't spam with errors once we stop ClickHouse - Shell.check( - "/mc admin config reset clickminio logger_webhook:ch_server_webhook", - verbose=True, - ) - Shell.check( - "/mc admin config reset clickminio audit_webhook:ch_audit_webhook", - verbose=True, - ) + # NOTE (strtgbb): Log tables are disabled, we don't use them + # if self.minio_proc: + # # remove the webhook so it doesn't spam with errors once we stop ClickHouse + # Shell.check( + # "/mc admin config reset clickminio logger_webhook:ch_server_webhook", + # verbose=True, + # ) + # Shell.check( + # "/mc admin config reset clickminio audit_webhook:ch_audit_webhook", + # verbose=True, + # ) self._flush_system_logs() @@ -1041,8 +1044,8 @@ def dump_system_tables(self): "error_log", "query_metric_log", "part_log", - "minio_audit_logs", - "minio_server_logs", + # "minio_audit_logs", # NOTE (strtgbb): we do not use these logs + # "minio_server_logs", ] ROWS_COUNT_IN_SYSTEM_TABLE_LIMIT = 10_000_000 @@ -1251,6 +1254,7 @@ def set_random_timezone(): else: res = True elif command == "logs_export_start": + exit(0) # Note (strtgbb): We don't use log exports # FIXME: the start_time must be preserved 
diff --git a/ci/jobs/scripts/clickhouse_proc.py b/ci/jobs/scripts/clickhouse_proc.py
index 19e42c116cb1..7582c2dcac21 100644
--- a/ci/jobs/scripts/clickhouse_proc.py
+++ b/ci/jobs/scripts/clickhouse_proc.py
@@ -134,14 +134,16 @@ def start_minio(self, test_type):
         )
         print(f"Started setup_minio.sh asynchronously with PID {self.minio_proc.pid}")
 
-        for _ in range(20):
+        print("Waiting for minio to start...")
+        for _ in range(10):
             res = Shell.check(
                 "/mc ls clickminio/test | grep -q .",
                 verbose=True,
             )
             if res:
+                print("Minio started successfully")
                 return True
-            time.sleep(1)
+            time.sleep(3)
 
         print("Failed to start minio")
         return False
@@ -661,16 +663,17 @@ def run_fast_test(self, test=""):
         return exit_code == 0
 
     def terminate(self):
-        if self.minio_proc:
-            # remove the webhook so it doesn't spam with errors once we stop ClickHouse
-            Shell.check(
-                "/mc admin config reset clickminio logger_webhook:ch_server_webhook",
-                verbose=True,
-            )
-            Shell.check(
-                "/mc admin config reset clickminio audit_webhook:ch_audit_webhook",
-                verbose=True,
-            )
+        # NOTE (strtgbb): Log tables are disabled, we don't use them
+        # if self.minio_proc:
+        #     # remove the webhook so it doesn't spam with errors once we stop ClickHouse
+        #     Shell.check(
+        #         "/mc admin config reset clickminio logger_webhook:ch_server_webhook",
+        #         verbose=True,
+        #     )
+        #     Shell.check(
+        #         "/mc admin config reset clickminio audit_webhook:ch_audit_webhook",
+        #         verbose=True,
+        #     )
 
         self._flush_system_logs()
 
@@ -1041,8 +1044,8 @@ def dump_system_tables(self):
             "error_log",
             "query_metric_log",
             "part_log",
-            "minio_audit_logs",
-            "minio_server_logs",
+            # "minio_audit_logs",  # NOTE (strtgbb): we do not use these logs
+            # "minio_server_logs",
         ]
 
         ROWS_COUNT_IN_SYSTEM_TABLE_LIMIT = 10_000_000
@@ -1251,6 +1254,7 @@ def set_random_timezone():
         else:
            res = True
     elif command == "logs_export_start":
+        exit(0)  # Note (strtgbb): We don't use log exports
         # FIXME: the start_time must be preserved globally in ENV or something like that
         # to get the same values in different DBs
         # As a wild idea, it could be stored in a Info.check_start_timestamp
@@ -1261,6 +1265,7 @@ def set_random_timezone():
         else:
             res = True
     elif command == "logs_export_stop":
+        exit(0)  # Note (strtgbb): We don't use log exports
         if not Info().is_local_run:
             # Disable log export for local runs - ideally this command wouldn't be triggered,
             # but conditional disabling is complex in legacy bash scripts (run_fuzzer.sh, stress_runner.sh)
diff --git a/ci/jobs/scripts/clickhouse_version.py b/ci/jobs/scripts/clickhouse_version.py
index 205858193306..a53d75ecbb64 100644
--- a/ci/jobs/scripts/clickhouse_version.py
+++ b/ci/jobs/scripts/clickhouse_version.py
@@ -1,9 +1,21 @@
 import re
+import sys
+import os
 from pathlib import Path
 
 from ci.praktika.info import Info
 from ci.praktika.utils import Shell
 
+# NOTE(vnemkov): extremely hackish, but allows to reuse code from version_helper and git_helper with our modifications.
+
+# allow to import other packages that are located in `tests/ci` directory, like `git_helper`
+import tests.ci
+sys.path.append(os.path.abspath(tests.ci.__path__._path[0]))
+from tests.ci.version_helper import (
+    read_versions,
+    get_version_from_repo,
+    get_version_from_tag
+)
 
 class CHVersion:
     FILE_WITH_VERSION_PATH = "./cmake/autogenerated_versions.txt"
@@ -15,6 +27,8 @@ class CHVersion:
 SET(VERSION_MINOR {minor})
 SET(VERSION_PATCH {patch})
 SET(VERSION_GITHASH {githash})
+SET(VERSION_TWEAK {tweak})
+SET(VERSION_FLAVOUR {flavour})
 SET(VERSION_DESCRIBE {describe})
 SET(VERSION_STRING {string})
 """
@@ -41,6 +55,8 @@ def get_release_version_as_dict(cls):
             "patch": versions["patch"],
             "revision": versions["revision"],
             "githash": versions["githash"],
+            "tweak": versions["tweak"],
+            "flavour": versions["flavour"],
             "describe": versions["describe"],
             "string": versions["string"],
         }
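With tweak and flavour added, the rendered cmake/autogenerated_versions.txt carries two extra SET lines. A sketch of the expansion; the VERSION_MAJOR line is assumed from context (it sits above the hunk) and all values below are invented for illustration:

VERSION_TEMPLATE = """SET(VERSION_MAJOR {major})
SET(VERSION_MINOR {minor})
SET(VERSION_PATCH {patch})
SET(VERSION_GITHASH {githash})
SET(VERSION_TWEAK {tweak})
SET(VERSION_FLAVOUR {flavour})
SET(VERSION_DESCRIBE {describe})
SET(VERSION_STRING {string})
"""

# Example values only; real ones come from the repo and tags.
print(VERSION_TEMPLATE.format(
    major=26, minor=1, patch=2, githash="0" * 40,
    tweak=11, flavour="altinitystable",
    describe="v26.1.2.11.altinitystable", string="26.1.2.11",
))
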
diff --git a/ci/jobs/scripts/functional_tests_results.py b/ci/jobs/scripts/functional_tests_results.py
index 4a95e7254b2b..a1fc66c202d1 100755
--- a/ci/jobs/scripts/functional_tests_results.py
+++ b/ci/jobs/scripts/functional_tests_results.py
@@ -1,7 +1,10 @@
 import dataclasses
 import re
+import json
+import os
 import traceback
 from typing import List
+import yaml
 
 from praktika.result import Result
 
@@ -27,6 +30,100 @@
     r"^\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2} ([\w\-\.]+):\s+(\[ (?:OK|FAIL|SKIPPED|UNKNOWN|Timeout!) \])\s+([\d.]+) sec\."
 )
 
+
+def get_broken_tests_rules() -> dict:
+    broken_tests_file_path = "tests/broken_tests.yaml"
+    if (
+        not os.path.isfile(broken_tests_file_path)
+        or os.path.getsize(broken_tests_file_path) == 0
+    ):
+        raise ValueError(
+            "There is something wrong with getting broken tests rules: "
+            f"file '{broken_tests_file_path}' is empty or does not exist."
+        )
+
+    with open(broken_tests_file_path, "r", encoding="utf-8") as broken_tests_file:
+        broken_tests = yaml.safe_load(broken_tests_file)
+
+    compiled_rules = {"exact": {}, "pattern": {}}
+
+    for test in broken_tests:
+        regex = test.get("regex") is True
+        rule = {
+            "reason": test["reason"],
+        }
+
+        if test.get("message"):
+            rule["message"] = re.compile(test["message"]) if regex else test["message"]
+
+        if test.get("not_message"):
+            rule["not_message"] = (
+                re.compile(test["not_message"]) if regex else test["not_message"]
+            )
+        if test.get("check_types"):
+            rule["check_types"] = test["check_types"]
+
+        if regex:
+            rule["regex"] = True
+            compiled_rules["pattern"][re.compile(test["name"])] = rule
+        else:
+            compiled_rules["exact"][test["name"]] = rule
+
+    print(
+        f"INFO: Compiled {len(compiled_rules['exact'])} exact rules and {len(compiled_rules['pattern'])} pattern rules"
+    )
+
+    return compiled_rules
+
+
+def test_is_known_fail(test_name, test_logs, known_broken_tests, test_options_string):
+    matching_rules = []
+
+    print(f"Checking known broken tests for failed test: {test_name}")
+    print("Potential matching rules:")
+    exact_rule = known_broken_tests["exact"].get(test_name)
+    if exact_rule:
+        print(f"{test_name} - {exact_rule}")
+        matching_rules.append(exact_rule)
+
+    for name_re, data in known_broken_tests["pattern"].items():
+        if name_re.fullmatch(test_name):
+            print(f"{name_re} - {data}")
+            matching_rules.append(data)
+
+    if not matching_rules:
+        return False
+
+    def matches_substring(substring, log, is_regex):
+        if log is None:
+            return False
+        if is_regex:
+            return bool(substring.search(log))
+        return substring in log
+
+    for rule_data in matching_rules:
+        if rule_data.get("check_types") and not any(
+            ct in test_options_string for ct in rule_data["check_types"]
+        ):
+            print(
+                f"Check types didn't match: '{rule_data['check_types']}' not in '{test_options_string}'"
+            )
+            continue  # check_types didn't match → skip rule
+
+        is_regex = rule_data.get("regex", False)
+        not_message = rule_data.get("not_message")
+        if not_message and matches_substring(not_message, test_logs, is_regex):
+            print(f"Skip rule: Not message matched: '{rule_data['not_message']}'")
+            continue  # not_message matched → skip rule
+
+        message = rule_data.get("message")
+        if message and not matches_substring(message, test_logs, is_regex):
+            print(f"Skip rule: Message didn't match: '{rule_data['message']}'")
+            continue
+
+        print(f"Test {test_name} matched rule: {rule_data}")
+        return rule_data["reason"]
+
+    return False
+
 
 class FTResultsProcessor:
     @dataclasses.dataclass
@@ -36,6 +133,7 @@ class Summary:
         unknown: int
         failed: int
         success: int
+        broken: int
         test_results: List[Result]
         hung: bool = False
         server_died: bool = False
@@ -43,9 +141,10 @@ class Summary:
         success_finish: bool = False
         test_end: bool = True
 
-    def __init__(self, wd):
+    def __init__(self, wd, test_options):
         self.tests_output_file = f"{wd}/test_result.txt"
         self.debug_files = []
+        self.test_options = test_options
 
     def _process_test_output(self):
         total = 0
@@ -53,6 +152,7 @@ def _process_test_output(self):
         unknown = 0
         failed = 0
         success = 0
+        broken = 0
         hung = False
         server_died = False
         retries = False
@@ -60,6 +160,8 @@ def _process_test_output(self):
         test_results = []
         test_end = True
 
+        known_broken_tests = get_broken_tests_rules()
+
         with open(self.tests_output_file, "r", encoding="utf-8") as test_file:
             for line in test_file:
                 original_line = line
@@ -123,6 +225,8 @@ def _process_test_output(self):
                 if DATABASE_SIGN in line:
                     test_end = True
 
+        test_options_string = ", ".join(self.test_options)
+
         test_results_ = []
         for test in test_results:
             try:
@@ -135,6 +239,21 @@ def _process_test_output(self):
                         info="".join(test[3])[:16384],
                     )
                 )
+
+                if test[1] == "FAIL":
+                    broken_message = test_is_known_fail(
+                        test[0],
+                        test_results_[-1].info,
+                        known_broken_tests,
+                        test_options_string,
+                    )
+
+                    if broken_message:
+                        broken += 1
+                        failed -= 1
+                        test_results_[-1].set_status(Result.StatusExtended.BROKEN)
+                        test_results_[-1].info += "\nMarked as broken: " + broken_message
+
             except Exception as e:
                 print(f"ERROR: Failed to parse test results: {test}")
                 traceback.print_exc()
@@ -160,6 +279,7 @@ def _process_test_output(self):
             unknown=unknown,
             failed=failed,
             success=success,
+            broken=broken,
             test_results=test_results,
             hung=hung,
             server_died=server_died,
@@ -197,7 +317,7 @@ def run(self, task_name="Tests"):
                 pass
 
         if not info:
-            info = f"Failed: {s.failed}, Passed: {s.success}, Skipped: {s.skipped}"
+            info = f"Failed: {s.failed}, Passed: {s.success}, Skipped: {s.skipped}, Broken: {s.broken}"
 
         result = Result.create_from(
             name=task_name,
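get_broken_tests_rules() expects tests/broken_tests.yaml to be a list of rule entries. A hypothetical example matching the fields the parser reads (name, regex, reason, message, not_message, check_types); when a FAIL matches a rule, the result is downgraded to BROKEN and the rule's reason is attached. The test names and ticket reference below are invented:

import yaml

SAMPLE_RULES = r"""
- name: 02456_some_flaky_test
  reason: "known flaky on s3 storage (hypothetical ticket ALT-1234)"
  message: "Connection reset by peer"
  check_types: ["s3 storage"]
- name: 01\d{3}_tsan_.*
  regex: true
  reason: "times out under tsan (hypothetical)"
  not_message: "Logical error"
"""

# Each entry becomes an exact rule, or a pattern rule when regex: true.
for rule in yaml.safe_load(SAMPLE_RULES):
    print(rule["name"], "->", rule["reason"])

message must appear in the failed test's log for the rule to apply, not_message vetoes the rule, and check_types restricts it to jobs whose option string contains one of the listed types.
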
= ", ".join(self.test_options) + test_results_ = [] for test in test_results: try: @@ -135,6 +239,21 @@ def _process_test_output(self): info="".join(test[3])[:16384], ) ) + + if test[1] == "FAIL": + broken_message = test_is_known_fail( + test[0], + test_results_[-1].info, + known_broken_tests, + test_options_string, + ) + + if broken_message: + broken += 1 + failed -= 1 + test_results_[-1].set_status(Result.StatusExtended.BROKEN) + test_results_[-1].info += "\nMarked as broken: " + broken_message + except Exception as e: print(f"ERROR: Failed to parse test results: {test}") traceback.print_exc() @@ -160,6 +279,7 @@ def _process_test_output(self): unknown=unknown, failed=failed, success=success, + broken=broken, test_results=test_results, hung=hung, server_died=server_died, @@ -197,7 +317,7 @@ def run(self, task_name="Tests"): pass if not info: - info = f"Failed: {s.failed}, Passed: {s.success}, Skipped: {s.skipped}" + info = f"Failed: {s.failed}, Passed: {s.success}, Skipped: {s.skipped}, Broken: {s.broken}" result = Result.create_from( name=task_name, diff --git a/ci/jobs/scripts/fuzzer/run-fuzzer.sh b/ci/jobs/scripts/fuzzer/run-fuzzer.sh index 7704723eb366..8ca6cc72a34c 100755 --- a/ci/jobs/scripts/fuzzer/run-fuzzer.sh +++ b/ci/jobs/scripts/fuzzer/run-fuzzer.sh @@ -58,8 +58,8 @@ EOL $PWD EOL - - (cd $repo_dir && python3 $repo_dir/ci/jobs/scripts/clickhouse_proc.py logs_export_config) || echo "Failed to create log export config" + # NOTE (strtgbb): Log tables are disabled, we don't use them + # (cd $repo_dir && python3 $repo_dir/ci/jobs/scripts/clickhouse_proc.py logs_export_config) || echo "Failed to create log export config" } function filter_exists_and_template @@ -183,7 +183,8 @@ function fuzz echo 'Server started and responded.' - (cd $repo_dir && python3 $repo_dir/ci/jobs/scripts/clickhouse_proc.py logs_export_start) || echo "Failed to start log exports" + # NOTE (strtgbb): Log tables are disabled, we don't use them + # (cd $repo_dir && python3 $repo_dir/ci/jobs/scripts/clickhouse_proc.py logs_export_start) || echo "Failed to start log exports" # Setup arguments for the fuzzer FUZZER_OUTPUT_SQL_FILE='' diff --git a/ci/jobs/scripts/integration_tests_configs.py b/ci/jobs/scripts/integration_tests_configs.py index eea01ad5ce26..967eeade28a9 100644 --- a/ci/jobs/scripts/integration_tests_configs.py +++ b/ci/jobs/scripts/integration_tests_configs.py @@ -35,24 +35,24 @@ class TC: ] IMAGES_ENV = { - "clickhouse/dotnet-client": "DOCKER_DOTNET_CLIENT_TAG", - "clickhouse/integration-helper": "DOCKER_HELPER_TAG", - "clickhouse/integration-test": "DOCKER_BASE_TAG", - "clickhouse/kerberos-kdc": "DOCKER_KERBEROS_KDC_TAG", - "clickhouse/test-mysql80": "DOCKER_TEST_MYSQL80_TAG", - "clickhouse/test-mysql57": "DOCKER_TEST_MYSQL57_TAG", - "clickhouse/mysql-golang-client": "DOCKER_MYSQL_GOLANG_CLIENT_TAG", - "clickhouse/mysql-java-client": "DOCKER_MYSQL_JAVA_CLIENT_TAG", - "clickhouse/mysql-js-client": "DOCKER_MYSQL_JS_CLIENT_TAG", - "clickhouse/arrowflight-server-test": "DOCKER_ARROWFLIGHT_SERVER_TAG", - "clickhouse/mysql-php-client": "DOCKER_MYSQL_PHP_CLIENT_TAG", - "clickhouse/nginx-dav": "DOCKER_NGINX_DAV_TAG", - "clickhouse/postgresql-java-client": "DOCKER_POSTGRESQL_JAVA_CLIENT_TAG", - "clickhouse/python-bottle": "DOCKER_PYTHON_BOTTLE_TAG", - "clickhouse/integration-test-with-unity-catalog": "DOCKER_BASE_WITH_UNITY_CATALOG_TAG", - "clickhouse/integration-test-with-hms": "DOCKER_BASE_WITH_HMS_TAG", - "clickhouse/mysql_dotnet_client": "DOCKER_MYSQL_DOTNET_CLIENT_TAG", - "clickhouse/s3-proxy": 
"DOCKER_S3_PROXY_TAG", + "altinityinfra/dotnet-client": "DOCKER_DOTNET_CLIENT_TAG", + "altinityinfra/integration-helper": "DOCKER_HELPER_TAG", + "altinityinfra/integration-test": "DOCKER_BASE_TAG", + "altinityinfra/kerberos-kdc": "DOCKER_KERBEROS_KDC_TAG", + "altinityinfra/test-mysql80": "DOCKER_TEST_MYSQL80_TAG", + "altinityinfra/test-mysql57": "DOCKER_TEST_MYSQL57_TAG", + "altinityinfra/mysql-golang-client": "DOCKER_MYSQL_GOLANG_CLIENT_TAG", + "altinityinfra/mysql-java-client": "DOCKER_MYSQL_JAVA_CLIENT_TAG", + "altinityinfra/mysql-js-client": "DOCKER_MYSQL_JS_CLIENT_TAG", + "altinityinfra/arrowflight-server-test": "DOCKER_ARROWFLIGHT_SERVER_TAG", + "altinityinfra/mysql-php-client": "DOCKER_MYSQL_PHP_CLIENT_TAG", + "altinityinfra/nginx-dav": "DOCKER_NGINX_DAV_TAG", + "altinityinfra/postgresql-java-client": "DOCKER_POSTGRESQL_JAVA_CLIENT_TAG", + "altinityinfra/python-bottle": "DOCKER_PYTHON_BOTTLE_TAG", + "altinityinfra/integration-test-with-unity-catalog": "DOCKER_BASE_WITH_UNITY_CATALOG_TAG", + "altinityinfra/integration-test-with-hms": "DOCKER_BASE_WITH_HMS_TAG", + "altinityinfra/mysql_dotnet_client": "DOCKER_MYSQL_DOTNET_CLIENT_TAG", + "altinityinfra/s3-proxy": "DOCKER_S3_PROXY_TAG", } diff --git a/ci/jobs/scripts/workflow_hooks/filter_job.py b/ci/jobs/scripts/workflow_hooks/filter_job.py index 8e4239a4f892..0893c538b0b1 100644 --- a/ci/jobs/scripts/workflow_hooks/filter_job.py +++ b/ci/jobs/scripts/workflow_hooks/filter_job.py @@ -32,10 +32,10 @@ def only_docs(changed_files): ] PRELIMINARY_JOBS = [ - JobNames.STYLE_CHECK, + # JobNames.STYLE_CHECK, JobNames.FAST_TEST, - "Build (amd_tidy)", - "Build (arm_tidy)", + # "Build (amd_tidy)", + # "Build (arm_tidy)", ] INTEGRATION_TEST_FLAKY_CHECK_JOBS = [ @@ -179,6 +179,11 @@ def should_skip_job(job_name): return False, "" return True, "Skipped, not labeled with 'pr-performance'" + ci_exclude_tags = _info_cache.get_kv_data("ci_exclude_tags") or [] + for tag in ci_exclude_tags: + if tag in job_name: + return True, f"Skipped, job name includes excluded tag '{tag}'" + # If only the functional tests script changed, run only the first batch of stateless tests if changed_files and all( f.startswith("ci/") and f.endswith(".py") for f in changed_files diff --git a/ci/jobs/scripts/workflow_hooks/parse_ci_tags.py b/ci/jobs/scripts/workflow_hooks/parse_ci_tags.py new file mode 100644 index 000000000000..c28f59b552ee --- /dev/null +++ b/ci/jobs/scripts/workflow_hooks/parse_ci_tags.py @@ -0,0 +1,18 @@ +import re + +from ci.praktika.info import Info + + +def get_ci_tags(pr_body, tag_prefix): + pattern = rf"(- \[x\] + - true - true - https://crash.clickhouse.com/ + false + false + diff --git a/programs/server/dashboard.html b/programs/server/dashboard.html index 9c6817f7461c..7a60dd53c0e0 100644 --- a/programs/server/dashboard.html +++ b/programs/server/dashboard.html @@ -3,7 +3,7 @@ ClickHouse Dashboard - +