diff --git a/.github/workflows/atex.yaml b/.github/workflows/atex.yaml
new file mode 100644
index 000000000000..7974530534fc
--- /dev/null
+++ b/.github/workflows/atex.yaml
@@ -0,0 +1,308 @@
name: ATEX Testing Farm Integration

on:
  pull_request:
    types: [opened, synchronize, reopened]

env:
  ATEX_REPO: RHSecurityCompliance/atex-results-testing-farm
  ATEX_HTML_REPO: RHSecurityCompliance/atex-html
  CONTEST_REPO: RHSecurityCompliance/contest
  ARTIFACT_RETENTION_DAYS: 1
  TEST_TIMEOUT: 1440  # minutes (24 hours)

jobs:
  build_content:
    name: Build content for CentOS Stream ${{ matrix.centos_stream_major }}
    runs-on: ubuntu-latest
    strategy:
      matrix:
        centos_stream_major: [8, 9, 10]
    container:
      image: fedora:latest

    steps:
      - name: Checkout repository
        uses: actions/checkout@v4

      - name: Install system dependencies
        run: |
          dnf install -y \
            cmake make openscap-utils python3-pyyaml \
            bats ansible python3-pip ShellCheck git \
            gcc gcc-c++ python3-devel libxml2-devel \
            libxslt-devel python3-setuptools gawk

      - name: Install Python dependencies
        run: pip install pcre2==0.4.0 -r requirements.txt -r test-requirements.txt

      - name: Build content
        env:
          CENTOS_STREAM_MAJOR: ${{ matrix.centos_stream_major }}
        run: |
          rm -rf build
          mkdir build
          cd build

          # Build configuration matching Contest and scap-security-guide.spec defaults
          # Includes options required by tests to avoid rebuilds
          cmake ../ \
            -DCMAKE_BUILD_TYPE:STRING=Release \
            -DSSG_CENTOS_DERIVATIVES_ENABLED:BOOL=ON \
            -DSSG_PRODUCT_DEFAULT:BOOL=OFF \
            "-DSSG_PRODUCT_RHEL${CENTOS_STREAM_MAJOR}:BOOL=ON" \
            -DSSG_SCE_ENABLED:BOOL=ON \
            -DSSG_BASH_SCRIPTS_ENABLED:BOOL=OFF \
            -DSSG_BUILD_DISA_DELTA_FILES:BOOL=OFF \
            -DSSG_SEPARATE_SCAP_FILES_ENABLED:BOOL=OFF \
            -DSSG_ANSIBLE_PLAYBOOKS_PER_RULE_ENABLED:BOOL=ON

          # Build using all available cores
          cores=$(nproc) || cores=4
          make "-j$cores"

          # Clean up temporary metadata
          rm -rf jinja2_cache

      - name: Upload build artifacts
        uses: actions/upload-artifact@v4
        with:
          name: content-centos-stream${{ matrix.centos_stream_major }}
          path: .
          retention-days: ${{ env.ARTIFACT_RETENTION_DAYS }}

  test:
    name: Test on CentOS Stream ${{ matrix.centos_stream_major }}
    runs-on: ubuntu-latest
    needs: build_content
    strategy:
      matrix:
        centos_stream_major: [8, 9, 10]
    container:
      image: fedora:latest

    steps:
      - name: Checkout repository
        uses: actions/checkout@v4

      - name: Download build artifacts
        uses: actions/download-artifact@v4
        with:
          name: content-centos-stream${{ matrix.centos_stream_major }}
          path: content-centos-stream${{ matrix.centos_stream_major }}/

      - name: Checkout Contest framework
        uses: actions/checkout@v4
        with:
          repository: ${{ env.CONTEST_REPO }}
          ref: main
          path: contest
          fetch-depth: 1

      - name: Install test dependencies
        run: |
          dnf -y install python3-pip git rsync
          pip install fmf git+https://github.com/RHSecurityCompliance/atex.git

      - name: Run tests on Testing Farm
        env:
          TESTING_FARM_API_TOKEN: ${{ secrets.TESTING_FARM_API_TOKEN }}
          CS_MAJOR: ${{ matrix.centos_stream_major }}
        run: |
          python3 tests/run_tests_testingfarm.py \
            --contest-dir contest \
            --content-dir content-centos-stream${CS_MAJOR} \
            --plan "/plans/daily" \
            --tests "/hardening/host-os/oscap/stig" \
            --compose "CentOS-Stream-${CS_MAJOR}" \
            --arch x86_64 \
            --os-major-version "${CS_MAJOR}" \
            --timeout ${{ env.TEST_TIMEOUT }}

      - name: Upload test results
        if: always()
        uses: actions/upload-artifact@v4
        with:
          name: test-results-centos-stream${{ matrix.centos_stream_major }}
          path: |
            results-centos-stream-${{ matrix.centos_stream_major }}-x86_64.json.gz
            files-centos-stream-${{ matrix.centos_stream_major }}-x86_64/
            atex_debug.log.gz
          retention-days: ${{ env.ARTIFACT_RETENTION_DAYS }}

  upload:
    name: Upload and publish test results
    runs-on: ubuntu-latest
    needs: test
    if: always()  # Run even if tests fail
    container:
      image: fedora:latest

    steps:
      - name: Checkout repository
        uses: actions/checkout@v4

      - name: Install dependencies
        if: always()
        run: |
          dnf -y install python3-pip git rsync
          pip install fmf git+https://github.com/RHSecurityCompliance/atex.git

      - name: Checkout ATEX results repository
        if: always()
        uses: actions/checkout@v4
        with:
          repository: ${{ env.ATEX_REPO }}
          ref: main
          path: atex-results-testing-farm
          token: ${{ secrets.ATEX_RESULTS_TF_REPO_TOKEN }}

      - name: Initialize FMF metadata
        if: always()
        working-directory: atex-results-testing-farm
        run: fmf init

      - name: Create TMT dummy plan for artifact transport
        if: always()
        working-directory: atex-results-testing-farm
        run: |
          cat > main.fmf <<'EOF'
          /dummy_plan:
            discover:
              how: shell
              tests:
                - name: /dummy_test
                  test: mv * "$TMT_TEST_DATA/."
            execute:
              how: tmt
          EOF

      # Download test results for all CentOS Stream versions.
      # continue-on-error is a step-level key (not a download-artifact
      # input), so a missing artifact from a failed test job does not
      # fail the whole upload job.
      - name: Download test results - CentOS Stream 8
        if: always()
        continue-on-error: true
        uses: actions/download-artifact@v4
        with:
          name: test-results-centos-stream8
          path: test-results/cs8/

      - name: Download test results - CentOS Stream 9
        if: always()
        continue-on-error: true
        uses: actions/download-artifact@v4
        with:
          name: test-results-centos-stream9
          path: test-results/cs9/

      - name: Download test results - CentOS Stream 10
        if: always()
        continue-on-error: true
        uses: actions/download-artifact@v4
        with:
          name: test-results-centos-stream10
          path: test-results/cs10/

      - name: Checkout ATEX HTML viewer
        if: always()
        uses: actions/checkout@v4
        with:
          repository: ${{ env.ATEX_HTML_REPO }}
          ref: main
          path: atex-html

      - name: Update HTML title with PR number
        if: always()
        run: |
          # On the <title> line, replace the tag's content so the published
          # viewer identifies this PR.
          # NOTE(review): reconstructed from a garbled source line — confirm
          # against atex-html/index.html markup.
          sed "/<title>/s/>.*</>Test outputs from PR #${{ github.event.pull_request.number }} HTML</" \
            -i atex-html/index.html

      - name: Merge test results from all versions
        if: always()
        run: |
          mkdir -p atex-results-testing-farm/files_dir/

          # Process and merge results for all CentOS Stream versions.
          # Concatenated gzip members form a valid gzip stream, so a plain
          # `cat` of the per-version .json.gz files merges them.
          for version in 8 9 10; do
            results_file="test-results/cs${version}/results-centos-stream-${version}-x86_64.json.gz"
            files_dir="test-results/cs${version}/files-centos-stream-${version}-x86_64"

            if [ -f "${results_file}" ]; then
              cat "${results_file}"
              rm -f "${results_file}"
              [ -d "${files_dir}" ] && cp -r "${files_dir}"/* atex-results-testing-farm/files_dir/
            fi
          done > results.json.gz

      - name: Convert results to SQLite database
        if: always()
        run: |
          # Use python3 explicitly: the unversioned `python` command is not
          # guaranteed to exist in the fedora container.
          python3 atex-html/json2db.py results.json.gz atex-results-testing-farm/results.sqlite.gz

      - name: Prepare HTML results viewer
        if: always()
        run: |
          cp -rf atex-html/index.html atex-html/sqljs/ atex-results-testing-farm/

      - name: Commit and tag results in ATEX repository
        if: always()
        working-directory: atex-results-testing-farm
        env:
          GH_TOKEN: ${{ secrets.ATEX_RESULTS_TF_REPO_TOKEN }}
          PR_NUMBER: ${{ github.event.pull_request.number }}
        run: |
          git config user.name "openscap-ci[bot]"
          git config user.email "openscap.ci@gmail.com"

          git add .
          git commit -m "Test outputs from PR #${PR_NUMBER}"
          # Force the tag so re-runs for the same PR (synchronize events)
          # can replace a tag left behind by an earlier run.
          git tag -f "PR${PR_NUMBER}"
          git push --force origin "PR${PR_NUMBER}"

      - name: Submit results to Testing Farm
        if: always()
        id: testing_farm_request
        env:
          TESTING_FARM_API_TOKEN: ${{ secrets.TESTING_FARM_API_TOKEN }}
          PR_NUMBER: ${{ github.event.pull_request.number }}
        run: |
          python3 tests/submit_results_to_testing_farm.py \
            --repo-url "https://github.com/${{ env.ATEX_REPO }}" \
            --pr-number "${PR_NUMBER}" 2>&1 | tee tf_output.log

          # Extract HTML link from output
          html_link=$(grep -oP 'HTML: \K.*' tf_output.log || echo 'No HTML link found')
          echo "HTML_LINK=${html_link}" >> "$GITHUB_OUTPUT"

      - name: Find existing PR comment
        if: always()
        uses: peter-evans/find-comment@3eae4d37986fb5a8592848f6a574fdf654e61f9e # v3
        id: fc
        with:
          issue-number: ${{ github.event.pull_request.number }}
          comment-author: 'github-actions[bot]'
          body-includes: Testing Farm Results

      - name: Create or update PR comment with results
        if: always()
        uses: peter-evans/create-or-update-comment@e8674b075228eee787fea43ef493e45ece1004c9 # v4
        with:
          comment-id: ${{ steps.fc.outputs.comment-id }}
          issue-number: ${{ github.event.pull_request.number }}
          body: |
            ### Testing Farm Results

            Test artifacts have been submitted to Testing Farm.

            **Results:** [View Test Results](${{ steps.testing_farm_request.outputs.HTML_LINK }})

            _This comment was automatically generated by the ATEX workflow._
          edit-mode: replace

      - name: Cleanup temporary tag
        if: always()
        working-directory: atex-results-testing-farm
        env:
          GH_TOKEN: ${{ secrets.ATEX_RESULTS_TF_REPO_TOKEN }}
          PR_NUMBER: ${{ github.event.pull_request.number }}
        run: |
          git push --delete origin "PR${PR_NUMBER}"
diff --git a/tests/run_tests_testingfarm.py b/tests/run_tests_testingfarm.py
new file mode 100644
index 000000000000..22bb70584069
--- /dev/null
+++ b/tests/run_tests_testingfarm.py
@@ -0,0 +1,128 @@
#!/usr/bin/python3
"""Run Contest tests on Testing Farm via the atex framework.

Discovers tests from a checked-out Contest repository (FMF metadata),
provisions machines on Testing Farm, runs the tests through the Contest
orchestrator, and aggregates results into a gzipped JSON file plus a
directory of per-test files (paths derived from OS major version/arch).
"""

import sys
import time
import gzip
import logging
import argparse
import contextlib
from pathlib import Path

from atex.provisioner.testingfarm import TestingFarmProvisioner
from atex.orchestrator.contest import ContestOrchestrator
from atex.aggregator.json import JSONAggregator
from atex.fmf import FMFTests

logger = logging.getLogger("ATEX")

# Parse command-line arguments
parser = argparse.ArgumentParser(description="Run tests on Testing Farm using atex")
parser.add_argument("--contest-dir", required=True, help="Path to contest repository")
parser.add_argument("--content-dir", required=True, help="Path to built content directory")
parser.add_argument("--plan", required=True, help="TMT plan to run (e.g., daily|ci-gating|weekly)")
parser.add_argument("--compose", required=True, help="compose (e.g., Centos-Stream-9)")
parser.add_argument("--arch", default="x86_64", help="Architecture")
parser.add_argument("--os-major-version", required=True, help="OS Major Version (8|9|10)")
parser.add_argument("--tests", nargs="*", help="Specific tests to run (optional, runs all if not specified)")
parser.add_argument("--timeout", type=int, default=120, help="Timeout in minutes")
parser.add_argument("--max-remotes", type=int, default=10, help="Maximum number of parallel test executions")
parser.add_argument("--reruns", type=int, default=1, help="Number of test reruns on failure")
args = parser.parse_args()

# variables export to tests
test_env = {
    "CONTEST_CONTENT": ContestOrchestrator.content_dir_on_remote,
    "CONTEST_VERBOSE": "2",
}

with contextlib.ExitStack() as stack:
    # log brief info to console, be verbose in a separate file-based log
    console_log = logging.StreamHandler(sys.stderr)
    console_log.setLevel(logging.INFO)
    # gzip the debug log on the fly; closed via the ExitStack on exit
    debug_log_fobj = stack.enter_context(gzip.open("atex_debug.log.gz", "wt"))
    file_log = logging.StreamHandler(debug_log_fobj)
    file_log.setLevel(logging.DEBUG)
    logging.basicConfig(
        level=logging.DEBUG,
        format="%(asctime)s %(name)s: %(message)s",
        datefmt="%Y-%m-%d %H:%M:%S",
        handlers=(console_log, file_log),
        force=True,
    )

    # Load FMF tests from contest directory
    # (empty --tests collapses to None, i.e. run everything in the plan)
    fmf_tests = FMFTests(
        args.contest_dir,
        args.plan,
        names=args.tests or None,
        context={
            "distro": f"centos-stream-{args.os_major_version}",
            "arch": args.arch,
        },
    )

    logger.info(f"plan: {args.plan}")
    logger.info(f"os major version: {args.os_major_version}")
    logger.info(f"arch: {args.arch}")
    logger.info(f"compose: {args.compose}")
    logger.info("will run:")
    for test in fmf_tests.tests:
        logger.info(f"  {test}")

    # Setup result aggregator; output names must match what the CI workflow
    # later uploads as artifacts
    output_results = f"results-centos-stream-{args.os_major_version}-{args.arch}.json.gz"
    output_files = f"files-centos-stream-{args.os_major_version}-{args.arch}"
    partial_runs = Path(output_files) / "old_runs"
    aggregator = JSONAggregator(output_results, output_files)
    stack.enter_context(aggregator)

    partial_runs.mkdir(parents=True, exist_ok=True)

    platform_name = f"cs{args.os_major_version}@{args.arch}"

    # Hardware requirements for Testing Farm
    # if args.arch == "x86_64":
    #    hw = {"virtualization": {"is-supported": True}, "memory": ">= 7 GB"}
    # else:
    #    hw = None

    # Setup Testing Farm provisioner
    # NOTE(review): assumes TestingFarmProvisioner's timeout is in minutes,
    # matching the --timeout help text — TODO confirm against atex docs
    prov = TestingFarmProvisioner(
        compose=args.compose,
        arch=args.arch,
        max_retries=2,
        timeout=args.timeout,
        # hardware=hw,
    )

    # Setup Contest orchestrator
    orchestrator = ContestOrchestrator(
        platform=platform_name,
        fmf_tests=fmf_tests,
        provisioners=[prov],
        aggregator=aggregator,
        tmp_dir=partial_runs,
        max_remotes=args.max_remotes,
        max_spares=2,
        max_reruns=args.reruns,
        content_dir=args.content_dir,
        env=test_env,
    )
    stack.enter_context(orchestrator)

    logger.info("Starting test execution...")
    # log a queued/running progress summary at most once every 10 minutes
    next_writeout = time.monotonic() + 600
    while orchestrator.serve_once():
        if time.monotonic() > next_writeout:
            logger.info(
                f"queued: {len(orchestrator.to_run)}/{len(fmf_tests.tests)} tests, "
                f"running: {len(orchestrator.running_tests)} tests",
            )
            next_writeout = time.monotonic() + 600
        time.sleep(1)

    logger.info("Test execution completed!")

# NOTE(review): nothing here exits non-zero on individual test failures —
# the script only reports where the aggregated results were written;
# failure evaluation presumably happens downstream — confirm
logger.info(f"Results written to: {output_results}")
logger.info(f"Test files in: {output_files}")
diff --git a/tests/submit_results_to_testing_farm.py b/tests/submit_results_to_testing_farm.py
new file mode 100644
index 000000000000..cf968efd61b3
--- /dev/null
+++ b/tests/submit_results_to_testing_farm.py
@@ -0,0 +1,88 @@
#!/usr/bin/python3
"""Submit a TMT plan from the results repository to Testing Farm.

Submits the given plan (by default the artifact-transport dummy plan) for a
previously pushed PR tag, waits for the request to complete, then extracts
the workdir URL from the run's results.xml and prints an HTML results link
(consumed by the CI workflow via the "HTML: ..." log line).
"""

import sys
import time
import atexit
import logging
import argparse
import xml.etree.ElementTree as ET

from atex.provisioner.testingfarm import api


# reuse urllib3 PoolManager configured for heavy Retry attempts
# (because of TestingFarm API reboots, and other transient issues)
http = api._http

logging.basicConfig(
    level=logging.INFO,  # use DEBUG to see HTTP queries
    stream=sys.stderr,
    format="%(asctime)s %(name)s: %(message)s",
    datefmt="%Y-%m-%d %H:%M:%S",
)

# Parse command-line arguments
parser = argparse.ArgumentParser(description="Submit TMT test to Testing Farm")
parser.add_argument("--repo-url", required=True, help="GitHub repository URL")
parser.add_argument("--pr-number", required=True, help="Pull request number")
parser.add_argument("--plan-name", default="/dummy_plan", help="TMT plan name to run")
parser.add_argument("--os", default=None, help="OS to test on (e.g., rhel-9)")
parser.add_argument("--arch", default="x86_64", help="Architecture to test on")
args = parser.parse_args()

# Testing Farm request payload: run the plan from the tag "PR<number>"
# that the CI workflow pushed to the results repository
request_json = {
    "test": {
        "fmf": {
            "url": args.repo_url,
            "ref": f"PR{args.pr_number}",
            "name": args.plan_name,
        },
    },
    "environments": [{"arch": args.arch, "os": args.os}],
}

# do faster queries than the default 30 secs, because we don't track
# many dozens of requests, just one
class FastRequest(api.Request):
    # seconds between API polls (overrides api.Request default)
    api_query_limit = 5

req = FastRequest()
req.submit(request_json)
atexit.register(req.cancel)  # just in case we traceback

req.wait_for_state("running")

# artifacts URL doesn't appear instantly, wait for it
# NOTE(review): assumes `"run" in req` re-queries the Testing Farm API on
# each membership check (rate-limited by api_query_limit) — TODO confirm;
# if the request state were cached, these loops would never terminate
while "run" not in req:
    time.sleep(FastRequest.api_query_limit)
while "artifacts" not in req["run"]:
    time.sleep(FastRequest.api_query_limit)

artifacts_url = req["run"]["artifacts"]
logging.info(f"artifacts: {artifacts_url}")

# results.xml appears only after completion
req.wait_for_state("complete")
# request finished normally, no need to cancel it on interpreter exit
atexit.unregister(req.cancel)

# get results.xml for those artifacts, which is a XML representation of the
# HTML artifacts view and contains links to logs and workdir
reply = http.request("GET", f"{artifacts_url}/results.xml")
if reply.status != 200:
    raise RuntimeError("could not get results.xml")

# find which log is the workdir and get its URL
# NOTE(review): find("testsuite") returns None when the element is absent,
# which would raise AttributeError here rather than the RuntimeError below
results_xml = ET.fromstring(reply.data)
for log in results_xml.find("testsuite").find("logs"):
    if log.get("name") == "workdir":
        workdir_url = log.get("href")
        break
else:
    raise RuntimeError("could not find workdir")

# TODO: a more reliable way would be to read
# {workdir_url}/testing-farm/sanity/execute/results.yaml
# as YAML and look for the test name and get its 'data-path'
# relative to the /execute/ dir
logging.info(f"HTML: {workdir_url}/dummy_plan/execute/data/guest/default-0/dummy_test-1/data/index.html?q=TRUE")