diff --git a/ci/test_cpp.sh b/ci/test_cpp.sh index 653c44133a..719e26bcb9 100755 --- a/ci/test_cpp.sh +++ b/ci/test_cpp.sh @@ -1,10 +1,14 @@ #!/bin/bash -# SPDX-FileCopyrightText: Copyright (c) 2023-2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 2023-2026, NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: Apache-2.0 set -euo pipefail +# shellcheck source=ci/utils/cuopt_coredumps.sh +source "$(dirname "${BASH_SOURCE[0]}")/utils/cuopt_coredumps.sh" +cuopt_coredumps_ci_setup + . /opt/conda/etc/profile.d/conda.sh rapids-logger "Configuring conda strict channel priority" @@ -47,12 +51,36 @@ popd EXITCODE=0 trap "EXITCODE=1" ERR set +e - # Run gtests from libcuopt-tests package export GTEST_OUTPUT=xml:${RAPIDS_TESTS_DIR}/ rapids-logger "Run gtests" timeout 40m ./ci/run_ctests.sh +# Optional core-dump path check: no compiled binary — child bash sends itself SIGSEGV. +# Child exits 139; || true keeps this script running so the EXIT trap can collect cores. +# For normal CI, leave unset and set CUOPT_CI_COREDUMP_PROBE=1 only when probing artifacts. +# CUOPT_CI_COREDUMP_PROBE=1 +if [[ "${CUOPT_CI_COREDUMP_PROBE:-}" == 1 ]]; then + rapids-logger "CUOPT_CI_COREDUMP_PROBE: child bash SIGSEGV (core dump artifact check)" + # Count core files before the probe. + _probe_n_before="$(find "${CUOPT_COREDUMP_DIR:-/dev/null}" -type f 2>/dev/null | wc -l | tr -d '[:space:]')" + bash -c 'kill -SEGV $$' || true + # Brief pause so the kernel can finish writing the core. + sleep 1 + # Eagerly collect now so we can verify the probe worked.
+ cuopt_collect_coredumps || true + _probe_n_after="$(find "${CUOPT_COREDUMP_DIR:-/dev/null}" -type f 2>/dev/null | wc -l | tr -d '[:space:]')" + if [[ "${_probe_n_after}" -gt "${_probe_n_before}" ]]; then + rapids-logger "COREDUMP_PROBE: SUCCESS — $((_probe_n_after - _probe_n_before)) core file(s) collected" + else + rapids-logger "COREDUMP_PROBE: FAILED — no core file collected for SIGSEGV probe" + rapids-logger " core_pattern=$(cat /proc/sys/kernel/core_pattern 2>/dev/null || echo n/a)" + rapids-logger " ulimit -c=$(ulimit -c)" + rapids-logger " CUOPT_COREDUMP_DIR=${CUOPT_COREDUMP_DIR:-unset}" + rapids-logger " Hint: core_pattern may require a privileged container or --cap-add=SYS_PTRACE" + fi +fi + rapids-logger "Test script exiting with value: $EXITCODE" exit ${EXITCODE} diff --git a/ci/test_cpp_memcheck.sh b/ci/test_cpp_memcheck.sh index bc4bce4cbc..8b61198240 100755 --- a/ci/test_cpp_memcheck.sh +++ b/ci/test_cpp_memcheck.sh @@ -1,6 +1,6 @@ #!/bin/bash -# SPDX-FileCopyrightText: Copyright (c) 2023-2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 2023-2026, NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: Apache-2.0 if [[ "$(date +%A)" != "Friday" ]]; then @@ -10,6 +10,10 @@ fi set -euo pipefail +# shellcheck source=ci/utils/cuopt_coredumps.sh +source "$(dirname "${BASH_SOURCE[0]}")/utils/cuopt_coredumps.sh" +cuopt_coredumps_ci_setup + . /opt/conda/etc/profile.d/conda.sh rapids-logger "Configuring conda strict channel priority" diff --git a/ci/test_notebooks.sh b/ci/test_notebooks.sh index 22c41af84c..54bb4d2967 100755 --- a/ci/test_notebooks.sh +++ b/ci/test_notebooks.sh @@ -1,10 +1,14 @@ #!/bin/bash -# SPDX-FileCopyrightText: Copyright (c) 2023-2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 2023-2026, NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
# SPDX-License-Identifier: Apache-2.0 set -euo pipefail +# shellcheck source=ci/utils/cuopt_coredumps.sh +source "$(dirname "${BASH_SOURCE[0]}")/utils/cuopt_coredumps.sh" +cuopt_coredumps_ci_setup + . /opt/conda/etc/profile.d/conda.sh rapids-logger "Configuring conda strict channel priority" diff --git a/ci/test_python.sh b/ci/test_python.sh index 4f91c83334..831d4dc174 100755 --- a/ci/test_python.sh +++ b/ci/test_python.sh @@ -5,6 +5,10 @@ set -euo pipefail +# shellcheck source=ci/utils/cuopt_coredumps.sh +source "$(dirname "${BASH_SOURCE[0]}")/utils/cuopt_coredumps.sh" +cuopt_coredumps_ci_setup + . /opt/conda/etc/profile.d/conda.sh rapids-logger "Configuring conda strict channel priority" diff --git a/ci/test_self_hosted_service.sh b/ci/test_self_hosted_service.sh index 601b45326b..ef0ab14b35 100755 --- a/ci/test_self_hosted_service.sh +++ b/ci/test_self_hosted_service.sh @@ -5,6 +5,10 @@ set -euo pipefail +# shellcheck source=ci/utils/cuopt_coredumps.sh +source "$(dirname "${BASH_SOURCE[0]}")/utils/cuopt_coredumps.sh" +cuopt_coredumps_ci_setup + source rapids-init-pip # Download the cuopt built in the previous step diff --git a/ci/test_skills_assets.sh b/ci/test_skills_assets.sh index c75645cb93..583044bea0 100755 --- a/ci/test_skills_assets.sh +++ b/ci/test_skills_assets.sh @@ -10,6 +10,10 @@ set -euo pipefail +# shellcheck source=ci/utils/cuopt_coredumps.sh +source "$(dirname "${BASH_SOURCE[0]}")/utils/cuopt_coredumps.sh" +cuopt_coredumps_ci_setup + # Use rapids-logger in CI; fall back to echo for local testing if command -v rapids-logger &>/dev/null; then log() { rapids-logger "$*"; } diff --git a/ci/test_wheel_cuopt.sh b/ci/test_wheel_cuopt.sh index a327082e83..2859b96fe3 100755 --- a/ci/test_wheel_cuopt.sh +++ b/ci/test_wheel_cuopt.sh @@ -5,6 +5,10 @@ set -euo pipefail +# shellcheck source=ci/utils/cuopt_coredumps.sh +source "$(dirname "${BASH_SOURCE[0]}")/utils/cuopt_coredumps.sh" +cuopt_coredumps_ci_setup + # sets up a constraints file for 'pip' and puts 
its location in an exported variable PIP_EXPORT, # so those constraints will affect all future 'pip install' calls source rapids-init-pip diff --git a/ci/test_wheel_cuopt_server.sh b/ci/test_wheel_cuopt_server.sh index a76969b965..cda880765c 100755 --- a/ci/test_wheel_cuopt_server.sh +++ b/ci/test_wheel_cuopt_server.sh @@ -5,6 +5,10 @@ set -eou pipefail +# shellcheck source=ci/utils/cuopt_coredumps.sh +source "$(dirname "${BASH_SOURCE[0]}")/utils/cuopt_coredumps.sh" +cuopt_coredumps_ci_setup + source rapids-init-pip # Download the packages built in the previous step diff --git a/ci/utils/cuopt_coredumps.sh b/ci/utils/cuopt_coredumps.sh new file mode 100644 index 0000000000..b0a85d5918 --- /dev/null +++ b/ci/utils/cuopt_coredumps.sh @@ -0,0 +1,389 @@ +#!/usr/bin/env bash + +# SPDX-FileCopyrightText: Copyright (c) 2026, NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-License-Identifier: Apache-2.0 + +# Enable core dumps during CI test scripts and collect files into +# ${RAPIDS_ARTIFACTS_DIR}/${CUOPT_GDB_CORE_ARTIFACT_DIR} so rapids-upload-artifacts-dir +# uploads them to S3 as: +# {rapids-matrix-prefix}.{cuopt-gdb-cores_JOB_cudaVER_pyVER_arch_BUILDTYPE} +# RAPIDS rapids-upload-to-s3 tgz-streams each directory (gzip-compressed tar); the object +# name often has no .tar.gz suffix in listings, but downloads are still archives. Very small +# sizes (~100 B) usually mean an almost-empty archive (no core files landed on disk). The +# trailing segment includes a job label, resolved in order: +# 1) CUOPT_CI_JOB_LABEL if set (workflow/setter can export the real GitHub job id). +# 2) GITHUB_JOB if it looks like a caller job id (not generic RAPIDS callee ids such as tests/test). +# 3) The ci/test_*.sh that sourced this file: label derived by naming rules (new drivers need +# no edits here — e.g. test_foo.sh → conda-foo-tests, test_wheel_bar.sh → wheel-bar-tests). +# Then CUDA / Python / arch / build_type from RAPIDS CI env. 
+# +# Test drivers: source ci/utils/cuopt_coredumps.sh from a sibling ci/test_*.sh, then call +# cuopt_coredumps_ci_setup (enable + EXIT trap). + +# Set in cuopt_enable_coredumps; collect reuses when non-empty. +CUOPT_GDB_CORE_ARTIFACT_DIR= + +# Reusable RAPIDS workflows often use non-unique job ids ("tests", "test", "build", …). +# GITHUB_JOB=test (singular) is common; treating it as meaningful produced labels like +# "test" and hid script-based names (wheel-cuopt-tests, conda-cpp-tests). +cuopt__github_job_is_generic() { + case "${1:-}" in + "" | test | tests | build | compute-matrix | prepare | package) return 0 ;; + *) return 1 ;; + esac +} + +# test_cpp.sh → conda-cpp-tests; test_wheel_cuopt_server.sh → wheel-cuopt-server-tests; etc. +cuopt__job_label_from_entry_script_basename() { + local b="$1" + b="${b%.sh}" + case "$b" in + test_wheel_*) + b="${b#test_wheel_}" + echo "wheel-${b//_/-}-tests" + ;; + test_self_hosted_*) + b="${b#test_self_hosted_}" + echo "self-hosted-${b//_/-}-tests" + ;; + test_skills_*) + b="${b#test_skills_}" + echo "conda-skills-${b//_/-}" + ;; + test_*memcheck) + b="${b#test_}" + echo "conda-${b//_/-}" + ;; + test_*) + b="${b#test_}" + echo "conda-${b//_/-}-tests" + ;; + *) + echo "unknown-job" + ;; + esac +} + +cuopt__find_ci_entry_test_script_basename() { + local i f base + for ((i = 0; i < ${#BASH_SOURCE[@]}; i++)); do + f="${BASH_SOURCE[$i]}" + base="$(basename "${f}")" + [[ "${base}" == "cuopt_coredumps.sh" ]] && continue + case "${base}" in + test_*.sh) echo "${base}"; return ;; + esac + done + echo "" +} + +cuopt__infer_ci_job_label_from_call_stack() { + local nb + nb="$(cuopt__find_ci_entry_test_script_basename)" + if [[ -n "${nb}" ]]; then + cuopt__job_label_from_entry_script_basename "${nb}" + return + fi + echo "unknown-job" +} + +cuopt__resolve_ci_job_label() { + if [[ -n "${CUOPT_CI_JOB_LABEL:-}" ]]; then + echo "${CUOPT_CI_JOB_LABEL}" + return + fi + if [[ -n "${GITHUB_JOB:-}" ]] && ! 
cuopt__github_job_is_generic "${GITHUB_JOB}"; then + echo "${GITHUB_JOB}" + return + fi + cuopt__infer_ci_job_label_from_call_stack +} + +cuopt__gdb_core_artifact_basename() { + local job cuda_ver py_ver arch_ bt + job="$(cuopt__resolve_ci_job_label)" + job="${job//[^a-zA-Z0-9_-]/_}" + cuda_ver="${RAPIDS_CUDA_VERSION:-unknown}" + cuda_ver="${cuda_ver//[^a-zA-Z0-9._-]/_}" + py_ver="${RAPIDS_PY_VERSION:-na}" + py_ver="${py_ver//[^a-zA-Z0-9._-]/_}" + arch_="$(arch 2>/dev/null || true)" + [[ -z "${arch_}" ]] && arch_="$(uname -m)" + arch_="${arch_//[^a-zA-Z0-9_-]/_}" + bt="${RAPIDS_BUILD_TYPE:-na}" + bt="${bt//[^a-zA-Z0-9_-]/_}" + echo "cuopt-gdb-cores_${job}_cuda${cuda_ver}_py${py_ver}_${arch_}_${bt}" +} + +cuopt__log() { + if declare -F rapids-logger &>/dev/null; then + rapids-logger "$1" + else + echo "$1" + fi +} + +# When core_pattern pipes to a handler (apport/systemd-coredump), replace the handler +# binary with a forwarder script that reads the core from stdin and writes it to disk. +# The kernel invokes: |/path/to/handler -p%p -s%s ... -- %E +# Our replacement reads stdin (the raw core) and writes it using the -p (PID) argument. +cuopt__hijack_pipe_handler() { + local pattern="$1" dest="$2" + # Extract the handler binary path (first token after the leading '|'). + local handler + handler="$(echo "${pattern}" | sed 's/^|//; s/ .*//')" + [[ -n "${handler}" && -f "${handler}" ]] || return 0 + + # Back up the original handler (only once). + local backup="${handler}.cuopt_orig" + if [[ ! -f "${backup}" ]]; then + cp -a "${handler}" "${backup}" 2>/dev/null || { + cuopt__log "WARNING: cannot back up ${handler} — hijack skipped (read-only?)" + return 0 + } + fi + + # Write a forwarder script that saves stdin (core dump) to the artifact dir. + # The kernel passes flags like -p PID; we parse -p to name the file. + cat > "${handler}" <<'FORWARDER_EOF' +#!/bin/sh +# cuOpt core-dump forwarder — replaces apport/systemd-coredump handler. 
+# Reads raw core dump from stdin, writes to CUOPT_COREDUMP_DIR. +PID="unknown" +EXE="unknown" +while [ $# -gt 0 ]; do + case "$1" in + -p) shift; PID="$1" ;; + -p*) PID="${1#-p}" ;; + --) shift; EXE="$(echo "$*" | tr '/' '_')" ; break ;; + esac + shift +done +FORWARDER_EOF + # Append the dest directory (expanded now, not at runtime). + cat >> "${handler}" <<FORWARDER_DEST +DEST="${dest}" +FORWARDER_DEST + cat >> "${handler}" <<'FORWARDER_BODY' +mkdir -p "${DEST}" 2>/dev/null +CORE_FILE="${DEST}/core.${EXE}.${PID}" +cat > "${CORE_FILE}" +if [ -s "${CORE_FILE}" ]; then + echo "cuopt-forwarder: saved core for PID ${PID} (${EXE}) → ${CORE_FILE}" >&2 +else + rm -f "${CORE_FILE}" 2>/dev/null +fi +FORWARDER_BODY + chmod +x "${handler}" 2>/dev/null || true + cuopt__log "Hijacked pipe handler ${handler} → core forwarder (dest=${dest})" +} + +cuopt_enable_coredumps() { + local ws base pattern + ws="${GITHUB_WORKSPACE:-${PWD}}" + base="${RAPIDS_ARTIFACTS_DIR:-${ws}/artifacts}" + CUOPT_CI_JOB_LABEL="$(cuopt__resolve_ci_job_label)" + export CUOPT_CI_JOB_LABEL + CUOPT_GDB_CORE_ARTIFACT_DIR="$(cuopt__gdb_core_artifact_basename)" + export CUOPT_GDB_CORE_ARTIFACT_DIR + export CUOPT_COREDUMP_DIR="${base}/${CUOPT_GDB_CORE_ARTIFACT_DIR}" + # Record startup time so coredumpctl collection can filter to this session only. + export CUOPT_COREDUMP_SINCE + CUOPT_COREDUMP_SINCE="$(date '+%Y-%m-%d %H:%M:%S' 2>/dev/null || echo '')" + mkdir -p "${CUOPT_COREDUMP_DIR}" + + local pattern_target="${CUOPT_COREDUMP_DIR}/core.%e.%p.%t" + + # Raise soft limit to match hard limit when possible (some shells default to 0). + ulimit -c unlimited 2>/dev/null || true + ulimit -H -c unlimited 2>/dev/null || true + + # Write the coredump filter to the kernel's per-process file (env var alone has no effect). + # 0xff = dump all memory segments (shared, private, huge, DAX — Linux 4.6+).
+ local filter="${COREDUMP_FILTER:-0xff}" + if [[ -w /proc/self/coredump_filter ]]; then + echo "${filter}" >/proc/self/coredump_filter 2>/dev/null || true + fi + + # Prefer writing cores as files under CUOPT_COREDUMP_DIR (often fails in unprivileged Docker). + if [[ -w /proc/sys/kernel/core_pattern ]]; then + echo "${pattern_target}" >/proc/sys/kernel/core_pattern 2>/dev/null || true + fi + if command -v sysctl >/dev/null 2>&1; then + sysctl -q -w "kernel.core_pattern=${pattern_target}" 2>/dev/null || true + fi + + pattern="$(cat /proc/sys/kernel/core_pattern 2>/dev/null || echo n/a)" + + # Track whether core_pattern points to our directory (file-based) or a pipe/collector. + export CUOPT_COREDUMP_PATTERN_IS_PIPE=0 + if [[ "${pattern}" == \|* ]]; then + CUOPT_COREDUMP_PATTERN_IS_PIPE=1 + # Attempt to hijack the pipe handler (e.g. apport) with a forwarder that saves cores + # as files. The kernel pipes the core dump on stdin to the handler binary; if we replace + # it with our own script, the core lands in CUOPT_COREDUMP_DIR. + cuopt__hijack_pipe_handler "${pattern}" "${CUOPT_COREDUMP_DIR}" + fi + + local coredump_filter_val="n/a" + if [[ -r /proc/self/coredump_filter ]]; then + coredump_filter_val="$(cat /proc/self/coredump_filter 2>/dev/null || echo n/a)" + fi + + cuopt__log "Core dumps: dir=${CUOPT_COREDUMP_DIR} ulimit=$(ulimit -c) core_pattern=${pattern} coredump_filter=${coredump_filter_val}" + + if [[ "${CUOPT_COREDUMP_PATTERN_IS_PIPE}" == 1 ]]; then + local _pipe_msg="core_pattern pipes to a collector." + if [[ -f "$(echo "${pattern}" | sed 's/^|//; s/ .*//' ).cuopt_orig" ]]; then + _pipe_msg+=" Handler hijacked with core forwarder — cores should land in ${CUOPT_COREDUMP_DIR}." + else + _pipe_msg+=" Handler hijack failed. Fallback: coredumpctl / /var/crash at collection time." + if command -v coredumpctl &>/dev/null; then + _pipe_msg+=" coredumpctl is available." + else + _pipe_msg+=" coredumpctl NOT found; cores may be lost." 
+ fi + fi + cuopt__log "${_pipe_msg}" + fi +} + +# Copy a single core file into the artifact directory with a sanitized name. +cuopt__copy_core_to_dest() { + local f="$1" dest="$2" label="${3:-}" + [[ -f "${f}" && -s "${f}" ]] || return 0 + local base_name + base_name="$(basename "${f}")" + if [[ -n "${label}" ]]; then + base_name="${label}_${base_name}" + fi + base_name="${base_name//\//_}" + local dest_path="${dest}/${base_name}" + if [[ -e "${dest_path}" ]]; then + dest_path="${dest}/${base_name}.${RANDOM}" + fi + cp -a "${f}" "${dest_path}" 2>/dev/null || true +} + +# Collect cores written as files (core_pattern was file-based or we got lucky). +cuopt__collect_core_files() { + local dest="$1" + shift + local search_dirs=("$@") + local f + for dir in "${search_dirs[@]}"; do + [[ -d "${dir}" ]] || continue + while IFS= read -r -d '' f; do + [[ -f "${f}" ]] || continue + # Skip files already in dest. + case "${f}" in + "${dest}/"*) continue ;; + esac + cuopt__copy_core_to_dest "${f}" "${dest}" "" + done < <( + find "${dir}" \ + \( -path '*/.git/*' -o -path '*/opt/conda/*' -o -path '*/conda_pkgs/*' -o -path "${dest}/*" \) -prune -o \ + \( -name 'core' -o -name 'core.*' \) -type f -print0 2>/dev/null + ) + done +} + +# Fallback: extract cores via coredumpctl (systemd-coredump handler). +cuopt__collect_via_coredumpctl() { + local dest="$1" + command -v coredumpctl &>/dev/null || return 0 + + cuopt__log "Attempting coredumpctl extraction (core_pattern is piped to systemd-coredump)" + + # Build the coredumpctl list command — scope to this session if we have a start time. + local -a list_cmd=(coredumpctl list --no-pager --no-legend) + if [[ -n "${CUOPT_COREDUMP_SINCE:-}" ]]; then + list_cmd+=(--since "${CUOPT_COREDUMP_SINCE}") + cuopt__log " Filtering coredumpctl to cores since ${CUOPT_COREDUMP_SINCE}" + fi + + local line pid exe core_path + # --no-legend output format: DAY DATE TIME TZ PID UID GID SIG COREFILE EXE... 
+ while IFS= read -r line; do + # Skip header / empty lines. + [[ "${line}" =~ ^[[:space:]]*[A-Z] ]] && continue + [[ -z "${line}" ]] && continue + # Parse PID (5th field) and EXE (last field). + pid="$(echo "${line}" | awk '{print $5}')" + exe="$(echo "${line}" | awk '{print $NF}')" + [[ -n "${pid}" ]] || continue + local exe_base + exe_base="$(basename "${exe:-unknown}")" + core_path="${dest}/coredumpctl_${pid}_${exe_base}.core" + # Skip if this PID was already extracted (e.g. by a prior probe collection). + if [[ -e "${core_path}" ]]; then + cuopt__log " Skipping PID ${pid} (${exe_base}) — already extracted" + continue + fi + coredumpctl dump "${pid}" -o "${core_path}" 2>/dev/null || true + if [[ -s "${core_path}" ]]; then + cuopt__log "Extracted core for PID ${pid} (${exe}) → ${core_path} ($(du -h "${core_path}" | cut -f1))" + else + rm -f "${core_path}" 2>/dev/null || true + fi + done < <("${list_cmd[@]}" 2>/dev/null || true) +} + +# Fallback: collect cores from apport crash reports (/var/crash). +cuopt__collect_from_apport() { + local dest="$1" + local crash_dir="/var/crash" + [[ -d "${crash_dir}" ]] || return 0 + local f + for f in "${crash_dir}"/*.crash "${crash_dir}"/core.* "${crash_dir}"/core; do + [[ -f "${f}" && -s "${f}" ]] || continue + cuopt__copy_core_to_dest "${f}" "${dest}" "apport" + done +} + +cuopt_collect_coredumps() { + local ws base dest n_before n_after + ws="${GITHUB_WORKSPACE:-${PWD}}" + base="${RAPIDS_ARTIFACTS_DIR:-${ws}/artifacts}" + if [[ -z "${CUOPT_GDB_CORE_ARTIFACT_DIR:-}" ]]; then + CUOPT_GDB_CORE_ARTIFACT_DIR="$(cuopt__gdb_core_artifact_basename)" + fi + dest="${base}/${CUOPT_GDB_CORE_ARTIFACT_DIR}" + mkdir -p "${dest}" + + n_before="$(find "${dest}" -type f 2>/dev/null | wc -l | tr -d '[:space:]')" + + # 1) Search for core files in workspace + common system locations. 
+ cuopt__collect_core_files "${dest}" \ + "${ws}" "/tmp" "/var/lib/systemd/coredump" + + # 2) If core_pattern pipes to a collector, try extracting via coredumpctl / apport. + if [[ "${CUOPT_COREDUMP_PATTERN_IS_PIPE:-0}" == 1 ]]; then + cuopt__collect_via_coredumpctl "${dest}" + cuopt__collect_from_apport "${dest}" + fi + + n_after="$(find "${dest}" -type f 2>/dev/null | wc -l | tr -d '[:space:]')" + if [[ "${n_after}" -gt "${n_before}" ]]; then + cuopt__log "Collected $((n_after - n_before)) core file(s) into ${dest} (${n_after} total)" + ls -lh "${dest}"/ 2>/dev/null || true + else + cuopt__log "WARNING: No core files found. Cores may have been discarded by the system collector." + cuopt__log " core_pattern=$(cat /proc/sys/kernel/core_pattern 2>/dev/null || echo n/a)" + cuopt__log " Searched: ${ws} /tmp /var/lib/systemd/coredump (+ /var/crash if piped)" + if [[ "${CUOPT_COREDUMP_PATTERN_IS_PIPE:-0}" == 1 ]]; then + if command -v coredumpctl &>/dev/null; then + cuopt__log " coredumpctl list output:" + coredumpctl list --no-pager 2>/dev/null || true + else + cuopt__log " coredumpctl not available; cannot extract from systemd-coredump" + fi + fi + fi +} + +# Standard CI wiring for ci/test_*.sh: call once after sourcing this file. +cuopt_coredumps_ci_setup() { + cuopt_enable_coredumps + trap 'cuopt_collect_coredumps || true' EXIT +}