diff --git a/.github/workflows/push_pipeline.yml b/.github/workflows/push_pipeline.yml index 140a681..dee1f40 100644 --- a/.github/workflows/push_pipeline.yml +++ b/.github/workflows/push_pipeline.yml @@ -15,4 +15,4 @@ jobs: fetch-depth: 0 - name: Push artifact - run: make push + run: make push LOG_LEVEL=TRACE diff --git a/.github/workflows/test_pipeline.yml b/.github/workflows/test_pipeline.yml index 4564938..092592d 100644 --- a/.github/workflows/test_pipeline.yml +++ b/.github/workflows/test_pipeline.yml @@ -15,4 +15,4 @@ jobs: fetch-depth: 0 - name: Test - run: make test + run: make test LOG_LEVEL=TRACE diff --git a/AGENTS.md b/AGENTS.md index 667b1e6..6667218 100644 --- a/AGENTS.md +++ b/AGENTS.md @@ -53,6 +53,12 @@ Read that file for the full list. The most important ones for day-to-day work a Always run `make test` before considering any ticket complete. +When running `make` commands, use the default (low) verbosity. Increase output only +when needed: `LOG_LEVEL=DEBUG` shows command lines; `LOG_LEVEL=TRACE` +shows task stdout/stderr (e.g. `make test LOG_LEVEL=DEBUG`). Failures always +log the failing command, return code, and that command's stdout and stderr at ERROR, +so they are visible at any level. + --- ## 3. 
Running Commands in Docker diff --git a/Makefile b/Makefile index 75dbd9c..a4f8d2c 100644 --- a/Makefile +++ b/Makefile @@ -38,6 +38,7 @@ BASE_DOCKER_BUILD_ENV_COMMAND = docker run --rm \ --workdir=$(DOCKER_REMOTE_PROJECT_ROOT) \ -e PYTHONPATH=/usr/dev/build_support/src \ -e TAG_SUFFIX=$(TAG_SUFFIX) \ +-e LOG_LEVEL=$(LOG_LEVEL) \ $(GIT_MOUNT) \ -v /var/run/docker.sock:/var/run/docker.sock \ -v $(NON_DOCKER_ROOT):$(DOCKER_REMOTE_PROJECT_ROOT) diff --git a/build_support/src/build_support/__init__.py b/build_support/src/build_support/__init__.py index 96f8e89..d6636a2 100644 --- a/build_support/src/build_support/__init__.py +++ b/build_support/src/build_support/__init__.py @@ -9,6 +9,7 @@ project that is not part of the standard pipeline. Modules: + | build_logging: TRACE log level and registration for the build pipeline. | dag_engine: Contains the logic for resolving task dependencies and running tasks in a coherent order. | dump_ci_cd_run_info: A "main" that records project level variables for the CI/CD diff --git a/build_support/src/build_support/build_logging.py b/build_support/src/build_support/build_logging.py new file mode 100644 index 0000000..975eff3 --- /dev/null +++ b/build_support/src/build_support/build_logging.py @@ -0,0 +1,49 @@ +"""Build pipeline logging: TRACE level and registration. + +The build uses three verbosity levels: INFO (steps only), DEBUG (+ command +lines), TRACE (+ task stdout/stderr). TRACE is a custom level below DEBUG. +This module defines the TRACE constant and registers it with the standard +logging module so LOG_LEVEL=TRACE works. + +Attributes: + | TRACE: Numeric log level (5) for most verbose build output. +""" + +import logging +import os +import sys + +# Below DEBUG (10); used for task stdout/stderr so they can be suppressed at DEBUG. +TRACE = 5 + + +def register_trace_level() -> None: + """Register the TRACE level name with the logging module. + + Call once at build startup (e.g. 
from execute_build_steps) so that + LOG_LEVEL=TRACE is recognized and logger.log(TRACE, ...) displays + correctly. + + Returns: + None + """ + logging.addLevelName(TRACE, "TRACE") + + +def _configure_build_logging() -> None: + """Configure build logging from LOG_LEVEL. + + Default is INFO (steps only). DEBUG adds commands. TRACE adds stdout/stderr. + Invalid or unset values fall back to INFO. + + Returns: + None + """ + register_trace_level() + level_name = os.environ.get("LOG_LEVEL", "INFO").upper() + level = getattr(logging, level_name, None) + if not isinstance(level, int): + level = TRACE if level_name == "TRACE" else logging.INFO + logging.basicConfig( + level=level, format="%(message)s", stream=sys.stdout, force=True + ) diff --git a/build_support/src/build_support/ci_cd_tasks/build_tasks.py b/build_support/src/build_support/ci_cd_tasks/build_tasks.py index fc33ee1..f75f985 100644 --- a/build_support/src/build_support/ci_cd_tasks/build_tasks.py +++ b/build_support/src/build_support/ci_cd_tasks/build_tasks.py @@ -106,7 +106,7 @@ def run(self) -> None: ), "uv", "build", - "--output", + "--out-dir", get_dist_dir(project_root=self.docker_project_root), ] ) diff --git a/build_support/src/build_support/dag_engine.py b/build_support/src/build_support/dag_engine.py index 311d1ac..25f2dfe 100644 --- a/build_support/src/build_support/dag_engine.py +++ b/build_support/src/build_support/dag_engine.py @@ -1,5 +1,10 @@ -"""Logic for building a DAG of tasks and running them in order.""" +"""Logic for building a DAG of tasks and running them in order. +Attributes: + | logger: Module-level logger for task execution and report output. 
+""" + +import logging from datetime import UTC, datetime, timedelta from pathlib import Path @@ -9,6 +14,8 @@ from build_support.ci_cd_tasks.task_node import TaskNode from build_support.ci_cd_vars.build_paths import get_build_runtime_report_path +logger = logging.getLogger(__name__) + def _add_tasks_to_list_with_dfs( execution_order: list[TaskNode], tasks_added: set[TaskNode], task_to_add: TaskNode @@ -90,11 +97,11 @@ def run_tasks(tasks: list[TaskNode], project_root: Path) -> None: """ run_report = BuildRunReport() task_execution_order = get_task_execution_order(requested_tasks=tasks) - print("Will execute the following tasks:", flush=True) # noqa: T201 + logger.info("Will execute the following tasks:") for task in task_execution_order: - print(f" - {task.task_label()}", flush=True) # noqa: T201 + logger.info(" - %s", task.task_label()) for task in task_execution_order: - print(f"Starting: {task.task_label()}", flush=True) # noqa: T201 + logger.info("Starting: %s", task.task_label()) start = datetime.now(tz=UTC) task.run() duration = datetime.now(tz=UTC) - start @@ -102,5 +109,5 @@ def run_tasks(tasks: list[TaskNode], project_root: Path) -> None: TaskRunReport(task_name=task.task_label(), duration=duration) ) report_content = run_report.to_yaml() - print(report_content) # noqa: T201 + logger.info("%s", report_content) get_build_runtime_report_path(project_root=project_root).write_text(report_content) diff --git a/build_support/src/build_support/execute_build_steps.py b/build_support/src/build_support/execute_build_steps.py index 08df36e..38e1572 100644 --- a/build_support/src/build_support/execute_build_steps.py +++ b/build_support/src/build_support/execute_build_steps.py @@ -2,12 +2,15 @@ Attributes: | CLI_ARG_TO_TASK: A dictionary of the CLI arg to the corresponding task to run. + | logger: Module-level logger for build failure and diagnostics. 
""" +import logging from argparse import ArgumentParser, Namespace from dataclasses import dataclass from pathlib import Path +from build_support.build_logging import _configure_build_logging from build_support.ci_cd_tasks.build_tasks import BuildAll, BuildDocs, BuildPypi from build_support.ci_cd_tasks.env_setup_tasks import ( Clean, @@ -36,7 +39,9 @@ from build_support.ci_cd_vars.subproject_structure import SubprojectContext from build_support.dag_engine import run_tasks from build_support.new_project_setup.setup_new_project import MakeProjectFromTemplate -from build_support.process_runner import ProcessVerbosity, concatenate_args, run_process +from build_support.process_runner import concatenate_args, run_process + +logger = logging.getLogger(__name__) @dataclass(frozen=True) @@ -152,8 +157,7 @@ def fix_permissions(local_user_uid: int, local_user_gid: int) -> None: if path.name not in [".git", "test_scratch_folder"] ], ] - ), - verbosity=ProcessVerbosity.SILENT, + ) ) @@ -197,7 +201,11 @@ def run_main(args: Namespace) -> None: Returns: None + + Raises: + Exception: Re-raised after logging if any task or setup fails. """ + _configure_build_logging() local_info_yaml = get_local_info_yaml(project_root=args.docker_project_root) basic_task_info = BasicTaskInfo.from_yaml(local_info_yaml.read_text()) requested_tasks = [ @@ -208,8 +216,11 @@ def run_main(args: Namespace) -> None: run_tasks( tasks=requested_tasks, project_root=basic_task_info.docker_project_root ) - except Exception as e: # noqa: BLE001 - print(e) # noqa: T201 + except Exception: + # logger.exception() logs at ERROR and automatically includes the exception + # message and full traceback (equivalent to exc_info=True). 
+ logger.exception("Build failed") + raise finally: fix_permissions( local_user_uid=basic_task_info.local_uid, diff --git a/build_support/src/build_support/new_project_setup/new_project_data_models.py b/build_support/src/build_support/new_project_setup/new_project_data_models.py index a7c9848..5adf761 100644 --- a/build_support/src/build_support/new_project_setup/new_project_data_models.py +++ b/build_support/src/build_support/new_project_setup/new_project_data_models.py @@ -52,6 +52,14 @@ def formatted_name_and_email(self) -> str: """ return f"{self.name} <{self.contact_email}>" + def as_pyproject_author(self) -> dict[str, str]: + """Returns the organization as a PEP 621 author inline table entry. + + Returns: + dict[str, str]: A dict with 'name' and 'email' for project.authors. + """ + return {"name": self.name, "email": self.contact_email} + class ProjectSettings(BaseModel): """An object containing the project settings for this project.""" diff --git a/build_support/src/build_support/new_project_setup/update_pyproject_toml.py b/build_support/src/build_support/new_project_setup/update_pyproject_toml.py index 48a8cf5..f5cd55e 100644 --- a/build_support/src/build_support/new_project_setup/update_pyproject_toml.py +++ b/build_support/src/build_support/new_project_setup/update_pyproject_toml.py @@ -29,7 +29,7 @@ def update_pyproject_toml( project["version"] = "0.0.0" # type: ignore[index] project["license"] = new_project_settings.license # type: ignore[index] project["authors"] = [ # type: ignore[index] - new_project_settings.organization.formatted_name_and_email() + new_project_settings.organization.as_pyproject_author() ] hatch = pyproject_data["tool"]["hatch"] # type: ignore[index] hatch["build"]["targets"]["wheel"]["packages"] = [ # type: ignore[index] diff --git a/build_support/src/build_support/process_runner.py b/build_support/src/build_support/process_runner.py index a45f64b..fdc7263 100644 --- a/build_support/src/build_support/process_runner.py +++ 
b/build_support/src/build_support/process_runner.py @@ -1,30 +1,35 @@ -"""Module for running processes.""" +"""Module for running processes. + +Three verbosity levels: INFO = steps only; DEBUG = + command line; +TRACE = + stdout/stderr. On failure, command + return code + stdout + stderr +are always logged at ERROR. + +Attributes: + | logger: Module-level logger for subprocess command and output. +""" import itertools +import logging import sys -from enum import StrEnum # The purpose of this module is to make subprocess calls from subprocess import PIPE, Popen # nosec: B404 from typing import IO, Any +from build_support.build_logging import TRACE -class ProcessVerbosity(StrEnum): - """Enum for process verbosity to avoid boolean trap.""" - - SILENT = "SILENT" - ALL = "ALL" +logger = logging.getLogger(__name__) -def run_piped_processes( - processes: list[list[Any]], verbosity: ProcessVerbosity = ProcessVerbosity.ALL -) -> None: +def run_piped_processes(processes: list[list[Any]]) -> None: """Runs piped processes as they would be on the command line. + Command line at DEBUG (level 2); stdout/stderr at TRACE (level 3). + On failure all are logged at ERROR. + Args: processes (list[list[Any]]): The list of process arguments that will be run sequentially. - verbosity (ProcessVerbosity): Should the process be run silently. 
Returns: None @@ -32,8 +37,7 @@ def run_piped_processes( args_list = [get_str_args(args=args) for args in processes] process_strs = [" ".join(args) for args in args_list] command_as_str = " | ".join(process_strs) - if verbosity != ProcessVerbosity.SILENT: - print(command_as_str, flush=True) # noqa: T201 + logger.debug("%s", command_as_str) p1 = build_popen(args=args_list[0]) popen_processes = [p1] for args in args_list[1:]: @@ -47,42 +51,37 @@ def run_piped_processes( output=output, error=error, return_code=return_code, - verbosity=verbosity, ) -def get_output_of_process( - args: list[Any], verbosity: ProcessVerbosity = ProcessVerbosity.ALL -) -> str: +def get_output_of_process(args: list[Any]) -> str: """Runs a process and gets the output. Args: args (list[Any]): A list of arguments that could be run on the command line. - verbosity (ProcessVerbosity): Should the process be run silently. Returns: str: The stdout from the subprocess that was run. """ - output = run_process(args=args, verbosity=verbosity) + output = run_process(args=args) return output.decode("utf-8").strip() -def run_process( - args: list[Any], verbosity: ProcessVerbosity = ProcessVerbosity.ALL -) -> bytes: +def run_process(args: list[Any]) -> bytes: """Runs a process. + Command line at DEBUG (level 2); stdout/stderr at TRACE (level 3). + On failure all are logged at ERROR. + Args: args (list[Any]): A list of arguments that could be run on the command line. - verbosity (ProcessVerbosity): Should the process be run silently. Returns: bytes: The stdout from the subprocess that was run. 
""" str_args = get_str_args(args=args) command_as_str = " ".join(str_args) - if verbosity != ProcessVerbosity.SILENT: - print(command_as_str, flush=True) # noqa: T201 + logger.debug("%s", command_as_str) p = build_popen(args=str_args) output, error = p.communicate() return_code = p.returncode @@ -91,7 +90,6 @@ def run_process( output=output, error=error, return_code=return_code, - verbosity=verbosity, ) return output @@ -101,7 +99,7 @@ def build_popen(args: list[str], stdin: IO[bytes] | int | None = None) -> Popen[ Args: args (list[str]): The args to pass to the new process. - stdin (IO | int | None): The stdin to use with the new process. + stdin (IO[bytes] | int | None): The stdin to use with the new process. Returns: Popen: A Popen instance with parameters set by the inputs to this function. @@ -113,36 +111,33 @@ def build_popen(args: list[str], stdin: IO[bytes] | int | None = None) -> Popen[ def resolve_process_results( - command_as_str: str, - output: bytes, - error: bytes, - return_code: int, - verbosity: ProcessVerbosity = ProcessVerbosity.ALL, + command_as_str: str, output: bytes, error: bytes, return_code: int ) -> None: - """Prints outputs and errors and exits as appropriate when a command exits. + """Logs outputs and errors and exits as appropriate when a command exits. + + On success: stdout/stderr at TRACE (level 3). On failure: command, return code, + stdout and stderr are all logged at ERROR so they are visible at any LOG_LEVEL. Args: command_as_str (str): The command run as it would appear on the command line. output (bytes): The stdout from the subprocess that was run. error (bytes): The stderr from the subprocess that was run. return_code (int): The return code from the subprocess that was run. - verbosity (ProcessVerbosity): Was the subprocess intended to run silently. 
Returns: None """ - if output and verbosity != ProcessVerbosity.SILENT: - print(output.decode("utf-8"), flush=True, end="") # noqa: T201 - if error: - print(error.decode("utf-8"), flush=True, end="", file=sys.stderr) # noqa: T201 if return_code != 0: - if verbosity == ProcessVerbosity.SILENT: - print( # noqa: T201 - f"{command_as_str}\nFailed with code: {return_code}", - flush=True, - file=sys.stderr, - ) + logger.error("%s\nFailed with code: %s", command_as_str, return_code) + if output: + logger.error("stdout:\n%s", output.decode("utf-8")) + if error: + logger.error("stderr:\n%s", error.decode("utf-8")) sys.exit(return_code) + if output: + logger.log(TRACE, "stdout:\n%s", output.decode("utf-8"), stack_info=False) + if error: + logger.log(TRACE, "stderr:\n%s", error.decode("utf-8"), stack_info=False) def concatenate_args(args: list[Any | list[Any]]) -> list[str]: diff --git a/build_support/src/build_support/report_build_var.py b/build_support/src/build_support/report_build_var.py index 92b9ed2..c024302 100644 --- a/build_support/src/build_support/report_build_var.py +++ b/build_support/src/build_support/report_build_var.py @@ -90,6 +90,7 @@ def run_main(args: Namespace) -> None: case _: # pragma: no cov - can't hit if all enums are implemented msg = f"{command!r} is not a supported enum of AllowedCliArgs." raise ValueError(msg) + # Stdout is consumed by the Makefile; intentionally print for data output. print(" ".join(values)) # noqa: T201 diff --git a/build_support/test/feature_tests/test_95_template_python_project.py b/build_support/test/feature_tests/test_95_template_python_project.py new file mode 100644 index 0000000..22b4bc4 --- /dev/null +++ b/build_support/test/feature_tests/test_95_template_python_project.py @@ -0,0 +1,97 @@ +"""Feature tests for configurable build logging (LOG_LEVEL). + +Three levels: INFO = steps only; DEBUG = steps + commands; TRACE = steps + commands ++ task stdout/stderr. 
Failures always log command, code, stdout, and stderr at ERROR. +""" + +from pathlib import Path + +import pytest +from build_support.ci_cd_vars.subproject_structure import ( + SubprojectContext, + get_python_subproject, +) +from test_utils.command_runner import run_command_and_save_logs + + +@pytest.mark.usefixtures("mock_lightweight_project") +def test_default_log_level_shows_only_workflow( + mock_project_root: Path, make_command_prefix: list[str], real_project_root_dir: Path +) -> None: + """At default (INFO) level only workflow messages appear; no task output.""" + return_code, stdout, stderr = run_command_and_save_logs( + args=[*make_command_prefix, "format"], + cwd=mock_project_root, + test_name="test_default_log_level_shows_only_workflow", + real_project_root_dir=real_project_root_dir, + ) + combined = stdout + stderr + assert return_code == 0 + assert "Will execute the following tasks:" in combined + assert "Starting:" in combined + assert "report:" in combined + assert "reformatted" not in combined + + +@pytest.mark.usefixtures("mock_lightweight_project") +def test_trace_log_level_shows_task_output( + mock_project_root: Path, make_command_prefix: list[str], real_project_root_dir: Path +) -> None: + """At LOG_LEVEL=TRACE, task stdout/stderr (e.g. 
formatter output) is present.""" + return_code, stdout, stderr = run_command_and_save_logs( + args=[*make_command_prefix, "LOG_LEVEL=TRACE", "format"], + cwd=mock_project_root, + test_name="test_trace_log_level_shows_task_output", + real_project_root_dir=real_project_root_dir, + ) + combined = stdout + stderr + assert return_code == 0 + assert "Will execute the following tasks:" in combined + assert ( + "reformatted" in combined + or "All checks passed" in combined + or "ruff" in combined + ) + + +@pytest.mark.usefixtures("mock_lightweight_project") +def test_debug_log_level_shows_command_lines( + mock_project_root: Path, make_command_prefix: list[str], real_project_root_dir: Path +) -> None: + """At LOG_LEVEL=DEBUG, command lines are logged (inner docker run).""" + return_code, stdout, stderr = run_command_and_save_logs( + args=[*make_command_prefix, "LOG_LEVEL=DEBUG", "format"], + cwd=mock_project_root, + test_name="test_debug_log_level_shows_command_lines", + real_project_root_dir=real_project_root_dir, + ) + combined = stdout + stderr + assert return_code == 0 + assert "docker run" in combined + assert "ruff" in combined + + +@pytest.mark.usefixtures("mock_lightweight_project") +def test_failure_visible_at_default_log_level( + mock_project_root: Path, make_command_prefix: list[str], real_project_root_dir: Path +) -> None: + """On task failure, the failure and failing command are visible at default level.""" + get_python_subproject( + project_root=mock_project_root, + subproject_context=SubprojectContext.BUILD_SUPPORT, + ).get_test_dir().joinpath("test_type_error_for_logging_test.py").write_text( + """def some_function(items) -> list: + \"\"\"func docstring\"\"\" + return items +""" + ) + return_code, stdout, stderr = run_command_and_save_logs( + args=[*make_command_prefix, "type_check_build_support"], + cwd=mock_project_root, + test_name="test_failure_visible_at_default_log_level", + real_project_root_dir=real_project_root_dir, + expect_failure=True, + ) + 
combined = stdout + stderr + assert return_code != 0 + assert "Failed with code:" in combined diff --git a/build_support/test/test_utils/command_runner.py b/build_support/test/test_utils/command_runner.py index 93f5e82..9a69450 100644 --- a/build_support/test/test_utils/command_runner.py +++ b/build_support/test/test_utils/command_runner.py @@ -1,10 +1,13 @@ """Utility for running commands and saving logs during tests.""" +import logging from pathlib import Path from subprocess import PIPE, Popen from build_support.ci_cd_vars.project_structure import get_feature_test_log_name +logger = logging.getLogger(__name__) + def run_command_and_save_logs( args: list[str], @@ -21,7 +24,7 @@ def run_command_and_save_logs( test_name (str): Name of the test (used for log file naming). real_project_root_dir (Path): Real project root directory. expect_failure (bool): If True, a non-zero return code will not trigger - printing of the log content to stdout. Default False. + logging of the log content at ERROR level. Default False. Returns: tuple[int, str, str]: Return code, stdout, and stderr. 
@@ -54,6 +57,6 @@ def run_command_and_save_logs( log_file.write_text(log_content, encoding="utf-8") if (return_code != 0) and not expect_failure: - print(log_content, flush=True) # noqa: T201 + logger.error("%s", log_content) return return_code, stdout, stderr diff --git a/build_support/test/unit_tests/ci_cd_tasks/test_build_tasks.py b/build_support/test/unit_tests/ci_cd_tasks/test_build_tasks.py index 722b12a..f79341c 100644 --- a/build_support/test/unit_tests/ci_cd_tasks/test_build_tasks.py +++ b/build_support/test/unit_tests/ci_cd_tasks/test_build_tasks.py @@ -75,7 +75,7 @@ def test_run_build_pypi(basic_task_info: BasicTaskInfo) -> None: ), "uv", "build", - "--output", + "--out-dir", get_dist_dir(project_root=basic_task_info.docker_project_root), ] ) diff --git a/build_support/test/unit_tests/new_project_setup/test_new_project_data_models.py b/build_support/test/unit_tests/new_project_setup/test_new_project_data_models.py index c154093..e3e5dfc 100644 --- a/build_support/test/unit_tests/new_project_setup/test_new_project_data_models.py +++ b/build_support/test/unit_tests/new_project_setup/test_new_project_data_models.py @@ -68,3 +68,14 @@ def test_org_formatted_name_and_email( project_setting = ProjectSettings.model_validate(project_settings_data_dict) org = project_setting.organization assert org.formatted_name_and_email() == "Someone Nice " + + +def test_org_as_pyproject_author( + project_settings_data_dict: dict[str, Any], +) -> None: + project_setting = ProjectSettings.model_validate(project_settings_data_dict) + org = project_setting.organization + assert org.as_pyproject_author() == { + "name": "Someone Nice", + "email": "someone@nice.com", + } diff --git a/build_support/test/unit_tests/new_project_setup/test_setup_new_project.py b/build_support/test/unit_tests/new_project_setup/test_setup_new_project.py index 44e2ab5..80a3a32 100644 --- a/build_support/test/unit_tests/new_project_setup/test_setup_new_project.py +++ 
b/build_support/test/unit_tests/new_project_setup/test_setup_new_project.py @@ -69,7 +69,7 @@ def _check_pyproject_toml( assert (pyproject_toml_data["project"]["version"] == "0.0.0") == version_reset assert _license_value(pyproject_toml_data["project"]["license"]) == settings.license assert pyproject_toml_data["project"]["authors"] == [ - settings.organization.formatted_name_and_email() + settings.organization.as_pyproject_author() ] wheel_packages = pyproject_toml_data["tool"]["hatch"]["build"]["targets"]["wheel"][ "packages" diff --git a/build_support/test/unit_tests/new_project_setup/test_update_pyproject_toml.py b/build_support/test/unit_tests/new_project_setup/test_update_pyproject_toml.py index 7d123d8..dcd0d1c 100644 --- a/build_support/test/unit_tests/new_project_setup/test_update_pyproject_toml.py +++ b/build_support/test/unit_tests/new_project_setup/test_update_pyproject_toml.py @@ -37,7 +37,7 @@ def test_update_pyproject_toml(tmp_path: Path, real_project_root_dir: Path) -> N expected_project["version"] = "0.0.0" # type: ignore[index] expected_project["license"] = new_project_settings.license # type: ignore[index] expected_project["authors"] = [ # type: ignore[index] - new_project_settings.organization.formatted_name_and_email() + new_project_settings.organization.as_pyproject_author() ] expected_hatch = expected_data["tool"]["hatch"] # type: ignore[index] expected_hatch["build"]["targets"]["wheel"]["packages"] = [ # type: ignore[index] diff --git a/build_support/test/unit_tests/test_build_logging.py b/build_support/test/unit_tests/test_build_logging.py new file mode 100644 index 0000000..ba9eda4 --- /dev/null +++ b/build_support/test/unit_tests/test_build_logging.py @@ -0,0 +1,46 @@ +"""Unit tests for build_logging.""" + +import logging +from unittest.mock import patch + +from build_support.build_logging import ( + TRACE, + _configure_build_logging, + register_trace_level, +) + +# Standard numeric level for TRACE (below logging.DEBUG). 
+_TRACE_LEVEL_VALUE = 5 + + +def test_trace_level_value() -> None: + """TRACE is below DEBUG (10) so it can be used for verbose task output.""" + assert TRACE == _TRACE_LEVEL_VALUE + assert TRACE < logging.DEBUG + + +def test_register_trace_level() -> None: + """register_trace_level makes LOG_LEVEL=TRACE and getLevelName work.""" + register_trace_level() + assert logging.getLevelName(TRACE) == "TRACE" + + +def test_configure_build_logging_accepts_trace() -> None: + """_configure_build_logging sets root level to TRACE when LOG_LEVEL=TRACE.""" + with patch.dict("os.environ", {"LOG_LEVEL": "TRACE"}, clear=False): + _configure_build_logging() + assert logging.root.level == TRACE + + +def test_configure_build_logging_invalid_level_falls_back_to_info() -> None: + """_configure_build_logging falls back to INFO for invalid LOG_LEVEL.""" + with patch.dict("os.environ", {"LOG_LEVEL": "INVALID"}, clear=False): + _configure_build_logging() + assert logging.root.level == logging.INFO + + +def test_configure_build_logging_accepts_standard_level() -> None: + """_configure_build_logging uses standard level when LOG_LEVEL is DEBUG.""" + with patch.dict("os.environ", {"LOG_LEVEL": "DEBUG"}, clear=False): + _configure_build_logging() + assert logging.root.level == logging.DEBUG diff --git a/build_support/test/unit_tests/test_execute_build_steps.py b/build_support/test/unit_tests/test_execute_build_steps.py index 27964d5..3c806fe 100644 --- a/build_support/test/unit_tests/test_execute_build_steps.py +++ b/build_support/test/unit_tests/test_execute_build_steps.py @@ -40,7 +40,7 @@ run_main, ) from build_support.new_project_setup.setup_new_project import MakeProjectFromTemplate -from build_support.process_runner import ProcessVerbosity, concatenate_args +from build_support.process_runner import concatenate_args def test_constants_not_changed_by_accident() -> None: @@ -104,9 +104,7 @@ def test_fix_permissions(real_project_root_dir: Path) -> None: ] ) fix_permissions(local_user_uid=1337, 
local_user_gid=42) - run_process_mock.assert_called_once_with( - args=fix_permissions_args, verbosity=ProcessVerbosity.SILENT - ) + run_process_mock.assert_called_once_with(args=fix_permissions_args) @pytest.fixture(params=[Path("usr/dev"), Path("user/dev")]) @@ -270,14 +268,15 @@ def test_run_main_exception(cli_arg_combo: BasicTaskInfo) -> None: ) with ( patch("build_support.execute_build_steps.run_tasks") as mock_run_tasks, - patch("builtins.print") as mock_print, + patch("build_support.execute_build_steps.logger") as mock_logger, patch( "build_support.execute_build_steps.fix_permissions" ) as mock_fix_permissions, ): error_to_raise = RuntimeError("error_message") mock_run_tasks.side_effect = error_to_raise - run_main(args) + with pytest.raises(RuntimeError, match="error_message"): + run_main(args) requested_tasks = [ CLI_ARG_TO_TASK[arg].get_task_node(basic_task_info=cli_arg_combo) for arg in args.build_tasks @@ -285,7 +284,7 @@ def test_run_main_exception(cli_arg_combo: BasicTaskInfo) -> None: mock_run_tasks.assert_called_once_with( tasks=requested_tasks, project_root=cli_arg_combo.docker_project_root ) - mock_print.assert_called_once_with(error_to_raise) + mock_logger.exception.assert_called_once_with("Build failed") mock_fix_permissions.assert_called_once_with( local_user_uid=cli_arg_combo.local_uid, local_user_gid=cli_arg_combo.local_gid, diff --git a/build_support/test/unit_tests/test_process_runner.py b/build_support/test/unit_tests/test_process_runner.py index 791ed09..5c328ba 100644 --- a/build_support/test/unit_tests/test_process_runner.py +++ b/build_support/test/unit_tests/test_process_runner.py @@ -1,10 +1,11 @@ -import sys +import logging from pathlib import Path from subprocess import PIPE from unittest.mock import Mock, call, patch +import pytest from build_support.process_runner import ( - ProcessVerbosity, + TRACE, concatenate_args, get_output_of_process, get_str_args, @@ -33,135 +34,113 @@ def test_concatenate_args() -> None: ] -def 
test_resolve_process_results_normal_process() -> None: - with patch("builtins.print") as mock_print: - resolve_process_results( - command_as_str="run a process", - output=b"output", - error=b"", - return_code=0, - verbosity=ProcessVerbosity.ALL, - ) - mock_print.assert_called_once_with("output", flush=True, end="") +def test_resolve_process_results_normal_process( + caplog: pytest.LogCaptureFixture, +) -> None: + caplog.set_level(TRACE, logger="build_support.process_runner") + resolve_process_results( + command_as_str="run a process", output=b"output", error=b"", return_code=0 + ) + assert "output" in caplog.text + assert len(caplog.records) == 1 + assert caplog.records[0].levelno == TRACE -def test_resolve_process_results_normal_process_exit_1() -> None: - with ( - patch("builtins.print") as mock_print, - patch("build_support.process_runner.sys.exit") as mock_exit, - ): +def test_resolve_process_results_normal_process_exit_1( + caplog: pytest.LogCaptureFixture, +) -> None: + caplog.set_level(logging.ERROR, logger="build_support.process_runner") + with patch("build_support.process_runner.sys.exit") as mock_exit: resolve_process_results( - command_as_str="run a process", - output=b"output", - error=b"", - return_code=1, - verbosity=ProcessVerbosity.ALL, + command_as_str="run a process", output=b"output", error=b"", return_code=1 ) - mock_print.assert_called_once_with("output", flush=True, end="") - mock_exit.assert_called_once_with(1) + assert "output" in caplog.text + assert "Failed with code: 1" in caplog.text + error_records = [r for r in caplog.records if r.levelno == logging.ERROR] + expected_error_count = 2 + assert len(error_records) == expected_error_count + mock_exit.assert_called_once_with(1) -def test_resolve_process_results_normal_process_has_error_text() -> None: - with patch("builtins.print") as mock_print: - resolve_process_results( - command_as_str="run a process", - output=b"output", - error=b"error", - return_code=0, - 
verbosity=ProcessVerbosity.ALL, - ) - expected_print_calls = [ - call("output", flush=True, end=""), - call("error", flush=True, end="", file=sys.stderr), - ] - assert mock_print.call_count == len(expected_print_calls) - mock_print.assert_has_calls(calls=expected_print_calls) +def test_resolve_process_results_normal_process_has_error_text( + caplog: pytest.LogCaptureFixture, +) -> None: + caplog.set_level(TRACE, logger="build_support.process_runner") + resolve_process_results( + command_as_str="run a process", output=b"output", error=b"error", return_code=0 + ) + assert "output" in caplog.text + assert "error" in caplog.text + expected_trace_count = 2 + assert len(caplog.records) == expected_trace_count + assert all(r.levelno == TRACE for r in caplog.records) -def test_resolve_process_results_normal_process_has_error_text_exit_1() -> None: - with ( - patch("builtins.print") as mock_print, - patch("build_support.process_runner.sys.exit") as mock_exit, - ): - resolve_process_results( - command_as_str="run a process", - output=b"output", - error=b"error", - return_code=1, - verbosity=ProcessVerbosity.ALL, - ) - expected_print_calls = [ - call("output", flush=True, end=""), - call("error", flush=True, end="", file=sys.stderr), - ] - assert mock_print.call_count == len(expected_print_calls) - mock_print.assert_has_calls(calls=expected_print_calls) - mock_exit.assert_called_once_with(1) +def test_resolve_process_results_success_stderr_only( + caplog: pytest.LogCaptureFixture, +) -> None: + caplog.set_level(TRACE, logger="build_support.process_runner") + resolve_process_results( + command_as_str="run a process", output=b"", error=b"stderr only", return_code=0 + ) + assert "stderr only" in caplog.text + assert len(caplog.records) == 1 + assert caplog.records[0].levelno == TRACE -def test_resolve_process_results_normal_process_silent() -> None: - with patch("builtins.print") as mock_print: +def test_resolve_process_results_normal_process_has_error_text_exit_1( + caplog: 
pytest.LogCaptureFixture, +) -> None: + caplog.set_level(logging.ERROR, logger="build_support.process_runner") + with patch("build_support.process_runner.sys.exit") as mock_exit: resolve_process_results( command_as_str="run a process", output=b"output", error=b"error", - return_code=0, - verbosity=ProcessVerbosity.SILENT, + return_code=1, ) - assert mock_print.call_count == 1 - mock_print.assert_called_once_with("error", flush=True, end="", file=sys.stderr) + assert "output" in caplog.text + assert "error" in caplog.text + assert "Failed with code: 1" in caplog.text + error_records = [r for r in caplog.records if r.levelno == logging.ERROR] + expected_error_count = 3 + assert len(error_records) == expected_error_count + mock_exit.assert_called_once_with(1) -def test_resolve_process_results_normal_process_silent_exit_1() -> None: - with ( - patch("builtins.print") as mock_print, - patch("build_support.process_runner.sys.exit") as mock_exit, - ): +def test_resolve_process_results_failure_stderr_only( + caplog: pytest.LogCaptureFixture, +) -> None: + caplog.set_level(logging.ERROR, logger="build_support.process_runner") + with patch("build_support.process_runner.sys.exit") as mock_exit: resolve_process_results( command_as_str="run a process", - output=b"output", - error=b"error", + output=b"", + error=b"stderr only", return_code=1, - verbosity=ProcessVerbosity.SILENT, ) - expected_print_calls = [ - call("error", flush=True, end="", file=sys.stderr), - call("run a process\nFailed with code: 1", flush=True, file=sys.stderr), - ] - assert mock_print.call_count == len(expected_print_calls) - mock_print.assert_has_calls(calls=expected_print_calls) - mock_exit.assert_called_once_with(1) + assert "Failed with code: 1" in caplog.text + assert "stderr only" in caplog.text + error_records = [r for r in caplog.records if r.levelno == logging.ERROR] + expected_error_count = 2 + assert len(error_records) == expected_error_count + mock_exit.assert_called_once_with(1) -def 
test_run_process() -> None: - with ( - patch("build_support.process_runner.Popen") as mock_popen, - patch( - "build_support.process_runner.resolve_process_results" - ) as mock_resolve_process_results, - ): - process_mock = Mock() - process_mock.communicate = Mock() - process_mock.communicate.return_value = b"output", b"error" - process_mock.returncode = 0 - mock_popen.return_value = process_mock - assert run_process(args=["command", 0, 1.5, Path("/usr/dev")]) == b"output" - mock_popen.assert_called_once_with( - args=["command", "0", "1.5", "/usr/dev"], - stdin=None, - stdout=PIPE, - stderr=PIPE, - ) - mock_resolve_process_results.assert_called_once_with( - command_as_str="command 0 1.5 /usr/dev", - output=b"output", - error=b"error", - return_code=0, - verbosity=ProcessVerbosity.ALL, - ) +def test_resolve_process_results_at_info_level_suppresses_trace_output( + caplog: pytest.LogCaptureFixture, +) -> None: + """At INFO level, stdout/stderr are not logged (TRACE only).""" + caplog.set_level(logging.INFO, logger="build_support.process_runner") + resolve_process_results( + command_as_str="run a process", output=b"output", error=b"error", return_code=0 + ) + assert "output" not in caplog.text + assert "error" not in caplog.text + assert len(caplog.records) == 0 -def test_run_process_silent() -> None: +def test_run_process() -> None: with ( patch("build_support.process_runner.Popen") as mock_popen, patch( @@ -173,13 +152,7 @@ def test_run_process_silent() -> None: process_mock.communicate.return_value = b"output", b"error" process_mock.returncode = 0 mock_popen.return_value = process_mock - assert ( - run_process( - args=["command", 0, 1.5, Path("/usr/dev")], - verbosity=ProcessVerbosity.SILENT, - ) - == b"output" - ) + assert run_process(args=["command", 0, 1.5, Path("/usr/dev")]) == b"output" mock_popen.assert_called_once_with( args=["command", "0", "1.5", "/usr/dev"], stdin=None, @@ -191,7 +164,6 @@ def test_run_process_silent() -> None: output=b"output", 
error=b"error", return_code=0, - verbosity=ProcessVerbosity.SILENT, ) @@ -204,10 +176,10 @@ def test_get_output_of_process() -> None: ) -def test_run_piped_processes() -> None: +def test_run_piped_processes(caplog: pytest.LogCaptureFixture) -> None: + caplog.set_level(logging.DEBUG, logger="build_support.process_runner") with ( patch("build_support.process_runner.Popen") as mock_popen, - patch("builtins.print") as mock_print, patch( "build_support.process_runner.resolve_process_results" ) as mock_resolve_process_results, @@ -242,15 +214,17 @@ def test_run_piped_processes() -> None: output=b"output", error=b"error", return_code=0, - verbosity=ProcessVerbosity.ALL, ) - mock_print.assert_called_once_with(command_as_str, flush=True) + assert command_as_str in caplog.text -def test_run_piped_processes_silent() -> None: +def test_run_piped_processes_at_info_level_hides_command( + caplog: pytest.LogCaptureFixture, +) -> None: + """At INFO level the command line is not logged (it requires DEBUG).""" + caplog.set_level(logging.INFO, logger="build_support.process_runner") with ( patch("build_support.process_runner.Popen") as mock_popen, - patch("builtins.print") as mock_print, patch( "build_support.process_runner.resolve_process_results" ) as mock_resolve_process_results, @@ -261,40 +235,22 @@ def test_run_piped_processes_silent() -> None: process_mock.returncode = 0 mock_popen.return_value = process_mock run_piped_processes( - processes=[["command", 0, 1.5, Path("/usr/dev")], ["second_command", 1337]], - verbosity=ProcessVerbosity.SILENT, + processes=[["command", 0, 1.5, Path("/usr/dev")], ["second_command", 1337]] ) command_as_str = "command 0 1.5 /usr/dev | second_command 1337" - expected_popen_calls = [ - call( - args=["command", "0", "1.5", "/usr/dev"], - stdin=None, - stdout=PIPE, - stderr=PIPE, - ), - call( - args=["second_command", "1337"], - stdin=process_mock.stdout, - stdout=PIPE, - stderr=PIPE, - ), - ] - assert mock_popen.call_count == len(expected_popen_calls) 
- mock_popen.assert_has_calls(calls=expected_popen_calls) mock_resolve_process_results.assert_called_once_with( command_as_str=command_as_str, output=b"output", error=b"error", return_code=0, - verbosity=ProcessVerbosity.SILENT, ) - mock_print.assert_not_called() + assert command_as_str not in caplog.text -def test_run_piped_processes_one_process() -> None: +def test_run_piped_processes_one_process(caplog: pytest.LogCaptureFixture) -> None: + caplog.set_level(logging.DEBUG, logger="build_support.process_runner") with ( patch("build_support.process_runner.Popen") as mock_popen, - patch("builtins.print") as mock_print, patch( "build_support.process_runner.resolve_process_results" ) as mock_resolve_process_results, @@ -317,15 +273,17 @@ def test_run_piped_processes_one_process() -> None: output=b"output", error=b"error", return_code=0, - verbosity=ProcessVerbosity.ALL, ) - mock_print.assert_called_once_with(command_as_str, flush=True) + assert command_as_str in caplog.text -def test_run_piped_processes_one_process_silent() -> None: +def test_run_piped_processes_one_process_at_info_level_hides_command( + caplog: pytest.LogCaptureFixture, +) -> None: + """At INFO level the command line is not logged (it requires DEBUG).""" + caplog.set_level(logging.INFO, logger="build_support.process_runner") with ( patch("build_support.process_runner.Popen") as mock_popen, - patch("builtins.print") as mock_print, patch( "build_support.process_runner.resolve_process_results" ) as mock_resolve_process_results, @@ -335,22 +293,12 @@ def test_run_piped_processes_one_process_silent() -> None: process_mock.communicate.return_value = b"output", b"error" process_mock.returncode = 0 mock_popen.return_value = process_mock - run_piped_processes( - processes=[["command", 0, 1.5, Path("/usr/dev")]], - verbosity=ProcessVerbosity.SILENT, - ) + run_piped_processes(processes=[["command", 0, 1.5, Path("/usr/dev")]]) command_as_str = "command 0 1.5 /usr/dev" - mock_popen.assert_called_once_with( - 
args=["command", "0", "1.5", "/usr/dev"], - stdin=None, - stdout=PIPE, - stderr=PIPE, - ) mock_resolve_process_results.assert_called_once_with( command_as_str=command_as_str, output=b"output", error=b"error", return_code=0, - verbosity=ProcessVerbosity.SILENT, ) - mock_print.assert_not_called() + assert command_as_str not in caplog.text diff --git a/docs/developer_tooling.rst b/docs/developer_tooling.rst index b31c57f..806607b 100755 --- a/docs/developer_tooling.rst +++ b/docs/developer_tooling.rst @@ -8,6 +8,19 @@ machine specific environment variables (such as the directory location) and so w a Makefile to get these variables and pass them to the docker commands. Because of this all of the standard commands are accessed via `make` commands. +Build output verbosity is controlled by the :code:`LOG_LEVEL` environment variable. +Pass it to :code:`make` (e.g. :code:`make test LOG_LEVEL=DEBUG`) to change how much the +build prints. Three levels apply to successful runs: + +* **INFO** (default): Workflow only — task list, "Starting:", run report. No command + lines or task output. +* **DEBUG**: INFO plus the command line before each subprocess (e.g. the + :code:`docker run` invocation for each task). +* **TRACE**: DEBUG plus task stdout and stderr with "stdout:" and "stderr:" labels. + +On task failure, the failing command, return code, and that command's stdout and +stderr are always logged at ERROR, so they are visible at any :code:`LOG_LEVEL`. + Everything runs in Docker with Python dependencies managed by uv. Running uv commands (e.g. :code:`uv lock`) outside the dev container can leave the environment out of sync. Follow the instructions below when changing dependencies. 
diff --git a/docs/tickets/template_python_project/95-replace-print-with-logging.rst b/docs/tickets/template_python_project/95-replace-print-with-logging.rst new file mode 100644 index 0000000..9ce84db --- /dev/null +++ b/docs/tickets/template_python_project/95-replace-print-with-logging.rst @@ -0,0 +1,102 @@ +Replace Print Statements with Configurable Logging Levels +========================================================= + +Overview +-------- +Replace direct ``print()`` usage in the build pipeline with a standard logging +facility and configurable log level. This lets developers and agents choose how +much output they see when running ``make`` (e.g. quiet workflow-only vs. full +command and task output), reducing noise while keeping important progress and +failure information visible. + +Requirements +------------ + +User Flow +~~~~~~~~~ +1. User runs any ``make`` target (e.g. ``make test``, ``make format``). +2. Build runs inside the build container; amount of console output depends on + the configured log level (e.g. via environment variable or Make variable). +3. At the most verbose level, behavior matches current output (workflow lines, + commands, and full task stdout/stderr). At quieter levels, only higher-level + workflow messages (and failures) are shown. + +Output Tiers and Suggested Log Levels +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +Map existing output to logging levels so that a single level controls what is +printed. Suggested mapping: + +**Highest level — workflow only (e.g. default or INFO):** + +- "Will execute the following tasks:" +- Bullet list of task names +- "Starting: " per task +- Build run report (YAML summary of task durations) + +**Mid level — commands (e.g. DEBUG):** + +- The exact command line for each subprocess (e.g. ``docker run ... python ...``) + as currently printed by ``process_runner`` before running. + +**Highest verbosity — task output (e.g. DEBUG or a dedicated verbose level):** + +- Full stdout/stderr of each task (e.g. 
pytest output, coverage report, linter + output) as currently streamed by ``process_runner.resolve_process_results``. + +**Always shown (independent of level):** + +- Failures: non-zero exit codes and the command that failed (already partially + handled when ``ProcessVerbosity.SILENT`` and failure). +- Unhandled exceptions in the build entrypoint (e.g. in ``execute_build_steps``) + should be logged at ERROR and/or re-raised so they are visible. + +Functional Requirements +~~~~~~~~~~~~~~~~~~~~~~~ +- Introduce a single, consistent way to set log level for the build (e.g. + ``LOG_LEVEL`` or ``BUILD_LOG_LEVEL``) that is respected by all build_support + code that currently uses ``print()``. Prefer standard library ``logging``. +- Replace all relevant ``print()`` calls in the build pipeline with appropriate + log level calls. Key locations: + - ``build_support/dag_engine.py``: workflow messages and run report (INFO or + equivalent for default visibility). + - ``build_support/process_runner.py``: command line (DEBUG), subprocess + stdout/stderr (DEBUG or verbose), and failure message on non-zero exit + (ERROR or always-visible). + - ``build_support/execute_build_steps.py``: exception handler should use + logging (ERROR) instead of ``print(e)``. + - ``build_support/report_build_var.py``: output is the primary result of the + script (docker command string). Either keep as the script’s intended stdout + result (not “logging”) or document that it is emitted at a specific level + when called from the build. +- Default log level should give a quiet-but-useful experience (e.g. workflow + only), with an option to increase verbosity (e.g. ``make test + LOG_LEVEL=DEBUG`` or equivalent) for debugging. +- Existing ``ProcessVerbosity`` (SILENT vs ALL) may be refactored or replaced + by the new log-level behavior so that “silent” corresponds to minimal logging + and “all” to maximum verbosity. 
+ +Acceptance Criteria / Feature Tests +----------------------------------- +- A feature test (or equivalent) verifies that when the build runs with the + default log level, only workflow-level messages appear (task list, “Starting: + …”, run report); no raw command lines and no full task output (e.g. no pytest + session log) in the captured stdout/stderr. +- A feature test verifies that when the build runs with a verbose/debug log + level, command lines are present in the output. +- A feature test verifies that when the build runs with maximum verbosity, full + task output (e.g. pytest output) is present in the output. +- A feature test verifies that on task failure (non-zero exit), the failure + and the failing command (or equivalent) are visible even at the quietest + level. +- Unit tests for build_support modules that emit output are updated to assert + on logging calls or log-level behavior where appropriate; 100% coverage is + maintained. + +Notes +----- +- ``report_build_var.py`` output is treated as data for Makefile consumption, + not logging. It is emitted directly to stdout via ``sys.stdout.write(...)`` + so callers can capture the exact value without logging metadata. +- Consider documenting the chosen env/Make variable (e.g. ``LOG_LEVEL``) in + ``docs/developer_tooling.rst`` or similar so developers and agents know how + to reduce or increase build output. diff --git a/pyproject.toml b/pyproject.toml index cc4c5cd..f1af081 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -63,12 +63,12 @@ enable_error_code = [ [project] name = "template_python_project" -version = "0.2.39" +version = "0.2.40" packages = [{include = "template_python_project", from="pypi_package/src"}] description = "A project that can be used as a template to provide some CI/CD out of the box." 
readme = "README.md" license = { text = "unlicense" } -authors = ["Alexander Olson "] +authors = [{ name = "Alexander Olson", email = "alec.g.olson@gmail.com" }] repository = "https://github.com/alec-g-olson/TemplatePythonProject" # If python version changed here change: # - base image in Dockerfile