diff --git a/.coderabbit.yaml b/.coderabbit.yaml new file mode 100644 index 00000000..cbac4179 --- /dev/null +++ b/.coderabbit.yaml @@ -0,0 +1,106 @@ +# yaml-language-server: $schema=https://coderabbit.ai/integrations/schema.v2.json +# +# CodeRabbit aligns with AGENTS.md: contract-first APIs, OpenSpec, Hatch quality gates. +# `reviews.auto_review.base_branches` includes `dev` so PRs into `dev` are auto-reviewed (not only the +# repo default branch). See https://docs.coderabbit.ai/reference/configuration (auto_review). +# Pre-push / finalize: run `cr --base dev` (or `coderabbit review`) from repo root; see +# https://docs.coderabbit.ai/cli/overview +# PR description: include `@coderabbitai summary` (default placeholder) for the high-level summary. +# +language: "en-US" +early_access: false +tone_instructions: >- + Provide architectural insights on system design, scalability, and long-term maintainability. + Prioritize contract-first public APIs (@icontract, @beartype), Typer/Rich CLI boundaries, OpenSpec + traceability, and cross-repo impact for bundled modules. + +reviews: + profile: assertive + request_changes_workflow: false + high_level_summary: true + high_level_summary_in_walkthrough: true + review_details: true + sequence_diagrams: true + estimate_code_review_effort: true + assess_linked_issues: true + related_issues: true + related_prs: true + poem: false + collapse_walkthrough: true + changed_files_summary: true + review_status: true + commit_status: true + high_level_summary_instructions: | + Structure the summary for SpecFact CLI maintainers: + - User-visible behavior and CLI surface (commands, options, defaults). + - Contract/API impact: public functions, Pydantic models, module package boundaries. + - Testing and quality gates: contracts, hatch run contract-test, coverage implications. + - If applicable: OpenSpec change ID, docs/ and CHANGELOG touchpoints, module signing / version bumps. 
+ auto_review: + enabled: true + drafts: false + auto_incremental_review: true + # PRs targeting `dev` (not only the GitHub default branch, e.g. `main`) get automatic reviews. + base_branches: + - "^dev$" + path_instructions: + - path: "src/specfact_cli/**/*.py" + instructions: | + Focus on modular CLI architecture: lazy module loading, registry/bootstrap patterns, and + dependency direction. Flag breaking changes to public APIs, Pydantic models, and resource + bundling. Verify @icontract + @beartype on public surfaces; prefer centralized logging + (get_bridge_logger) over print(). + - path: "openspec/**/*.md" + instructions: | + Treat as specification source of truth: proposal/tasks/spec deltas vs. code behavior, + CHANGE_ORDER consistency, and scenario coverage. Surface drift between OpenSpec and + implementation. + - path: "tests/**/*.py" + instructions: | + Contract-first testing: meaningful scenarios, not redundant assertions already covered by + contracts. Flag flakiness, environment coupling, and missing coverage for changed behavior. + - path: ".github/workflows/**" + instructions: | + CI safety: secrets usage, workflow dependencies, alignment with hatch test / contract-test + gates, and action versions. + - path: "scripts/**/*.py" + instructions: | + Deterministic tooling: subprocess safety, Hatch integration, and parity with documented + quality gates (format, type-check, module signing). + - path: "docs/**/*.md" + instructions: | + User-facing accuracy: CLI examples match current behavior; preserve Jekyll front matter; + call out when README/docs index need sync. + + tools: + ruff: + enabled: true + semgrep: + enabled: true + config_file: "tools/semgrep/" + yamllint: + enabled: true + actionlint: + enabled: true + shellcheck: + enabled: true + + pre_merge_checks: + title: + mode: warning + requirements: "Prefer Conventional Commits-style prefixes (feat:, fix:, docs:, test:, refactor:, chore:)." 
+ issue_assessment: + mode: warning + +knowledge_base: + learnings: + scope: local + linked_repositories: + - repository: "nold-ai/specfact-cli-modules" + instructions: >- + Companion repo: bundled module packages (module-package.yaml), IDE prompts, and + modules.specfact.io content. When CLI surface, imports, or bundled module contracts change, + flag required updates, version bumps, and signature verification impacts. + +chat: + auto_reply: true diff --git a/.github/workflows/docs-review.yml b/.github/workflows/docs-review.yml index b722c4d5..3afa28db 100644 --- a/.github/workflows/docs-review.yml +++ b/.github/workflows/docs-review.yml @@ -9,9 +9,16 @@ on: - "**/*.md" - "**/*.mdc" - "docs/**" + - "docs/.doc-frontmatter-enforced" - "tests/unit/docs/**" + - "tests/unit/scripts/test_doc_frontmatter/**" + - "tests/integration/scripts/test_doc_frontmatter/**" + - "tests/helpers/doc_frontmatter.py" + - "tests/helpers/doc_frontmatter_fixtures.py" + - "tests/helpers/doc_frontmatter_types.py" - "scripts/check-docs-commands.py" - "scripts/check-cross-site-links.py" + - "scripts/check_doc_frontmatter.py" - "pyproject.toml" - ".github/workflows/docs-review.yml" push: @@ -20,9 +27,16 @@ on: - "**/*.md" - "**/*.mdc" - "docs/**" + - "docs/.doc-frontmatter-enforced" - "tests/unit/docs/**" + - "tests/unit/scripts/test_doc_frontmatter/**" + - "tests/integration/scripts/test_doc_frontmatter/**" + - "tests/helpers/doc_frontmatter.py" + - "tests/helpers/doc_frontmatter_fixtures.py" + - "tests/helpers/doc_frontmatter_types.py" - "scripts/check-docs-commands.py" - "scripts/check-cross-site-links.py" + - "scripts/check_doc_frontmatter.py" - "pyproject.toml" - ".github/workflows/docs-review.yml" workflow_dispatch: @@ -62,11 +76,14 @@ jobs: continue-on-error: true run: hatch run check-cross-site-links --warn-only - - name: Run docs review suite + - name: Validate documentation frontmatter (enforced paths; aligns with local docs-validate / pre-commit) + run: hatch run 
doc-frontmatter-check + + - name: Run docs review test suites run: | mkdir -p logs/docs-review DOCS_REVIEW_LOG="logs/docs-review/docs-review_$(date -u +%Y%m%d_%H%M%S).log" - hatch run pytest tests/unit/docs/ -q 2>&1 | tee "$DOCS_REVIEW_LOG" + hatch run pytest tests/unit/docs/ tests/unit/scripts/test_doc_frontmatter/ tests/integration/scripts/test_doc_frontmatter/ -q 2>&1 | tee "$DOCS_REVIEW_LOG" exit "${PIPESTATUS[0]:-$?}" - name: Upload docs review logs diff --git a/.github/workflows/pr-orchestrator.yml b/.github/workflows/pr-orchestrator.yml index f95470e6..be22944a 100644 --- a/.github/workflows/pr-orchestrator.yml +++ b/.github/workflows/pr-orchestrator.yml @@ -151,7 +151,6 @@ jobs: uses: actions/cache@v4 with: path: | - ~/.local/share/hatch ~/.cache/uv key: ${{ runner.os }}-hatch-tests-py312-${{ hashFiles('pyproject.toml', 'src/specfact_cli/modules/*/__init__.py') }} restore-keys: | @@ -219,7 +218,7 @@ jobs: compat-py311: name: Compatibility (Python 3.11) runs-on: ubuntu-latest - needs: [changes, tests] + needs: [changes, verify-module-signatures] if: needs.changes.outputs.skip_tests_dev_to_main != 'true' permissions: contents: read @@ -248,7 +247,6 @@ jobs: uses: actions/cache@v4 with: path: | - ~/.local/share/hatch ~/.cache/uv key: ${{ runner.os }}-hatch-compat-py311-${{ hashFiles('pyproject.toml', 'src/specfact_cli/modules/*/__init__.py') }} restore-keys: | @@ -274,7 +272,7 @@ jobs: contract-first-ci: name: Contract-First CI runs-on: ubuntu-latest - needs: [changes, tests, compat-py311] + needs: [changes, verify-module-signatures] if: needs.changes.outputs.skip_tests_dev_to_main != 'true' permissions: contents: read @@ -303,7 +301,6 @@ jobs: uses: actions/cache@v4 with: path: | - ~/.local/share/hatch ~/.cache/uv key: ${{ runner.os }}-hatch-contract-first-py312-${{ hashFiles('pyproject.toml', 'src/specfact_cli/modules/*/__init__.py') }} restore-keys: | @@ -336,7 +333,7 @@ jobs: cli-validation: name: CLI Command Validation runs-on: ubuntu-latest - needs: 
[changes, contract-first-ci] + needs: [changes, verify-module-signatures] if: needs.changes.outputs.skip_tests_dev_to_main != 'true' permissions: contents: read @@ -414,7 +411,7 @@ jobs: type-checking: name: Type Checking (basedpyright) runs-on: ubuntu-latest - needs: [changes, tests] + needs: [changes, verify-module-signatures] if: needs.changes.outputs.skip_tests_dev_to_main != 'true' permissions: contents: read @@ -449,7 +446,7 @@ jobs: linting: name: Linting (ruff, pylint) runs-on: ubuntu-latest - needs: [changes, tests] + needs: [changes, verify-module-signatures] if: needs.changes.outputs.skip_tests_dev_to_main != 'true' permissions: contents: read diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index e92d81bf..ef637d53 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -1,14 +1,23 @@ repos: - repo: local hooks: - - id: specfact-code-review-gate - name: Run code review gate on staged Python files - entry: hatch run python scripts/pre_commit_code_review.py - language: system - files: \.pyi?$ - id: verify-module-signatures name: Verify module signatures and version bumps entry: hatch run ./scripts/verify-modules-signature.py --require-signature --enforce-version-bump language: system pass_filenames: false always_run: true + - id: check-doc-frontmatter + name: Check documentation ownership frontmatter (enforced paths) + entry: hatch run doc-frontmatter-check + language: system + files: ^(docs/.*\.md|docs/\.doc-frontmatter-enforced|USAGE-FAQ\.md)$ + pass_filenames: false + always_run: false + - id: specfact-code-review-gate + name: Run code review gate on staged Python files + entry: hatch run python scripts/pre_commit_code_review.py + language: system + files: \.pyi?$ + # Show summary + copy-paste lines on success; pre-commit hides hook output otherwise. 
+ verbose: true diff --git a/AGENTS.md b/AGENTS.md index e1ffc3cd..988e01cf 100644 --- a/AGENTS.md +++ b/AGENTS.md @@ -37,6 +37,9 @@ hatch run lint-workflows # GitHub Actions actionlint # Code scanning hatch run scan-all # semgrep analysis + +# SpecFact code review JSON (dogfood; see "SpecFact Code Review JSON" below and openspec/config.yaml) +hatch run specfact code review run --json --out .specfact/code-review.json ``` ## Architecture @@ -172,6 +175,36 @@ Run all steps in order before committing. Every step must pass with no errors. 5. `hatch run contract-test` # contract-first validation 6. `hatch run smart-test` # targeted test run (use `smart-test-full` for larger modifications) +With `pre-commit` installed (`pre-commit install`), staged `*.py` / `*.pyi` files also run the local code review gate (`scripts/pre_commit_code_review.py`), which writes the same `.specfact/code-review.json` path. That hook is fast feedback; it does not replace the **PR / change-completion** review rules in the next section when your OpenSpec tasks require a full-scope run. + +### SpecFact Code Review JSON (Dogfood, Quality Gate) + +This matches **`openspec/config.yaml`** (project `context` and **`rules.tasks`** for code review): treat **`.specfact/code-review.json`** as mandatory evidence before an OpenSpec change is considered complete and before you rely on “all gates green” for a PR. + +**When to (re)run the review** + +- The file is **missing**, or +- It is **stale**: the report’s last-modified time is older than any file you changed for this work under `src/`, `scripts/`, `tools/`, `tests/`, or under `openspec/changes/<change-id>/` **except** `openspec/changes/<change-id>/TDD_EVIDENCE.md` — evidence-only edits there do **not** by themselves invalidate the review; re-run when proposal, specs, tasks, design, or code change. 
+ +**Command** + +```bash +hatch run specfact code review run --json --out .specfact/code-review.json +``` + +- While iterating on a branch, prefer a **changed-files scope** when available (e.g. `--scope changed`) so feedback stays fast. +- Before the **final PR** for a change, run a **full** (or equivalent) scope so the report covers the whole quality surface your tasks expect (e.g. `--scope full`). + +**Remediation** + +- Read the JSON report and fix **every** finding at any severity (warning, advisory, error, or equivalent in the schema) unless the change proposal documents a **rare, explicit, justified** exception. +- After substantive edits, re-run until the report shows a **passing** outcome from the review module (e.g. overall verdict PASS / CI exit 0 per schema). +- Record the review command(s) and timestamp in `openspec/changes/<change-id>/TDD_EVIDENCE.md` or in the PR description when the change touches behavior or quality gates. + +**Consistency** + +- OpenSpec change **`tasks.md`** should include explicit tasks for generating/updating this file and clearing findings (see `openspec/config.yaml` → `rules.tasks` → “SpecFact code review JSON”). Agent runs should treat those tasks and this section as the same bar. + ### Module Signature Gate (Required for Change Finalization) Before PR creation, every change MUST pass bundled module signature verification: diff --git a/CHANGELOG.md b/CHANGELOG.md index 74212e47..4d27d57e 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -9,6 +9,51 @@ All notable changes to this project will be documented in this file. --- +## [0.43.2] - 2026-03-29 + +### Added + +- Documentation ownership frontmatter rollout: + - `scripts/check_doc_frontmatter.py` + - `docs/.doc-frontmatter-enforced` (rollout scope) + - `hatch run doc-frontmatter-check` + - Pre-commit hook integration + - Guide: `docs/contributing/docs-sync.md` + - Sample pages include the extended schema; use `--all-docs` for a full-site check. 
+ +### Fixed + +- **Tests:** Register doc-frontmatter shared fixtures via root ``tests/conftest.py`` + ``pytest_plugins`` (Pytest 8+ forbids ``pytest_plugins`` in nested conftests); scope env/cache + isolation to ``test_doc_frontmatter`` paths only. +- OpenSpec (`openspec/config.yaml`): **SpecFact code review JSON** dogfood tasks require a fresh + `.specfact/code-review.json`, remediation of review findings before merge, and TDD evidence. + **Freshness** excludes `openspec/changes/<change-id>/TDD_EVIDENCE.md` from the staleness + comparison so evidence-only edits there do not by themselves force a new review run. +- `scripts/pre_commit_code_review.py`: invoke review from repo root (`cwd`), emit JSON as above, + and enforce a 300s subprocess timeout with `TimeoutExpired` handling so the hook cannot hang + indefinitely. + +### Changed + +- **CI:** `Docs Review` GitHub Actions workflow runs `hatch run doc-frontmatter-check` + and includes doc-frontmatter unit/integration tests alongside `tests/unit/docs/`, with path + filters for frontmatter scripts and helpers. +- Pre-commit `specfact-code-review-gate`: switched from `specfact code review run` with + `--score-only` to `specfact code review run --json --out .specfact/code-review.json`, writing + governed `ReviewReport` JSON under gitignored `.specfact/` for IDE and Copilot workflows. The + hook now prints severity counts on stderr, a short summary with the report path (absolute path + hint), copy-paste prompts for Copilot/Cursor, and relies on `verbose: true` so successful runs + still surface that feedback next to the nested CLI output. +- Documentation: `docs/modules/code-review.md` updated for the JSON report path and the pre-commit hook behavior. +- Tests: `tests/unit/scripts/test_pre_commit_code_review.py` and `test_code_review_module_docs.py` updated for + `--json`/`--out`, repo-root `cwd`, and timeout handling. 
+- Doc frontmatter validation now uses a Pydantic `DocFrontmatter` model, stricter `tracks` glob checks + (`fnmatch.translate` + `re.compile` after bracket/brace balance), and a reference page at + `docs/contributing/frontmatter-schema.md`. + +--- + ## [0.43.1] - 2026-03-28 ### Changed diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index c8fea83e..65ed0a91 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -70,6 +70,10 @@ hatch test --cover -v - **Type Hints**: Use type hints with `@beartype` runtime checking - **Testing**: Contract-first testing with scenario tests - **Documentation**: Update relevant documentation +- **Docs site (`docs/`)**: Jekyll pages keep existing frontmatter (`layout`, `title`, `permalink`, …). + Pages listed in `docs/.doc-frontmatter-enforced` must include `doc_owner`, `tracks`, + `last_reviewed`, and `exempt`; `exempt_reason` is required only when `exempt: true`. See + [docs/contributing/docs-sync.md](docs/contributing/docs-sync.md). - **Docstrings**: Follow Google Python Style Guide ### Pre-commit Checks diff --git a/docs/.doc-frontmatter-enforced b/docs/.doc-frontmatter-enforced new file mode 100644 index 00000000..bfca20a4 --- /dev/null +++ b/docs/.doc-frontmatter-enforced @@ -0,0 +1,8 @@ +# Paths (relative to repo root) that must carry full doc-sync frontmatter. +# Expand this list as the rollout progresses; use `scripts/check_doc_frontmatter.py --all-docs` for a full-site check. 
+docs/index.md +docs/getting-started/quickstart.md +docs/core-cli/init.md +docs/reference/documentation-url-contract.md +docs/contributing/docs-sync.md +docs/contributing/frontmatter-schema.md diff --git a/docs/contributing/docs-sync.md b/docs/contributing/docs-sync.md new file mode 100644 index 00000000..6d25ec98 --- /dev/null +++ b/docs/contributing/docs-sync.md @@ -0,0 +1,53 @@ +--- +layout: default +title: Documentation ownership and frontmatter +permalink: /contributing/docs-sync/ +description: YAML frontmatter schema for documentation ownership, tracking, and validation on docs.specfact.io. +keywords: [documentation, frontmatter, ownership, validation] +audience: [solo, team, enterprise] +expertise_level: [intermediate, advanced] +doc_owner: specfact-cli +tracks: + - scripts/check_doc_frontmatter.py + - docs/** +last_reviewed: 2026-03-29 +exempt: false +exempt_reason: "" +--- + +# Documentation ownership and frontmatter + +Core documentation uses **YAML frontmatter** for Jekyll (layout, title, permalink) and for **ownership** fields that drive the `scripts/check_doc_frontmatter.py` checker. + +## Schema (ownership) + +For field definitions, validation rules, and cross-repo notes, see **[Doc-sync frontmatter reference](frontmatter-schema.md)**. + +Add these keys alongside existing Jekyll fields: + +| Field | Required | Description | +| --- | --- | --- | +| `doc_owner` | yes | Repo path (e.g. `src/specfact_cli`) or a known token (`specfact-cli`, `nold-ai`, `openspec`). | +| `tracks` | yes | List of glob patterns for sources this page should stay aligned with. | +| `last_reviewed` | yes | `YYYY-MM-DD` date of last substantive review. | +| `exempt` | yes | `true` only for stable or legal pages that skip sync rules; use with `exempt_reason`. | +| `exempt_reason` | yes | Empty string when `exempt` is `false`; non-empty when `exempt` is `true`. 
| + +## Validation + +From the repository root: + +```bash +hatch run doc-frontmatter-check +``` + +Use `--fix-hint` for suggested YAML blocks. During rollout, only paths listed in `docs/.doc-frontmatter-enforced` are checked unless you pass `--all-docs`. +The enforced-path file accepts glob patterns matched against repo-relative Markdown paths under +`docs/` and root docs such as `USAGE-FAQ.md`; blank lines and lines starting with `#` are ignored by +the checker. + +## Troubleshooting + +- **Missing `doc_owner`**: add the field and a sensible `tracks` list for the code or specs this page describes. +- **Owner does not resolve**: use a path that exists under the repo or one of the known tokens. +- **Invalid `tracks`**: ensure balanced `[]` and `{}` in glob patterns. diff --git a/docs/contributing/frontmatter-schema.md b/docs/contributing/frontmatter-schema.md new file mode 100644 index 00000000..a886161e --- /dev/null +++ b/docs/contributing/frontmatter-schema.md @@ -0,0 +1,46 @@ +--- +layout: default +title: Doc-sync frontmatter reference +permalink: /contributing/frontmatter-schema/ +description: Required ownership fields, validation rules, and cross-site documentation contracts for core docs. +keywords: [documentation, frontmatter, schema, validation] +audience: [solo, team, enterprise] +expertise_level: [intermediate, advanced] +doc_owner: specfact-cli +tracks: + - scripts/check_doc_frontmatter.py + - docs/contributing/** +last_reviewed: 2026-03-29 +exempt: false +exempt_reason: "" +--- + +# Doc-sync frontmatter reference + +This page is the **authoritative schema** for the ownership and tracking fields consumed by `scripts/check_doc_frontmatter.py`. Jekyll fields (`layout`, `title`, `permalink`, etc.) stay unchanged; the rows below are **additional** keys for doc-sync. 
+ +## Required fields + +| Field | Type | Rules | +| --- | --- | --- | +| `doc_owner` | string | Repo-relative path that exists, or a token from `VALID_OWNER_TOKENS` in the script (`specfact-cli`, `nold-ai`, `openspec`). | +| `tracks` | list of strings | Non-empty; each entry is a [fnmatch](https://docs.python.org/3/library/fnmatch.html)-style glob. Brackets and braces must balance; patterns must compile as fnmatch regex (see `validate_glob_patterns` in the script). | +| `last_reviewed` | `YYYY-MM-DD` | ISO date; YAML may load as a string or date. | +| `exempt` | boolean | `true` only for pages that intentionally skip sync rules. | +| `exempt_reason` | string | Empty when `exempt` is `false`; non-empty when `exempt` is `true`. | + +Runtime validation uses a Pydantic model (`DocFrontmatter` in `scripts/check_doc_frontmatter.py`): missing keys, wrong types, bad globs, unresolved `doc_owner`, and invalid exempt/reason pairs fail the check. + +## Cross-repo notes + +- **Core vs modules:** Canonical user-facing docs for bundles and marketplace content live on [modules.specfact.io](https://modules.specfact.io/). Linking and URL rules are described in [Documentation URL contract](../reference/documentation-url-contract.md). +- **Bundled modules:** When CLI or module surface changes, update both this repo’s docs (as tracked by `tracks`) and the sibling **specfact-cli-modules** documentation where applicable; CodeRabbit linked-repo analysis can flag cross-repo drift. + +## Commands + +```bash +hatch run doc-frontmatter-check +hatch run doc-frontmatter-check -- --all-docs +``` + +Rollout scope is controlled by `docs/.doc-frontmatter-enforced`. See [Documentation ownership and frontmatter](/contributing/docs-sync/) for workflow. 
diff --git a/docs/core-cli/init.md b/docs/core-cli/init.md index b87c67b7..61488d21 100644 --- a/docs/core-cli/init.md +++ b/docs/core-cli/init.md @@ -6,6 +6,13 @@ description: Reference for the specfact init command - bootstrap SpecFact in a r keywords: [init, bootstrap, ide, profiles] audience: [solo, team, enterprise] expertise_level: [beginner, intermediate] +doc_owner: specfact-cli +tracks: + - src/specfact_cli/** + - openspec/** +last_reviewed: 2026-03-29 +exempt: false +exempt_reason: "" --- # specfact init diff --git a/docs/getting-started/quickstart.md b/docs/getting-started/quickstart.md index 21185bc2..1d35e081 100644 --- a/docs/getting-started/quickstart.md +++ b/docs/getting-started/quickstart.md @@ -8,6 +8,13 @@ description: Get SpecFact CLI running in under 5 minutes - install, bootstrap, a keywords: [quickstart, first-run, bootstrap, analysis] audience: [solo, team] expertise_level: [beginner] +doc_owner: specfact-cli +tracks: + - src/specfact_cli/** + - openspec/** +last_reviewed: 2026-03-29 +exempt: false +exempt_reason: "" --- # 5-Minute Quickstart diff --git a/docs/index.md b/docs/index.md index ca916fa4..0895533e 100644 --- a/docs/index.md +++ b/docs/index.md @@ -6,6 +6,13 @@ permalink: / keywords: [specfact, core-cli, runtime, module-system, architecture] audience: [solo, team, enterprise] expertise_level: [beginner, intermediate, advanced] +doc_owner: specfact-cli +tracks: + - src/specfact_cli/** + - openspec/** +last_reviewed: 2026-03-29 +exempt: false +exempt_reason: "" --- # SpecFact CLI Documentation diff --git a/docs/modules/code-review.md b/docs/modules/code-review.md index 2ea7a5cb..97aaa120 100644 --- a/docs/modules/code-review.md +++ b/docs/modules/code-review.md @@ -116,14 +116,42 @@ repos: entry: hatch run python scripts/pre_commit_code_review.py language: system files: \.pyi?$ + # Needed so you still see hook output when the gate passes (pre-commit hides it otherwise). 
+ verbose: true ``` The helper script scopes the gate to staged Python files only and then runs: ```bash -specfact code review run --score-only +specfact code review run --json --out .specfact/code-review.json ``` +The JSON report is written under ``.specfact/`` (ignored by git via ``.specfact/`` in ``.gitignore``) so local tools and Copilot can read structured findings. The pre-commit helper does **not** print the nested review CLI’s full stdout (tool banners and runner lines); it only prints the short summary and copy-paste lines on stderr after the run. + +### Verdict line, report file, and Copilot + +After each run, the helper prints a **one-line summary on stderr** (with ``verbose: true`` on the hook so it is visible even when the commit is allowed), for example: + +```text +Code review summary: 3 finding(s) (errors=0, warnings=3, advisory=0); overall_verdict='PASS'. +``` + +Immediately after that line, the script prints the **report path** (repo-relative and absolute), then **ready-to-paste prompts** for Copilot or Cursor so you can copy them without opening this page. + +The **authoritative machine-readable report** is always at the repository root: + +```text +.specfact/code-review.json +``` + +Open that file to see ``overall_verdict``, ``score``, ``findings`` (with ``file``, ``line``, ``rule``, ``severity``, ``message``), and the text ``summary``. Regenerate it anytime with the same ``specfact code review run --json --out .specfact/...`` command (or by letting pre-commit run the gate again). + +**Do not forget:** when you want help from GitHub Copilot or Cursor, point the assistant at the JSON file so it can remediate findings with full context. 
Example prompts you can reuse: + +- *Read ``.specfact/code-review.json`` from the latest review run and fix every finding, starting with errors then warnings.* +- *Using the review report at ``.specfact/code-review.json``, apply fixes in the listed files at the given line numbers.* +- *@workspace Attach or open ``.specfact/code-review.json`` and address each ``findings[]`` entry.* + Commit behavior: - `PASS` keeps the commit green @@ -148,7 +176,7 @@ repos: hooks: - id: specfact-code-review name: specfact code review gate - entry: specfact code review run --score-only + entry: specfact code review run --json --out .specfact/code-review.json language: system files: \.pyi?$ ``` diff --git a/docs/reference/documentation-url-contract.md b/docs/reference/documentation-url-contract.md index 121fdff3..fbc82bf7 100644 --- a/docs/reference/documentation-url-contract.md +++ b/docs/reference/documentation-url-contract.md @@ -6,6 +6,13 @@ description: Rules for linking between docs.specfact.io and modules.specfact.io; keywords: [docs contract, handoff, core vs modules] audience: [team, enterprise] expertise_level: [advanced] +doc_owner: specfact-cli +tracks: + - src/specfact_cli/** + - openspec/** +last_reviewed: 2026-03-29 +exempt: false +exempt_reason: "" --- # Documentation URL contract (core and modules) diff --git a/openspec/CHANGE_ORDER.md b/openspec/CHANGE_ORDER.md index 43f32509..ff7670d8 100644 --- a/openspec/CHANGE_ORDER.md +++ b/openspec/CHANGE_ORDER.md @@ -60,6 +60,7 @@ Only changes that are **archived**, shown as **✓ Complete** by `openspec list` | ✅ code-review-07-house-rules-skill | archived 2026-03-17 | | ✅ code-review-08-review-run-integration | archived 2026-03-17 | | ✅ code-review-09-f4-automation-upgrade | archived 2026-03-17 | +| ✅ doc-frontmatter-schema | archived 2026-03-29 | ### Pending @@ -109,10 +110,10 @@ The 2026-03-22 clean-code plan adds one new cross-repo change pair and re-sequen | Module | Order | Change folder | GitHub # | Blocked by | 
|--------|-------|---------------|----------|------------| | docs | 01 | docs-01-core-modules-docs-alignment | [#348](https://github.com/nold-ai/specfact-cli/issues/348) | module-migration-01 ✅; module-migration-02 ✅; module-migration-03 ✅; module-migration-05 ✅; module-migration-06/07 outputs inform residual cleanup wording | -| docs | 02 | doc-frontmatter-schema | pending | — | +| docs | 02 | ✅ doc-frontmatter-schema (archived 2026-03-29) | [#461](https://github.com/nold-ai/specfact-cli/issues/461) | Parent Feature: [#356](https://github.com/nold-ai/specfact-cli/issues/356) | | docs | 03 | ✅ docs-03-command-syntax-parity (archived 2026-03-18) | pending | docs-01 ✅; docs-02 ✅ | | docs | 04 | docs-04-docs-review-gate-and-link-integrity | pending | docs-03 ✅ | -| docs | 05 | ci-docs-sync-check | pending | docs-02 (doc-frontmatter-schema) | +| docs | 05 | ci-docs-sync-check | pending | docs-02 (doc-frontmatter-schema) [#461](https://github.com/nold-ai/specfact-cli/issues/461) | | docs | 06 | docs-05-core-site-ia-restructure | [#438](https://github.com/nold-ai/specfact-cli/issues/438) | docs-04; Parent Feature: [#356](https://github.com/nold-ai/specfact-cli/issues/356) | | docs | 07 | docs-07-core-handoff-conversion | [#439](https://github.com/nold-ai/specfact-cli/issues/439) | docs-05-core-site-ia-restructure; modules-repo/docs-06-modules-site-ia-restructure | | docs | 08 | docs-12-docs-validation-ci | [#440](https://github.com/nold-ai/specfact-cli/issues/440) | docs-05-core-site-ia-restructure; docs-07-core-handoff-conversion; modules-repo/docs-06 through docs-10 | @@ -168,6 +169,7 @@ Cross-repo dependency: `docs-07-core-handoff-conversion` depends on `specfact-cl | Module | Order | Change folder | GitHub # | Blocked by | |--------|-------|----------------|----------|------------| | — | — | ✅ ci-01 implemented 2026-02-16 (see Implemented above) | — | — | +| ci | 02 | ci-02-trustworthy-green-checks | [#465](https://github.com/nold-ai/specfact-cli/issues/465) | ci-01 
✅; code-review-08 ✅; Parent Feature: [#406](https://github.com/nold-ai/specfact-cli/issues/406) | ### Packaging and distribution @@ -378,8 +380,8 @@ Set these in GitHub so issue dependencies are explicit. Optional dependencies ar | [#255](https://github.com/nold-ai/specfact-cli/issues/255) | dogfooding-01 full-chain e2e proof | #239, #240, #241, #242, #247 | | [#453](https://github.com/nold-ai/specfact-cli/issues/453) | speckit-02 v0.4.x adapter alignment | — | | [modules#116](https://github.com/nold-ai/specfact-cli-modules/issues/116) | speckit-03 change proposal bridge *(modules repo)* | speckit-02 (#453) | -| TBD | doc-frontmatter-schema | — | -| TBD | ci-docs-sync-check | doc-frontmatter-schema | +| [#461](https://github.com/nold-ai/specfact-cli/issues/461) | doc-frontmatter-schema | Parent Feature: [#356](https://github.com/nold-ai/specfact-cli/issues/356) | +| TBD | ci-docs-sync-check | doc-frontmatter-schema ([#461](https://github.com/nold-ai/specfact-cli/issues/461)) | | TBD | code-review-02 ruff/radon runners | code-review-01 | | TBD | code-review-03 type/governance runners | code-review-01 | diff --git a/openspec/changes/doc-frontmatter-schema/.openspec.yaml b/openspec/changes/archive/2026-03-29-doc-frontmatter-schema/.openspec.yaml similarity index 100% rename from openspec/changes/doc-frontmatter-schema/.openspec.yaml rename to openspec/changes/archive/2026-03-29-doc-frontmatter-schema/.openspec.yaml diff --git a/openspec/changes/doc-frontmatter-schema/TDD_EVIDENCE.md b/openspec/changes/archive/2026-03-29-doc-frontmatter-schema/TDD_EVIDENCE.md similarity index 59% rename from openspec/changes/doc-frontmatter-schema/TDD_EVIDENCE.md rename to openspec/changes/archive/2026-03-29-doc-frontmatter-schema/TDD_EVIDENCE.md index d11de092..a406b667 100644 --- a/openspec/changes/doc-frontmatter-schema/TDD_EVIDENCE.md +++ b/openspec/changes/archive/2026-03-29-doc-frontmatter-schema/TDD_EVIDENCE.md @@ -5,6 +5,7 @@ ### Test Run: 2026-03-20 - Frontmatter Schema Tests 
**Command:** + ```bash cd /home/dom/git/nold-ai/specfact-cli-worktrees/feature/doc-frontmatter-schema && python -m pytest tests/unit/scripts/test_doc_frontmatter/test_schema.py -v ``` @@ -33,6 +34,7 @@ cd /home/dom/git/nold-ai/specfact-cli-worktrees/feature/doc-frontmatter-schema & ### Test Run: [Date] - Frontmatter Schema Tests (After Implementation) **Command:** + ```bash cd /home/dom/git/nold-ai/specfact-cli-worktrees/feature/doc-frontmatter-schema && python -m pytest tests/unit/scripts/test_doc_frontmatter/test_schema.py -v ``` @@ -46,6 +48,7 @@ cd /home/dom/git/nold-ai/specfact-cli-worktrees/feature/doc-frontmatter-schema & ### Test Run: 2026-03-20 - Validation Logic Tests (Pre-Implementation) **Command:** + ```bash cd /home/dom/git/nold-ai/specfact-cli-worktrees/feature/doc-frontmatter-schema && python -m pytest tests/unit/scripts/test_doc_frontmatter/test_validation.py -v ``` @@ -71,6 +74,7 @@ cd /home/dom/git/nold-ai/specfact-cli-worktrees/feature/doc-frontmatter-schema & ### Test Run: 2026-03-20 - Integration Tests (Pre-Implementation) **Command:** + ```bash cd /home/dom/git/nold-ai/specfact-cli-worktrees/feature/doc-frontmatter-schema && python -m pytest tests/integration/scripts/test_doc_frontmatter/test_integration.py -v ``` @@ -91,4 +95,87 @@ cd /home/dom/git/nold-ai/specfact-cli-worktrees/feature/doc-frontmatter-schema & **Error:** - `TestPerformance::test_execution_time_with_many_files` - fixture 'benchmark' not found (expected - benchmark fixture not available) -**Status:** ✅ TDD workflow confirmed - integration tests fail before implementation \ No newline at end of file +**Status:** ✅ TDD workflow confirmed - integration tests fail before implementation + +--- + +## Post-Implementation (2026-03-29) + +**Commands:** + +```bash +cd /home/dom/git/nold-ai/specfact-cli-worktrees/feature/doc-frontmatter-schema +hatch run pytest tests/unit/scripts/test_doc_frontmatter tests/integration/scripts/test_doc_frontmatter -q +``` + +**Result:** 31 passed (unit + 
integration). `last_reviewed` validation accepts YAML date objects via `isoformat()`. Rollout uses `docs/.doc-frontmatter-enforced`; default CLI skips when file missing; `--all-docs` validates entire `docs/` tree. + +**Status:** ✅ Green + +--- + +## Quality gates (2026-03-30) + +Recorded for `tasks.md` §5 checklist evidence (commands run from the feature worktree). + +**Timestamp:** 2026-03-30 (local) — see session `date` when re-running. + +**Commands (success unless noted):** + +| Step | Command | Result summary | +|------|---------|------------------| +| Format | `hatch run format` | Ruff format + fix applied; exit 0 | +| Type check | `hatch run type-check` | basedpyright strict; exit 0 | +| Lint | `hatch run lint` | Full lint suite; exit 0 | +| Contract | `hatch run contract-test` | Contract-first validation; exit 0 | +| Tests | `hatch run smart-test-full` or `hatch test --cover -v` | Full suite green | +| OpenSpec | `openspec validate doc-frontmatter-schema --strict` | Validation passed | + +**Note:** Re-run the rows above after substantive edits and refresh this table if outputs change. + +--- + +## PR Orchestrator Parallelization Delta (2026-03-30) + +### Pre-Implementation Test Failure (Expected) - PR Orchestrator Delta + +**Timestamp:** 2026-03-30T01:09:14+02:00 + +**Command:** + +```bash +cd /home/dom/git/nold-ai/specfact-cli-worktrees/feature/doc-frontmatter-schema +/home/dom/git/nold-ai/specfact-cli/.venv/bin/pytest tests/unit/specfact_cli/registry/test_signing_artifacts.py -q -k 'independent_jobs_do_not_wait_for_tests or quality_gates_still_depends_on_tests_for_coverage' +``` + +**Result:** ✅ 5 tests failed as expected before workflow changes; 1 coverage-dependency guard test passed. 
+ +**Failure summary:** +- `compat-py311` currently depends on `changes` and `tests` instead of `changes` and `verify-module-signatures` +- `contract-first-ci` currently depends on `changes`, `tests`, and `compat-py311` +- `type-checking` currently depends on `changes` and `tests` +- `linting` currently depends on `changes` and `tests` +- `cli-validation` currently depends on `changes` and `contract-first-ci` + +**Status:** ✅ TDD workflow confirmed for the PR-orchestrator dependency delta + +### Post-Implementation Test Success + +**Command:** + +```bash +cd /home/dom/git/nold-ai/specfact-cli-worktrees/feature/doc-frontmatter-schema +/home/dom/git/nold-ai/specfact-cli/.venv/bin/pytest tests/unit/specfact_cli/registry/test_signing_artifacts.py -q -k 'independent_jobs_do_not_wait_for_tests or quality_gates_still_depends_on_tests_for_coverage' +/home/dom/git/nold-ai/specfact-cli/.venv/bin/pytest tests/unit/scripts/test_doc_frontmatter tests/integration/scripts/test_doc_frontmatter -q +``` + +**Result:** ✅ Workflow dependency tests passed (`6 passed, 20 deselected`) and doc-frontmatter +regression slice remained green (`34 passed`). 
+ +**Passing summary:** +- `compat-py311`, `contract-first-ci`, `type-checking`, `linting`, and `cli-validation` now depend + on `changes` plus `verify-module-signatures` +- `quality-gates` still depends on `tests` because it consumes coverage artifacts +- Existing doc-frontmatter unit/integration coverage remained unchanged by the workflow delta + +**Status:** ✅ Delta implemented and verified diff --git a/openspec/changes/doc-frontmatter-schema/design.md b/openspec/changes/archive/2026-03-29-doc-frontmatter-schema/design.md similarity index 100% rename from openspec/changes/doc-frontmatter-schema/design.md rename to openspec/changes/archive/2026-03-29-doc-frontmatter-schema/design.md diff --git a/openspec/changes/doc-frontmatter-schema/proposal.md b/openspec/changes/archive/2026-03-29-doc-frontmatter-schema/proposal.md similarity index 70% rename from openspec/changes/doc-frontmatter-schema/proposal.md rename to openspec/changes/archive/2026-03-29-doc-frontmatter-schema/proposal.md index cccc9eef..edf6635e 100644 --- a/openspec/changes/doc-frontmatter-schema/proposal.md +++ b/openspec/changes/archive/2026-03-29-doc-frontmatter-schema/proposal.md @@ -15,62 +15,85 @@ The goal is to implement a frontmatter-based ownership model that: This change introduces a frontmatter-based documentation ownership and validation system: ### New Components + - **YAML Frontmatter Schema**: Standardized metadata format for documentation files -- **Validation Script**: `scripts/check-doc-frontmatter.py` to validate doc ownership +- **Validation Script**: `scripts/check_doc_frontmatter.py` to validate doc ownership - **Pre-commit Hook**: Integration with existing pre-commit workflow - **Documentation**: Updated contributing guidelines with frontmatter requirements ### Modified Components + - **Documentation Files**: Add frontmatter to existing docs in `docs/` directory - **Pre-commit Config**: `.pre-commit-config.yaml` updated with new validation hook - **Contributing Guidelines**: 
`docs/contributing/` updated with frontmatter documentation +- **PR Orchestrator Workflow**: `.github/workflows/pr-orchestrator.yml` dependency graph reduced so + independent validation jobs can start after signature verification instead of waiting for the + full Python 3.12 test suite ## Capabilities ### New Capabilities + - `doc-frontmatter-schema`: YAML frontmatter schema definition for documentation ownership - `doc-frontmatter-validation`: Pre-commit validation script implementation - `docs-contributing-updates`: Updated contributing guidelines with frontmatter documentation ### Modified Capabilities -None - this is a new feature that doesn't modify existing capabilities + +- `pr-orchestrator-parallelization`: PR workflow jobs that do not consume test artifacts run in + parallel after the shared signature gate ## Impact ### Files to Create -- `scripts/check-doc-frontmatter.py` - Validation script + +- `scripts/check_doc_frontmatter.py` - Validation script - `docs/contributing/docs-sync.md` - Frontmatter documentation ### Files to Modify + - `.pre-commit-config.yaml` - Add validation hook - `docs/**/*.md` - Add frontmatter metadata - `USAGE-FAQ.md` - Add frontmatter (root-level doc) +- `.github/workflows/pr-orchestrator.yml` - remove unnecessary `needs: tests` / downstream + serialization for independent CI jobs +- `tests/unit/specfact_cli/registry/test_signing_artifacts.py` - lock PR orchestrator dependency + graph for parallel execution ### Development Workflow + - Developers must add frontmatter to new documentation files - Pre-commit hooks validate doc metadata on every commit - CI workflows can use metadata for automated sync checking +- PR validation jobs that do not require coverage artifacts or prior test completion should not wait + for the `Tests (Python 3.12)` job ### Quality Gates + - Zero errors policy: All validation must pass - TDD-first approach: Tests for validation script created before implementation - Specfact code review: All changes go 
through review process - Git worktree patterns: Use git worktrees for isolated development ### GitHub Integration -- GitHub issue sync via specfact after openspec change creation -- Proper labels: `documentation`, `quality`, `automation` -- Link to parent epic: `feature/docs-sync-epic` + +- **Issue**: [#461](https://github.com/nold-ai/specfact-cli/issues/461) — `[Change] Doc Frontmatter Schema & Validation` +- **Labels**: `enhancement`, `documentation`, `change-proposal`, `openspec` +- **Parent Feature**: [#356](https://github.com/nold-ai/specfact-cli/issues/356) — Documentation & Discrepancy Remediation (cross-linked in thread on #356) +- **Repository**: `nold-ai/specfact-cli` +- **Status**: synced 2026-03-29 ## Success Criteria ### Technical Success + - ✅ All tracked documentation has valid frontmatter - ✅ Pre-commit validation prevents invalid commits - ✅ Validation script passes all tests - ✅ Zero errors in quality gates ### Process Success + - ✅ Openspec change follows spec-driven schema - ✅ Git worktree patterns used for isolation - ✅ Specfact code review completes with zero findings @@ -88,4 +111,4 @@ None - this is a new feature that doesn't modify existing capabilities - CI Docs Sync Check (separate change: `ci-docs-sync-check`) - Automated doc update suggestions - Impact analysis for source changes -- Ownership visualization tools \ No newline at end of file +- Ownership visualization tools diff --git a/openspec/changes/doc-frontmatter-schema/specs/doc-frontmatter-schema/spec.md b/openspec/changes/archive/2026-03-29-doc-frontmatter-schema/specs/doc-frontmatter-schema/spec.md similarity index 100% rename from openspec/changes/doc-frontmatter-schema/specs/doc-frontmatter-schema/spec.md rename to openspec/changes/archive/2026-03-29-doc-frontmatter-schema/specs/doc-frontmatter-schema/spec.md diff --git a/openspec/changes/doc-frontmatter-schema/specs/doc-frontmatter-validation/spec.md 
b/openspec/changes/archive/2026-03-29-doc-frontmatter-schema/specs/doc-frontmatter-validation/spec.md similarity index 77% rename from openspec/changes/doc-frontmatter-schema/specs/doc-frontmatter-validation/spec.md rename to openspec/changes/archive/2026-03-29-doc-frontmatter-schema/specs/doc-frontmatter-validation/spec.md index 7cb7015f..5d341b2d 100644 --- a/openspec/changes/doc-frontmatter-schema/specs/doc-frontmatter-validation/spec.md +++ b/openspec/changes/archive/2026-03-29-doc-frontmatter-schema/specs/doc-frontmatter-validation/spec.md @@ -3,100 +3,123 @@ ## ADDED Requirements ### Requirement: Validation Script Implementation -The system SHALL provide a validation script `scripts/check-doc-frontmatter.py` that enforces frontmatter requirements. + +The system SHALL provide a validation script `scripts/check_doc_frontmatter.py` that enforces frontmatter requirements. #### Scenario: Script execution with valid docs -- **WHEN** `check-doc-frontmatter.py` is executed + +- **WHEN** `check_doc_frontmatter.py` is executed - **AND** all tracked docs have valid frontmatter - **THEN** script SHALL exit with code 0 - **AND** output SHALL show success message #### Scenario: Script execution with invalid docs -- **WHEN** `check-doc-frontmatter.py` is executed + +- **WHEN** `check_doc_frontmatter.py` is executed - **AND** some docs have invalid frontmatter - **THEN** script SHALL exit with code 1 - **AND** output SHALL list all validation failures ### Requirement: Missing Doc Owner Detection + The system SHALL detect documentation files that are missing the `doc_owner` field. 
#### Scenario: Missing doc_owner in tracked file + - **WHEN** a tracked Markdown file lacks `doc_owner` field - **THEN** validation SHALL fail - **AND** error SHALL specify missing field #### Scenario: Valid doc_owner present + - **WHEN** a tracked Markdown file has valid `doc_owner` - **THEN** validation SHALL pass for owner requirement ### Requirement: Owner Resolution Validation + The system SHALL validate that `doc_owner` values resolve to existing paths or known tokens. #### Scenario: Owner resolves to existing path + - **WHEN** `doc_owner` is a path that exists in repository - **THEN** validation SHALL pass for owner resolution #### Scenario: Owner is valid known token + - **WHEN** `doc_owner` is in `VALID_OWNER_TOKENS` - **THEN** validation SHALL pass for owner resolution #### Scenario: Owner cannot be resolved + - **WHEN** `doc_owner` doesn't resolve to path or token - **THEN** validation SHALL fail - **AND** error SHALL suggest valid alternatives ### Requirement: Fix Hint Generation + The system SHALL provide helpful fix hints when validation fails. #### Scenario: Fix hint for missing frontmatter + - **WHEN** validation fails due to missing frontmatter - **AND** `--fix-hint` flag is used - **THEN** output SHALL include suggested frontmatter template #### Scenario: Fix hint for invalid owner + - **WHEN** validation fails due to invalid owner - **AND** `--fix-hint` flag is used - **THEN** output SHALL suggest valid owner alternatives ### Requirement: Tracked Files Discovery + The system SHALL discover all Markdown files that should be tracked for frontmatter validation. 
#### Scenario: Discover files in docs directory + - **WHEN** script runs - **THEN** all `docs/**/*.md` files SHALL be discovered - **AND** exempt files SHALL be excluded #### Scenario: Discover root-level docs + - **WHEN** script runs - **THEN** configured root-level docs SHALL be discovered - **AND** exempt files SHALL be excluded ### Requirement: Exempt Files Handling + The system SHALL properly handle files marked as exempt. #### Scenario: Exempt file with valid reason + - **WHEN** file has `exempt: true` with valid reason - **THEN** file SHALL be excluded from validation #### Scenario: Non-exempt file validation + - **WHEN** file has `exempt: false` or no exempt field - **THEN** file SHALL undergo full validation ## Contract Requirements ### Requirement: Validation Contracts + The validation script SHALL use `@icontract` decorators for validation logic: - `@require` for input validation - `@ensure` for validation results #### Scenario: Invalid file path input + - **WHEN** script receives invalid file path - **THEN** `@require` contract SHALL raise appropriate exception ### Requirement: Error Handling Contracts + The script SHALL handle errors gracefully with appropriate contracts. #### Scenario: File read error + - **WHEN** script encounters file read error - **THEN** error SHALL be caught and handled - **AND** script SHALL continue with other files @@ -104,15 +127,38 @@ The script SHALL handle errors gracefully with appropriate contracts. ## Performance Requirements ### Requirement: Efficient File Processing + The validation script SHALL process files efficiently. #### Scenario: Large documentation set + - **WHEN** script processes 100+ documentation files - **THEN** execution SHALL complete in < 2 seconds ### Requirement: Memory Efficiency + The script SHALL be memory efficient. 
#### Scenario: Memory usage with many files + - **WHEN** script processes large number of files -- **THEN** memory usage SHALL remain under 100MB \ No newline at end of file +- **THEN** memory usage SHALL remain under 100MB + +### Requirement: PR Orchestrator Parallel Job Graph + +The PR orchestrator workflow SHALL not serialize independent validation jobs behind the Python 3.12 +test suite when those jobs do not consume test artifacts. + +#### Scenario: Independent jobs start after shared signature gate + +- **WHEN** `.github/workflows/pr-orchestrator.yml` defines `compat-py311`, `contract-first-ci`, + `type-checking`, `linting`, and `cli-validation` +- **THEN** each of those jobs SHALL depend on `changes` and the shared signature gate +- **AND** none of those jobs SHALL list `tests` as a required predecessor unless they consume test + artifacts from that job + +#### Scenario: Coverage-based advisory gate still depends on tests + +- **WHEN** `quality-gates` reads the `coverage-reports` artifact +- **THEN** `quality-gates` SHALL keep `tests` as an explicit dependency +- **AND** the workflow SHALL continue to gate that job on unit-coverage availability diff --git a/openspec/changes/doc-frontmatter-schema/specs/docs-contributing-updates/spec.md b/openspec/changes/archive/2026-03-29-doc-frontmatter-schema/specs/docs-contributing-updates/spec.md similarity index 100% rename from openspec/changes/doc-frontmatter-schema/specs/docs-contributing-updates/spec.md rename to openspec/changes/archive/2026-03-29-doc-frontmatter-schema/specs/docs-contributing-updates/spec.md diff --git a/openspec/changes/doc-frontmatter-schema/tasks.md b/openspec/changes/archive/2026-03-29-doc-frontmatter-schema/tasks.md similarity index 53% rename from openspec/changes/doc-frontmatter-schema/tasks.md rename to openspec/changes/archive/2026-03-29-doc-frontmatter-schema/tasks.md index a7494cff..5233716a 100644 --- a/openspec/changes/doc-frontmatter-schema/tasks.md +++ 
b/openspec/changes/archive/2026-03-29-doc-frontmatter-schema/tasks.md @@ -28,11 +28,12 @@ Do not implement production code until tests exist and have been run (expecting - [x] 2.2 Add test dependencies - [x] 2.2.1 Ensure PyYAML is in requirements: `pip install pyyaml` - - [ ] 2.2.2 Add to project dependencies if needed + - [x] 2.2.2 Add to project dependencies if needed (PyYAML already in `pyproject.toml`) ## 3. Test Implementation (TDD - Create Tests First) ### 3.1 Frontmatter Schema Tests + - [x] 3.1.1 Create `tests/unit/scripts/test_doc_frontmatter/test_schema.py` - [x] 3.1.1.1 Test valid frontmatter parsing - [x] 3.1.1.2 Test missing required fields detection @@ -45,6 +46,7 @@ Do not implement production code until tests exist and have been run (expecting - [x] 3.1.2.2 Capture failure output in `openspec/changes/doc-frontmatter-schema/TDD_EVIDENCE.md` ### 3.2 Validation Logic Tests + - [x] 3.2.1 Create `tests/unit/scripts/test_doc_frontmatter/test_validation.py` - [x] 3.2.1.1 Test missing doc_owner detection - [x] 3.2.1.2 Test invalid owner resolution @@ -57,6 +59,7 @@ Do not implement production code until tests exist and have been run (expecting - [x] 3.2.2.2 Capture failure output ### 3.3 Integration Tests + - [x] 3.3.1 Create `tests/integration/scripts/test_doc_frontmatter/test_integration.py` - [x] 3.3.1.1 Test end-to-end validation workflow - [x] 3.3.1.2 Test multiple file scenarios @@ -69,7 +72,8 @@ Do not implement production code until tests exist and have been run (expecting ## 4. 
Implementation (Create Code to Pass Tests) ### 4.1 Frontmatter Schema Implementation -- [x] 4.1.1 Create `scripts/check-doc-frontmatter.py` + +- [x] 4.1.1 Create `scripts/check_doc_frontmatter.py` - [x] 4.1.1.1 Implement `parse_frontmatter()` function with `@icontract` and `@beartype` - [x] 4.1.1.2 Implement `extract_doc_owner()` function - [x] 4.1.1.3 Implement `resolve_owner()` function @@ -77,110 +81,138 @@ Do not implement production code until tests exist and have been run (expecting - [x] 4.1.1.5 Implement `suggest_frontmatter()` function ### 4.2 Validation Logic Implementation -- [x] 4.2.1 Complete `check-doc-frontmatter.py` validation logic + +- [x] 4.2.1 Complete `check_doc_frontmatter.py` validation logic - [x] 4.2.1.1 Implement `get_all_md_files()` function - [x] 4.2.1.2 Implement `rg_missing_doc_owner()` function - [x] 4.2.1.3 Implement main validation loop - [x] 4.2.1.4 Implement error reporting with color coding ### 4.3 Command-Line Interface -- [x] 4.3.1 Add argument parsing with `--fix-hint` flag - - [x] 4.3.1.1 Use `argparse` for CLI arguments + +- [x] 4.3.1 Add CLI with `--fix-hint` and `--all-docs` flags + - [x] 4.3.1.1 Use Typer for CLI arguments (Rich-backed help via Typer) - [x] 4.3.1.2 Implement help text and usage examples ### 4.4 Test Execution and Evidence -- [ ] 4.4.1 Run all tests and verify they now pass - - [ ] 4.4.1.1 `pytest tests/unit/scripts/test_doc_frontmatter/ -v` - - [ ] 4.4.1.2 `pytest tests/integration/scripts/test_doc_frontmatter/ -v` - - [ ] 4.4.1.3 Capture passing results in `TDD_EVIDENCE.md` + +- [x] 4.4.1 Run all tests and verify they now pass + - [x] 4.4.1.1 `pytest tests/unit/scripts/test_doc_frontmatter/ -v` + - [x] 4.4.1.2 `pytest tests/integration/scripts/test_doc_frontmatter/ -v` + - [x] 4.4.1.3 Capture passing results in `TDD_EVIDENCE.md` ## 5. 
Quality Gates and Validation ### 5.1 Code Quality Checks -- [ ] 5.1.1 Run formatting: `hatch run format` -- [ ] 5.1.2 Run type checking: `hatch run type-check` -- [ ] 5.1.3 Run linting: `hatch run lint` -- [ ] 5.1.4 Fix any issues found + +- [x] 5.1.1 Run formatting: `hatch run format` +- [x] 5.1.2 Run type checking: `hatch run type-check` +- [x] 5.1.3 Run linting: `hatch run lint` +- [x] 5.1.4 Fix any issues found ### 5.2 Contract Validation -- [ ] 5.2.1 Run contract tests: `hatch run contract-test` -- [ ] 5.2.2 Ensure all `@icontract` and `@beartype` decorators work correctly + +- [x] 5.2.1 Run contract tests: `hatch run contract-test` +- [x] 5.2.2 Ensure all `@icontract` and `@beartype` decorators work correctly ### 5.3 OpenSpec Validation -- [ ] 5.3.1 Run `openspec validate doc-frontmatter-schema --strict` -- [ ] 5.3.2 Fix any validation issues + +- [x] 5.3.1 Run `openspec validate doc-frontmatter-schema --strict` +- [x] 5.3.2 Fix any validation issues + +### 5.4 PR Orchestrator Parallelization Delta + +- [x] 5.4.1 Extend spec/tests to cover PR orchestrator dependency parallelization + - [x] 5.4.1.1 Add spec delta for independent CI job dependencies + - [x] 5.4.1.2 Add workflow dependency tests in `tests/unit/specfact_cli/registry/test_signing_artifacts.py` + - [x] 5.4.1.3 Run the new workflow dependency tests and record the expected failure in `TDD_EVIDENCE.md` +- [x] 5.4.2 Update `.github/workflows/pr-orchestrator.yml` + - [x] 5.4.2.1 Remove unnecessary `tests` / downstream dependencies for jobs that do not consume test artifacts + - [x] 5.4.2.2 Keep real artifact dependencies intact (`quality-gates`, main-branch packaging) +- [x] 5.4.3 Re-run workflow dependency tests and doc-frontmatter regression slice + - [x] 5.4.3.1 Capture passing evidence in `TDD_EVIDENCE.md` ## 6. 
Documentation Updates ### 6.1 Frontmatter Schema Documentation -- [ ] 6.1.1 Create `docs/contributing/docs-sync.md` - - [ ] 6.1.1.1 Document frontmatter schema with examples - - [ ] 6.1.1.2 Explain each field and its purpose - - [ ] 6.1.1.3 Provide common patterns and best practices + +- [x] 6.1.1 Create `docs/contributing/docs-sync.md` + - [x] 6.1.1.1 Document frontmatter schema with examples + - [x] 6.1.1.2 Explain each field and its purpose + - [x] 6.1.1.3 Provide common patterns and best practices ### 6.2 Validation Workflow Documentation -- [ ] 6.2.1 Add validation workflow section to docs - - [ ] 6.2.1.1 Explain how validation works - - [ ] 6.2.1.2 Document error messages and fixes - - [ ] 6.2.1.3 Provide troubleshooting guide + +- [x] 6.2.1 Add validation workflow section to docs + - [x] 6.2.1.1 Explain how validation works + - [x] 6.2.1.2 Document error messages and fixes + - [x] 6.2.1.3 Provide troubleshooting guide ### 6.3 Update Contributing Guidelines -- [ ] 6.3.1 Update `CONTRIBUTING.md` with frontmatter requirements -- [ ] 6.3.2 Add frontmatter section to documentation standards + +- [x] 6.3.1 Update `CONTRIBUTING.md` with frontmatter requirements +- [x] 6.3.2 Add frontmatter section to documentation standards (`docs/contributing/docs-sync.md` is the canonical standards page for this rollout) ## 7. 
Pre-commit Integration ### 7.1 Update Pre-commit Configuration -- [ ] 7.1.1 Modify `.pre-commit-config.yaml` - - [ ] 7.1.1.1 Add `check-doc-frontmatter` hook - - [ ] 7.1.1.2 Configure appropriate file patterns - - [ ] 7.1.1.3 Set `always_run: false` + +- [x] 7.1.1 Modify `.pre-commit-config.yaml` + - [x] 7.1.1.1 Add `check-doc-frontmatter` hook + - [x] 7.1.1.2 Configure appropriate file patterns + - [x] 7.1.1.3 Set `always_run: false` ### 7.2 Test Pre-commit Hook -- [ ] 7.2.1 Run `pre-commit install` to install hooks -- [ ] 7.2.2 Test hook with sample files -- [ ] 7.2.3 Verify hook works correctly + +- [x] 7.2.1 Run `pre-commit install` to install hooks +- [x] 7.2.2 Test hook with sample files +- [x] 7.2.3 Verify hook works correctly ## 8. Sample Implementation and Testing ### 8.1 Add Frontmatter to Sample Files -- [ ] 8.1.1 Add frontmatter to 3-5 sample documentation files -- [ ] 8.1.2 Test validation with sample files -- [ ] 8.1.3 Verify no validation errors + +- [x] 8.1.1 Add frontmatter to 3-5 sample documentation files +- [x] 8.1.2 Test validation with sample files +- [x] 8.1.3 Verify no validation errors ### 8.2 Test Error Scenarios -- [ ] 8.2.1 Create files with invalid frontmatter -- [ ] 8.2.2 Test validation error detection -- [ ] 8.2.3 Verify error messages are helpful + +- [x] 8.2.1 Create files with invalid frontmatter +- [x] 8.2.2 Test validation error detection +- [x] 8.2.3 Verify error messages are helpful ## 9. 
Create GitHub Issue and PR ### 9.1 Create GitHub Issue -- [ ] 9.1.1 Create issue in nold-ai/specfact-cli - - [ ] 9.1.1.1 Title: `[Change] Doc Frontmatter Schema & Validation` - - [ ] 9.1.1.2 Labels: `enhancement`, `change-proposal`, `documentation`, `quality` - - [ ] 9.1.1.3 Body: Summary from proposal.md - - [ ] 9.1.1.4 Link to parent epic: `feature/docs-sync-epic` + +- [x] 9.1.1 Create issue in nold-ai/specfact-cli + - [x] 9.1.1.1 Title: `[Change] Doc Frontmatter Schema & Validation` + - [x] 9.1.1.2 Labels: `enhancement`, `change-proposal`, `documentation`, `openspec` (repo-standard; `quality` not a label — use `QA` if needed later) + - [x] 9.1.1.3 Body: Summary from proposal.md + OpenSpec paths + scope + - [x] 9.1.1.4 Parent Feature: [#356](https://github.com/nold-ai/specfact-cli/issues/356) (linked in issue body; tracking comment on #356) + - [x] 9.1.1.5 **Issue number**: [#461](https://github.com/nold-ai/specfact-cli/issues/461) ### 9.2 Create Pull Request -- [ ] 9.2.1 Prepare commit with all changes - - [ ] 9.2.1.1 `git add .` - - [ ] 9.2.1.2 `git commit -m "feat: add doc frontmatter schema and validation"` - - [ ] 9.2.1.3 `git push -u origin feature/doc-frontmatter-schema` -- [ ] 9.2.2 Create PR using GitHub CLI - - [ ] 9.2.2.1 `gh pr create --repo nold-ai/specfact-cli --base dev --head feature/doc-frontmatter-schema` - - [ ] 9.2.2.2 Use PR template with OpenSpec change ID - - [ ] 9.2.2.3 Link to GitHub issue +- [x] 9.2.1 Prepare commit with all changes + - [x] 9.2.1.1 `git add .` + - [x] 9.2.1.2 `git commit -m "feat: add doc frontmatter schema and validation"` + - [x] 9.2.1.3 `git push -u origin feature/doc-frontmatter-schema` + +- [x] 9.2.2 Create PR using GitHub CLI + - [x] 9.2.2.1 `gh pr create --repo nold-ai/specfact-cli --base dev --head feature/doc-frontmatter-schema` + - [x] 9.2.2.2 Use PR template with OpenSpec change ID + - [x] 9.2.2.3 Link to GitHub issue - [ ] 9.2.3 Add to project board - [ ] 9.2.3.1 `gh project item-add 1 --owner nold-ai --url <pr-url>` 
## 10. Post-merge cleanup (after PR is merged) -- [ ] 10.1 Return to primary checkout: `cd .../specfact-cli` -- [ ] 10.2 `git fetch origin` +- [x] 10.1 Return to primary checkout: `cd .../specfact-cli` +- [x] 10.2 `git fetch origin` - [ ] 10.3 `git worktree remove ../specfact-cli-worktrees/feature/doc-frontmatter-schema` - [ ] 10.4 `git branch -d feature/doc-frontmatter-schema` - [ ] 10.5 `git worktree prune` -- [ ] 10.6 (Optional) `git push origin --delete feature/doc-frontmatter-schema` \ No newline at end of file +- [ ] 10.6 (Optional) `git push origin --delete feature/doc-frontmatter-schema` diff --git a/openspec/changes/ci-02-trustworthy-green-checks/CHANGE_VALIDATION.md b/openspec/changes/ci-02-trustworthy-green-checks/CHANGE_VALIDATION.md new file mode 100644 index 00000000..7a61ccf4 --- /dev/null +++ b/openspec/changes/ci-02-trustworthy-green-checks/CHANGE_VALIDATION.md @@ -0,0 +1,30 @@ +# Change Validation: ci-02-trustworthy-green-checks + +- **Validated on (UTC+02:00):** 2026-03-30T01:41:57+02:00 +- **Workflow:** /wf-validate-change (proposal-stage dry-run validation) +- **Strict command:** `openspec validate ci-02-trustworthy-green-checks --strict` +- **Result:** PASS + +## Scope Summary + +- **Primary capability:** `trustworthy-green-checks` +- **Focus:** make required CI/review/hook signals explicitly blocking, preserve advisory signals as advisory, and close release-PR and workflow-validation blind spots +- **Declared dependencies:** `ci-01-pr-orchestrator-log-artifacts`; `code-review-08-review-run-integration` + +## Breaking-Change Analysis (Dry-Run) + +- No production CLI or runtime API change is proposed. +- The expected behavior change is governance-only: some currently green-but-advisory checks may become blocking once implemented. +- Release PRs may run more validation than they do today if parity cannot be proven safely. 
+ +## Dependency and Integration Review + +- The change stays within CI/review/hook ownership and does not reassign docs-review ownership. +- CodeRabbit remains advisory; the proposal standardizes branch-target coverage rather than turning review findings into a hard merge gate by itself. +- The change is compatible with existing docs-review and SpecFact validation workflows. + +## Validation Outcome + +- Required artifacts are present and parseable. +- Strict OpenSpec validation passed. +- The change is appropriately scoped as a proposal-stage CI/review hardening effort. diff --git a/openspec/changes/ci-02-trustworthy-green-checks/TDD_EVIDENCE.md b/openspec/changes/ci-02-trustworthy-green-checks/TDD_EVIDENCE.md new file mode 100644 index 00000000..82ec37e0 --- /dev/null +++ b/openspec/changes/ci-02-trustworthy-green-checks/TDD_EVIDENCE.md @@ -0,0 +1,52 @@ +# TDD Evidence for ci-02-trustworthy-green-checks + +## Pre-Implementation Test Failure (Expected) + +### Test Run: 2026-03-30 - Pre-commit Review Report Failure Handling + +**Command:** + +```bash +cd /home/dom/git/nold-ai/specfact-cli +python3 -m pytest tests/unit/scripts/test_pre_commit_code_review.py -q -k 'missing_report or rejects_non_object_json' +``` + +**Result:** ✅ 2 tests failed as expected before implementation. + +**Failure summary:** + +- `test_main_missing_report_still_returns_exit_code_and_warns` failed because the hook did not + create the `.specfact/` parent directory before invoking the subprocess and treated a missing + review report as a non-fatal outcome. +- `test_print_summary_rejects_non_object_json` failed because malformed report content printed an + error but still returned exit code `0`. + +**Status:** ✅ TDD workflow confirmed for review-report enforcement hardening. 
+ +## Post-Implementation Test Success + +### Test Run: 2026-03-30 - Hardened Review Report and Doc Frontmatter Validation Slice + +**Commands:** + +```bash +cd /home/dom/git/nold-ai/specfact-cli +python3 -m pytest tests/unit/scripts/test_pre_commit_code_review.py -q -k 'missing_report or rejects_non_object_json or count_findings_by_severity' +python3 -m pytest tests/unit/scripts/test_doc_frontmatter/test_validation.py -q -k 'fix_hint_for_missing_frontmatter or fix_hint_for_invalid_owner' +python3 -m pytest tests/integration/scripts/test_doc_frontmatter/test_integration.py -q -k 'complete_validation_workflow or validation_with_all_valid_files or cli_with_fix_hint_flag' +``` + +**Result:** ✅ All targeted tests passed after implementation. + +**Passing summary:** + +- `scripts/pre_commit_code_review.py` now creates the review-report directory, validates + `.specfact/code-review.json` with Pydantic models, and fails non-zero when the report is missing + or malformed. +- `scripts/check_doc_frontmatter.py` now preserves fallback discovery behavior while emitting + debug-level diagnostics when `parse_frontmatter(file_path)` raises file/YAML errors. +- The CLI root command now propagates validation exit codes correctly, so `--fix-hint` failures do + not return a false green status. +- Helper fixtures and tests now assert the implemented required fields and valid owner behavior. + +**Status:** ✅ Remediation implemented and verified. diff --git a/openspec/changes/ci-02-trustworthy-green-checks/design.md b/openspec/changes/ci-02-trustworthy-green-checks/design.md new file mode 100644 index 00000000..f74c8efa --- /dev/null +++ b/openspec/changes/ci-02-trustworthy-green-checks/design.md @@ -0,0 +1,67 @@ +# Design: trustworthy-green-checks + +## Context + +The current quality surface has four enforcement layers: + +1. Local git hooks and smart checks +2. GitHub Actions workflows +3. PR review automation (CodeRabbit) +4. 
OpenSpec/process evidence + +The main weakness is not missing tools; it is inconsistent semantics between these layers. A hard-fail signal in one layer can be advisory in another, and some checks look blocking in the UI even though their shell commands intentionally suppress failure. + +## Design goals + +- Make required signals unambiguous. +- Preserve advisory signals where they are useful, but name and wire them as advisory. +- Minimize duplicate logic across local hooks and CI. +- Keep release-forward PRs fast only when fast-path safety is provable. + +## Enforcement model + +### 1. Gate taxonomy + +Each workflow check must be classified as one of: + +- **Required gate**: failure exits non-zero and must block merge. +- **Advisory gate**: failure is surfaced as warning/log/report but does not block merge. + +The implementation should avoid mixed semantics such as required-looking jobs that use `|| echo`, broad `continue-on-error`, or "warning-only" shell wrappers without an explicit advisory job name. + +### 2. Release PR parity + +`dev -> main` can skip re-running the full suite only if the release PR head is exactly the already-validated commit set from `dev`, with no follow-up commits that change workflow/config/spec/release metadata after the validated merge tip. + +If parity cannot be proven cheaply and deterministically, the workflow must run the required validation set again. + +### 3. Workflow-change enforcement + +Changes under `.github/workflows/**` must trigger mandatory CI validation for: + +- workflow syntax and actionlint rules +- shell fragments used by workflow `run:` steps where supported + +This closes the current gap where workflow correctness depends too heavily on local tooling or bot analysis. + +### 4. 
Local-vs-CI parity + +The supported pre-commit installation path must expose the same core enforcement semantics as CI for: + +- module signature verification +- formatter safety +- Python review gate for changed Python files +- workflow/static config validation when relevant files are staged + +The exact implementation can either expand `.pre-commit-config.yaml` or ensure the repo-supported setup always installs the smart-check wrapper as the authoritative hook path. + +### 5. Review automation coverage + +CodeRabbit should not silently treat `dev` and `main` differently for automatic review coverage when both are active PR targets. The change only standardizes review coverage and expectations; it does not by itself turn CodeRabbit findings into a merge blocker. + +## Out of scope + +- Replacing CodeRabbit with a different review system +- Rewriting the full orchestrator into reusable workflows +- Changing production CLI behavior +- Reworking OpenSpec governance schemas diff --git a/openspec/changes/ci-02-trustworthy-green-checks/proposal.md b/openspec/changes/ci-02-trustworthy-green-checks/proposal.md new file mode 100644 index 00000000..6dba65c8 --- /dev/null +++ b/openspec/changes/ci-02-trustworthy-green-checks/proposal.md @@ -0,0 +1,55 @@ +# Change: Trustworthy green checks for CI, pre-commit, and PR review + +## Why + +The repository has good coverage breadth across local hooks, workflow checks, contract validation, docs review, and AI review. The problem is enforcement consistency: several jobs that look authoritative are still advisory, local pre-commit behavior is stronger in the custom smart-check script than in `.pre-commit-config.yaml`, CodeRabbit auto-review is scoped to `dev` PRs but not `main`-bound release PRs, and the `dev -> main` fast path can skip validation for follow-up commits. Those gaps make it too easy for GitHub to show mostly green checks while some important signals are degraded or not enforced. 
+ +If maintainers cannot trust that "green" means the required checks really passed, the quality-gate surface becomes governance theater instead of a release control. + +## What Changes + +- **EXTEND** `.github/workflows/pr-orchestrator.yml` so required jobs fail on required tool failures instead of swallowing them behind warn-only shell patterns. +- **NEW** gate taxonomy and naming rules for CI jobs so advisory jobs are explicitly named and never masquerade as hard merge gates. +- **EXTEND** release PR validation semantics for `dev -> main` so test skipping is only allowed when commit parity is provable; otherwise release PRs must re-run the required validation set. +- **EXTEND** workflow/static validation so `.github/workflows/**` changes always run mandatory workflow lint and shell validation in CI, not only via local tooling or bot review. +- **ALIGN** local pre-commit enforcement with the repository smart-check path so contributors who install the supported hooks get the same core gating semantics that CI expects. +- **EXTEND** AI review coverage so PRs targeting `main` receive the same automatic review posture as PRs targeting `dev` for the configured CodeRabbit review surface. +- **REMEDIATE** repo review findings in archived doc-frontmatter OpenSpec artifacts, docs, changelog entries, and helper tests so archived/main specs are publishable and markdown/config review findings are actually cleared rather than deferred. +- **HARDEN** code-review report handling and doc-frontmatter validation diagnostics so malformed review JSON and frontmatter parse failures surface actionable errors instead of being silently downgraded. + +## Capabilities + +### New Capabilities + +- `trustworthy-green-checks`: the repository distinguishes blocking versus advisory signals explicitly and only reports required PR status as green when the required validation actually passed. 
+ +### Modified Capabilities + +- `docs-review-gate`: remains a dedicated docs-only validation path, but its required/optional status must be explicit in repository policy and branch protection guidance. +- `code-review-module`: PR review output remains advisory unless explicitly promoted to a required gate; branch targets must not silently change auto-review coverage. + +## Impact + +- **Affected CI**: `.github/workflows/pr-orchestrator.yml`, `.github/workflows/pre-merge-check.yml`, and any newly added workflow-lint workflow or required job wiring. +- **Affected local tooling**: `.pre-commit-config.yaml`, `scripts/pre-commit-smart-checks.sh`, and associated developer setup/docs. +- **Affected review automation**: `.coderabbit.yaml` target-branch scope and review expectations for `dev` and `main`. +- **Affected docs/spec artifacts**: archived `doc-frontmatter-schema` artifacts, main OpenSpec specs, `CONTRIBUTING.md`, `docs/contributing/docs-sync.md`, and `CHANGELOG.md`. +- **Affected helper/runtime code**: `scripts/pre_commit_code_review.py`, `scripts/check_doc_frontmatter.py`, and associated tests/helpers. +- **User-facing impact**: none on CLI behavior; this is release-governance hardening for maintainers and contributors. +- **Branch protection impact**: required-check recommendations may need to be updated so only hard gates are marked required. + +## Dependencies + +- **Hard blocker**: `ci-01-pr-orchestrator-log-artifacts` baseline must remain intact because this change builds on the orchestrator job model rather than replacing it. +- **Hard blocker**: `code-review-08-review-run-integration` must remain the authoritative review-run integration surface. +- **Soft alignment**: `docs-04-docs-review-gate-and-link-integrity` informs the required/advisory treatment of docs-only checks but does not block this change. 
+ +## Source Tracking + + +- **GitHub Issue**: #465 +- **Issue URL**: https://github.com/nold-ai/specfact-cli/issues/465 +- **Parent Feature**: #406 +- **Parent Feature URL**: https://github.com/nold-ai/specfact-cli/issues/406 +- **Last Synced Status**: open +- **Sanitized**: true diff --git a/openspec/changes/ci-02-trustworthy-green-checks/specs/trustworthy-green-checks/spec.md b/openspec/changes/ci-02-trustworthy-green-checks/specs/trustworthy-green-checks/spec.md new file mode 100644 index 00000000..6fb1d802 --- /dev/null +++ b/openspec/changes/ci-02-trustworthy-green-checks/specs/trustworthy-green-checks/spec.md @@ -0,0 +1,62 @@ +## ADDED Requirements + +### Requirement: Required CI jobs fail on required tool failures + +Workflow jobs classified as required merge gates SHALL exit non-zero when their underlying required tool fails, and SHALL NOT suppress the failure behind warn-only shell patterns or broad continue-on-error handling. + +#### Scenario: Required compatibility or contract job encounters a tool failure + +- **WHEN** a required job in `pr-orchestrator.yml` runs a compatibility, contract, lint, or equivalent required validation command +- **AND** the underlying command exits non-zero +- **THEN** the workflow job exits non-zero +- **AND** the GitHub check does not report success for that job + +### Requirement: Advisory jobs are explicit and non-deceptive + +Non-blocking validation jobs SHALL be labeled and documented as advisory so maintainers can distinguish useful warnings from required merge gates. 
+
+#### Scenario: Repository keeps a warn-only validation job
+
+- **WHEN** a workflow contains a non-blocking validation step or job
+- **THEN** the job name or step output clearly marks it as advisory
+- **AND** branch protection guidance does not rely on that advisory check as evidence that required validation passed
+
+### Requirement: Main-bound release PRs only skip tests when parity is provable
+
+`dev -> main` pull requests SHALL skip the full required validation set only when the workflow can prove the PR head is already covered by the validated `dev` commit set without additional follow-up commits affecting release safety.
+
+#### Scenario: Release PR contains follow-up commits after the last validated dev merge tip
+
+- **WHEN** a `dev -> main` pull request includes additional commits beyond the already-validated merge tip from `dev`
+- **THEN** the workflow does not use the fast-path skip
+- **AND** the required validation set runs again for the release PR
+
+### Requirement: Workflow changes trigger mandatory workflow lint validation
+
+Changes under `.github/workflows/**` SHALL trigger mandatory CI validation for workflow syntax and supported shell/static checks so workflow regressions cannot rely solely on local tooling or bot review.
+
+#### Scenario: Pull request modifies a GitHub Actions workflow
+
+- **WHEN** a pull request changes one or more files under `.github/workflows/`
+- **THEN** CI runs mandatory workflow validation for those changes
+- **AND** a workflow-lint failure blocks the required check from reporting success
+
+### Requirement: Supported local pre-commit installation matches core CI gate semantics
+
+The repository-supported pre-commit installation path SHALL enforce the same core gate semantics that CI relies on for changed files, rather than leaving stronger checks only in an optional wrapper unknown to standard contributors.
+ +#### Scenario: Contributor installs the documented local hooks + +- **WHEN** a contributor follows the repository-supported hook installation path +- **THEN** staged Python, workflow, Markdown, and module-signature relevant changes receive the documented local gate coverage +- **AND** the contributor is not relying on a weaker default `.pre-commit` path than the one CI and maintainer guidance assume + +### Requirement: Automatic PR review coverage includes both active protected targets + +Automatic repository review configuration SHALL cover pull requests targeting both `dev` and `main` when both branches are active protected integration targets. + +#### Scenario: Release-forward pull request targets main + +- **WHEN** a pull request targets `main` +- **THEN** the configured automatic review system applies the same target-branch auto-review policy used for `dev` +- **AND** release PRs do not silently lose their default automated review pass diff --git a/openspec/changes/ci-02-trustworthy-green-checks/tasks.md b/openspec/changes/ci-02-trustworthy-green-checks/tasks.md new file mode 100644 index 00000000..6f23773a --- /dev/null +++ b/openspec/changes/ci-02-trustworthy-green-checks/tasks.md @@ -0,0 +1,74 @@ +# Tasks: ci-02-trustworthy-green-checks + +## TDD / SDD order (enforced) + +Per `openspec/config.yaml`, tests before code for any behavior-changing task. Order: (1) Spec deltas, (2) Tests from scenarios (expect failure), (3) Code last. Do not implement workflow/config changes until tests or validation checks exist and have been run with failing evidence where behavior changes are enforced. + +--- + +## 1. Create git worktree for this change + +- [ ] 1.1 Fetch latest and create a worktree with a new branch from `origin/dev`. 
- [ ] 1.1.1 `git fetch origin`
+  - [ ] 1.1.2 `git worktree add ../specfact-cli-worktrees/feature/ci-02-trustworthy-green-checks -b feature/ci-02-trustworthy-green-checks origin/dev`
+  - [ ] 1.1.3 Change into the worktree: `cd ../specfact-cli-worktrees/feature/ci-02-trustworthy-green-checks`
+  - [ ] 1.1.4 Run `hatch env create` in the worktree to create the virtual environment and install
+    the dev dependencies required by the documented repository workflow.
+  - [ ] 1.1.5 Verify the branch: `git branch --show-current`
+
+## 2. Spec-first preparation
+
+- [ ] 2.1 Finalize the gate taxonomy in `specs/trustworthy-green-checks/spec.md`.
+- [ ] 2.2 Audit current enforcement semantics in:
+  - [ ] 2.2.1 `.github/workflows/pr-orchestrator.yml`
+  - [ ] 2.2.2 `.github/workflows/pre-merge-check.yml`
+  - [ ] 2.2.3 `.pre-commit-config.yaml`
+  - [ ] 2.2.4 `scripts/pre-commit-smart-checks.sh`
+  - [ ] 2.2.5 `.coderabbit.yaml`
+  - [ ] 2.2.6 archived/main doc-frontmatter specs, changelog, and contributor docs for review-driven markdown/spec drift
+
+## 3. Test-first / validation-first evidence
+
+- [ ] 3.1 Add or update workflow/unit tests that prove required jobs fail when underlying tools fail and that advisory jobs are explicitly marked as advisory.
+- [ ] 3.2 Add or update tests for `dev -> main` skip semantics so follow-up commits invalidate unsafe fast-path assumptions.
+- [ ] 3.3 Add or update tests for pre-commit parity or supported-hook installation behavior.
+- [ ] 3.4 Add or update tests for review JSON failure handling and doc-frontmatter helper expectations.
+- [ ] 3.5 Run the new/updated tests before implementation and capture failing evidence in `TDD_EVIDENCE.md`.
+
+## 4. Implementation: CI hardening
+
+- [ ] 4.1 Remove failure-swallowing patterns from required jobs in `pr-orchestrator.yml`.
+- [ ] 4.2 Rename or isolate remaining advisory jobs so their non-blocking status is explicit in job names and logs.
+- [ ] 4.3 Add mandatory workflow validation in CI for `.github/workflows/**` changes. +- [ ] 4.4 Rework `dev -> main` skip logic so only provably safe parity skips are allowed; otherwise run the required validation set. +- [ ] 4.5 Keep docs-only validation behavior explicit and compatible with docs-review workflow ownership. + +## 5. Implementation: local and review parity + +- [ ] 5.1 Align `.pre-commit-config.yaml` with the supported smart-check path, or make the supported hook-install path authoritative and explicit in repo docs. +- [ ] 5.2 Update `.coderabbit.yaml` so automatic review coverage includes both `dev` and `main` PR targets. +- [ ] 5.3 Document which review outputs are advisory versus merge-blocking. + +## 6. Implementation: review remediation and publication readiness + +- [ ] 6.1 Fix valid markdownlint/style findings in `CHANGELOG.md`, `CONTRIBUTING.md`, and `docs/contributing/docs-sync.md`. +- [ ] 6.2 Fix valid archived/main OpenSpec spec publication issues (purpose text, heading spacing, unique headings, API-name drift, and scenario completeness). +- [ ] 6.3 Harden `scripts/pre_commit_code_review.py` report parsing/output handling and `scripts/check_doc_frontmatter.py` parse-failure diagnostics. +- [ ] 6.4 Align helper fixtures/tests with implemented doc-frontmatter behavior. + +## 7. Quality gates and documentation + +- [ ] 7.1 `hatch run format` +- [ ] 7.2 `hatch run type-check` +- [ ] 7.3 `hatch run lint` +- [ ] 7.4 `hatch run yaml-lint` +- [ ] 7.5 `hatch run lint-workflows` +- [ ] 7.6 Run targeted tests for workflow, hook, and doc-frontmatter remediation changes. +- [ ] 7.7 Update contributor docs / CI docs describing trustworthy green-check semantics. +- [ ] 7.8 Run `openspec validate ci-02-trustworthy-green-checks --strict` and resolve all issues. + +## 8. Delivery + +- [ ] 8.1 Update `openspec/CHANGE_ORDER.md` with implementation status when work begins/lands. +- [ ] 8.2 Stage and commit with a Conventional Commit message. 
+- [ ] 8.3 Push `feature/ci-02-trustworthy-green-checks` and open a PR to `dev`. diff --git a/openspec/config.yaml b/openspec/config.yaml index 524f18ed..1bcde599 100644 --- a/openspec/config.yaml +++ b/openspec/config.yaml @@ -61,6 +61,11 @@ context: | `openspec/changes//TDD_EVIDENCE.md` for every behavior change. - If the pattern does not work in practice, adjust the process until it does. + Code review JSON (dogfood): Treat `.specfact/code-review.json` as mandatory evidence before an OpenSpec + change is complete. If the file is missing or stale (see tasks), run + `specfact code review run --json --out .specfact/code-review.json` and resolve **every** finding at any + severity (warning or error) unless the proposal documents a rare, explicit exception. + # Per-artifact rules (only injected into matching artifacts) rules: proposal: @@ -158,3 +163,19 @@ rules: - body following `.github/ISSUE_TEMPLATE/change_proposal.md` template (Why, What Changes sections from proposal) - footer `*OpenSpec Change Proposal: *` - After creation, update proposal.md Source Tracking section with issue number, URL, repository, and status. + - |- + SpecFact code review JSON (dogfood, required before PR): Include tasks to + - (1) Ensure `.specfact/code-review.json` is present and **fresh**: if the file is missing, or its + last-modified time is older than any file you changed for the change **excluding only** + `openspec/changes//TDD_EVIDENCE.md` (evidence-only updates there do not by themselves + invalidate the review file). This freshness comparison explicitly includes behavior-affecting + workflow and config paths such as `.github/workflows/`, `.pre-commit-config.yaml`, and + `pyproject.toml` in addition to code, tests, scripts, and change artifacts. Run a new review, e.g. + `hatch run specfact code review run --json --out .specfact/code-review.json` with `--scope changed` + during iteration and `--scope full` (or equivalent coverage) before the final PR. 
+ - (2) Read the JSON report and remediate **all** findings regardless of severity (warning, advisory, + error, or equivalent in the schema): treat them as blocking until fixed or the proposal explicitly + documents a justified exception approved in the change. + - (3) Re-run the review after substantive edits until the report shows a passing outcome per the + review module (e.g. overall verdict PASS / CI exit 0); record the review command(s) and timestamp + in `TDD_EVIDENCE.md` or the PR description when the change touches behavior or quality gates. diff --git a/openspec/specs/doc-frontmatter-schema/spec.md b/openspec/specs/doc-frontmatter-schema/spec.md new file mode 100644 index 00000000..510d79b0 --- /dev/null +++ b/openspec/specs/doc-frontmatter-schema/spec.md @@ -0,0 +1,139 @@ +# doc-frontmatter-schema Specification + +## Purpose + +This specification defines the authoritative YAML frontmatter schema for repository documentation so +enforced docs carry stable ownership, tracking, and exemption metadata that validators and +contributors must interpret consistently. 
+ +## Requirements + +### Requirement: YAML Frontmatter Format + +The system SHALL support YAML frontmatter in Markdown documentation files with the following schema: + +```yaml +--- +title: "" +doc_owner: # owning module or known token +tracks: + - # files/directories this doc tracks +last_reviewed: YYYY-MM-DD +exempt: false # true for stable/legal docs +exempt_reason: "" # required if exempt: true +--- +``` + +#### Scenario: Valid frontmatter structure + +- **WHEN** a Markdown file contains properly formatted YAML frontmatter +- **THEN** the frontmatter SHALL be parsed successfully +- **AND** all required fields SHALL be present + +#### Scenario: Missing required fields + +- **WHEN** a Markdown file has frontmatter missing required fields +- **THEN** the validation SHALL fail with clear error message +- **AND** the error SHALL specify which fields are missing + +### Requirement: Owner Identifier Resolution + +The system SHALL support two types of owner identifiers: + +1. Path-like identifiers (for example `src/specfact/parser`) +2. Known tokens (for example `specfact-cli`, `nold-ai`, `openspec`) + +#### Scenario: Path-like owner resolution + +- **WHEN** `doc_owner` is a path-like identifier +- **THEN** the system SHALL verify the path exists in the repository +- **AND** validation SHALL pass if path exists + +#### Scenario: Known token resolution + +- **WHEN** `doc_owner` is a known token +- **THEN** the system SHALL verify the token is in `VALID_OWNER_TOKENS` +- **AND** validation SHALL pass if the token is valid + +#### Scenario: Invalid owner identifier + +- **WHEN** `doc_owner` cannot be resolved +- **THEN** validation SHALL fail with a resolution error +- **AND** the error SHALL suggest valid alternatives + +### Requirement: Glob Pattern Tracking + +The system SHALL support glob patterns in `tracks` to specify which files or directories the +documentation should stay synchronized with. 
+ +#### Scenario: Single glob pattern + +- **WHEN** `tracks` contains a single valid glob pattern +- **THEN** the pattern SHALL validate successfully + +#### Scenario: Multiple glob patterns + +- **WHEN** `tracks` contains multiple glob patterns +- **THEN** all patterns SHALL be validated +- **AND** validation SHALL pass if all patterns are valid + +#### Scenario: Invalid glob pattern + +- **WHEN** `tracks` contains an invalid glob pattern +- **THEN** validation SHALL fail with a pattern error + +### Requirement: Exemption Handling + +The system SHALL support document exemptions for stable or legal documentation that does not need +synchronization checks. + +#### Scenario: Valid exemption + +- **WHEN** `exempt: true` is paired with a non-empty `exempt_reason` +- **THEN** validation SHALL pass +- **AND** the document SHALL be excluded from sync checks + +#### Scenario: Exemption without reason + +- **WHEN** `exempt: true` is set but `exempt_reason` is empty +- **THEN** validation SHALL fail +- **AND** the error SHALL require an exemption reason + +### Requirement: Frontmatter Extraction + +The system SHALL provide a function to extract frontmatter from Markdown files. 
+ +#### Scenario: Extract from file with frontmatter + +- **WHEN** `parse_frontmatter(path)` is called on a file with valid frontmatter +- **THEN** the function SHALL return the parsed frontmatter dictionary +- **AND** the original file content SHALL remain unchanged + +#### Scenario: Extract from file without frontmatter + +- **WHEN** `parse_frontmatter(path)` is called on a file without frontmatter +- **THEN** the function SHALL return an empty dictionary +- **AND** no error SHALL be raised + +## Contract Requirements + +### Requirement: Input Validation Contracts + +All public functions SHALL use `@icontract` decorators for input validation: + +- `@require` for preconditions +- `@ensure` for postconditions + +#### Scenario: Invalid input type + +- **WHEN** a function receives an invalid input type +- **THEN** the `@require` contract SHALL raise an appropriate exception + +### Requirement: Type Safety Contracts + +All public functions SHALL use `@beartype` decorators for runtime type checking. + +#### Scenario: Type mismatch + +- **WHEN** a function receives an argument of the wrong type +- **THEN** `@beartype` SHALL raise a clear `TypeError` diff --git a/openspec/specs/doc-frontmatter-validation/spec.md b/openspec/specs/doc-frontmatter-validation/spec.md new file mode 100644 index 00000000..f431ba37 --- /dev/null +++ b/openspec/specs/doc-frontmatter-validation/spec.md @@ -0,0 +1,170 @@ +# doc-frontmatter-validation Specification + +## Purpose + +This specification governs the repository validator that enforces doc-frontmatter ownership rules, +fix hints, and rollout scope so maintainers can trust documentation metadata checks locally and in +CI. + +## Requirements + +### Requirement: Validation Script Implementation + +The system SHALL provide a validation script, `scripts/check_doc_frontmatter.py`, that enforces +frontmatter requirements. 
+ +#### Scenario: Script execution with valid docs + +- **WHEN** `check_doc_frontmatter.py` is executed +- **AND** all tracked docs have valid frontmatter +- **THEN** the script SHALL exit with code `0` +- **AND** output SHALL show a success message + +#### Scenario: Script execution with invalid docs + +- **WHEN** `check_doc_frontmatter.py` is executed +- **AND** some docs have invalid frontmatter +- **THEN** the script SHALL exit with code `1` +- **AND** output SHALL list all validation failures + +### Requirement: Missing Doc Owner Detection + +The system SHALL detect documentation files that are missing the `doc_owner` field. + +#### Scenario: Missing doc_owner in tracked file + +- **WHEN** a tracked Markdown file lacks `doc_owner` +- **THEN** validation SHALL fail +- **AND** the error SHALL specify the missing field + +#### Scenario: Valid doc_owner present + +- **WHEN** a tracked Markdown file has a valid `doc_owner` +- **THEN** validation SHALL pass for the owner requirement + +### Requirement: Owner Resolution Validation + +The system SHALL validate that `doc_owner` values resolve to existing paths or known tokens. + +#### Scenario: Owner resolves to existing path + +- **WHEN** `doc_owner` is a path that exists in the repository +- **THEN** validation SHALL pass for owner resolution + +#### Scenario: Owner is valid known token + +- **WHEN** `doc_owner` is in `VALID_OWNER_TOKENS` +- **THEN** validation SHALL pass for owner resolution + +#### Scenario: Owner cannot be resolved + +- **WHEN** `doc_owner` does not resolve to a path or token +- **THEN** validation SHALL fail +- **AND** the error SHALL suggest valid alternatives + +### Requirement: Fix Hint Generation + +The system SHALL provide helpful fix hints when validation fails. 
+ +#### Scenario: Fix hint for missing frontmatter + +- **WHEN** validation fails due to missing frontmatter +- **AND** the `--fix-hint` flag is used +- **THEN** output SHALL include a suggested frontmatter template +- **AND** the template SHALL include `doc_owner`, `tracks`, `last_reviewed`, `exempt`, and + `exempt_reason` + +#### Scenario: Fix hint for invalid owner + +- **WHEN** validation fails due to an invalid owner +- **AND** the `--fix-hint` flag is used +- **THEN** output SHALL suggest valid owner alternatives + +### Requirement: Tracked Files Discovery + +The system SHALL discover all Markdown files that should be tracked for frontmatter validation. + +#### Scenario: Discover files in docs directory + +- **WHEN** the script runs +- **THEN** all `docs/**/*.md` files SHALL be discoverable +- **AND** exempt files SHALL be excluded + +#### Scenario: Discover root-level docs + +- **WHEN** the script runs +- **THEN** configured root-level docs SHALL be discoverable +- **AND** exempt files SHALL be excluded + +#### Scenario: Full-site validation ignores rollout list + +- **WHEN** `check_doc_frontmatter.py --all-docs` is executed +- **THEN** the validator SHALL inspect every discovered documentation file +- **AND** it SHALL not require `docs/.doc-frontmatter-enforced` to define the validation scope + +### Requirement: Exempt Files Handling + +The system SHALL properly handle files marked as exempt. 
+ +#### Scenario: Exempt file with valid reason + +- **WHEN** a file has `exempt: true` with a valid `exempt_reason` +- **THEN** the file SHALL be excluded from validation + +#### Scenario: Non-exempt file validation + +- **WHEN** a file has `exempt: false` or no exemption +- **THEN** the file SHALL undergo full validation + +## Contract Requirements + +### Requirement: Validation Contracts + +The validation module SHALL use `@icontract` decorators on public validation helpers such as +`parse_frontmatter`, `extract_doc_owner`, `resolve_owner`, `validate_glob_patterns`, +`suggest_frontmatter`, `get_all_md_files`, `rg_missing_doc_owner`, and `main`. + +#### Scenario: Invalid public helper input + +- **WHEN** one of the public validation helpers receives an invalid input +- **THEN** its `@require` or `@ensure` contract SHALL enforce the documented pre/postcondition + +### Requirement: Error Handling Contracts + +The script SHALL handle file and YAML errors gracefully while preserving validator outcomes. + +#### Scenario: File read or YAML parse error + +- **WHEN** the script encounters a file read error or YAML parse error +- **THEN** the error SHALL be surfaced in validation output +- **AND** validation SHALL continue for the remaining files + +## Performance Requirements + +### Requirement: Efficient File Processing + +The validation script SHALL process files efficiently. + +#### Scenario: Large documentation set + +- **WHEN** the script processes 100 or more documentation files +- **THEN** execution SHALL complete quickly enough for local and CI use + +### Requirement: PR Orchestrator Parallel Job Graph + +The PR orchestrator workflow SHALL not serialize independent validation jobs behind the Python 3.12 +test suite when those jobs do not consume test artifacts. 
+ +#### Scenario: Independent jobs start after shared signature gate + +- **WHEN** `.github/workflows/pr-orchestrator.yml` defines `compat-py311`, `contract-first-ci`, + `type-checking`, `linting`, and `cli-validation` +- **THEN** each of those jobs SHALL depend on `changes` and the shared signature gate +- **AND** none of those jobs SHALL list `tests` as a required predecessor unless they consume test + artifacts from that job + +#### Scenario: Coverage-based advisory gate still depends on tests + +- **WHEN** `quality-gates` reads the `coverage-reports` artifact +- **THEN** `quality-gates` SHALL keep `tests` as an explicit dependency +- **AND** the workflow SHALL continue to gate that job on unit-coverage availability diff --git a/openspec/specs/docs-contributing-updates/spec.md b/openspec/specs/docs-contributing-updates/spec.md new file mode 100644 index 00000000..1298a56a --- /dev/null +++ b/openspec/specs/docs-contributing-updates/spec.md @@ -0,0 +1,144 @@ +# docs-contributing-updates Specification + +## Purpose + +This specification is the authoritative source for contributor-facing documentation updates related +to doc-frontmatter ownership and validation, and maintainers SHALL keep the guidance aligned with +the implemented validator so onboarding, remediation, and CI usage remain traceable and enforceable. + +## Requirements + +### Requirement: Frontmatter Documentation + +The system SHALL provide comprehensive documentation for the frontmatter schema and validation +system. 
+ +#### Scenario: Schema reference documentation + +- **WHEN** a user reads `docs/contributing/docs-sync.md` +- **THEN** they SHALL find the complete frontmatter schema reference +- **AND** examples for all field types + +#### Scenario: Validation workflow documentation + +- **WHEN** a user reads the contributing docs +- **THEN** they SHALL understand the validation workflow +- **AND** know how to fix validation errors + +### Requirement: Getting Started Guide + +The system SHALL provide a getting started guide for new contributors. + +#### Scenario: New contributor setup + +- **WHEN** a new contributor reads the docs +- **THEN** they SHALL find setup instructions +- **AND** example frontmatter for common cases + +#### Scenario: Common patterns documentation + +- **WHEN** a user reads the documentation +- **THEN** they SHALL find common frontmatter patterns +- **AND** best practices for different doc types + +### Requirement: Troubleshooting Guide + +The system SHALL provide troubleshooting guidance for validation issues. + +#### Scenario: Error message reference + +- **WHEN** a user encounters a validation error +- **THEN** they SHALL find an error reference in the docs +- **AND** a step-by-step resolution guide + +#### Scenario: Fix hint examples + +- **WHEN** a user needs help with fix hints +- **THEN** they SHALL find examples in the documentation +- **AND** explanations of the fix-hint format + +### Requirement: Integration Documentation + +The system SHALL document how the validation integrates with existing workflows. 
+ +#### Scenario: Pre-commit integration docs + +- **WHEN** a user reads the integration docs +- **THEN** they SHALL understand pre-commit hook setup +- **AND** available configuration options + +#### Scenario: CI integration documentation + +- **WHEN** a user reads the integration docs +- **THEN** they SHALL find CI workflow documentation +- **AND** branch protection setup guidance + +### Requirement: Examples and Templates + +The system SHALL provide practical examples and templates. + +#### Scenario: Frontmatter template examples + +- **WHEN** a user needs a frontmatter template +- **THEN** they SHALL find examples for different doc types +- **AND** copy-paste-ready templates + +#### Scenario: Real-world examples + +- **WHEN** a user reads the documentation +- **THEN** they SHALL find real-world examples +- **AND** explanations of design decisions + +## Contract Requirements + +### Requirement: Documentation Completeness + +All documentation SHALL be complete and accurate. + +#### Scenario: Complete schema documentation + +- **WHEN** a user reads the schema docs +- **THEN** all frontmatter fields SHALL be documented +- **AND** examples SHALL be provided + +### Requirement: Documentation Accuracy + +Documentation SHALL accurately reflect the current implementation. + +#### Scenario: Accurate workflow description + +- **WHEN** a user follows the documented workflow +- **THEN** it SHALL work as described +- **AND** produce the expected results + +## Quality Requirements + +### Requirement: Readability + +Documentation SHALL be well-written and easy to understand. + +#### Scenario: Clear and concise writing + +- **WHEN** a user reads the documentation +- **THEN** the content SHALL be clear and concise +- **AND** free of unnecessary jargon where possible + +### Requirement: Organization + +Documentation SHALL be well-organized. 
+ +#### Scenario: Logical structure + +- **WHEN** a user navigates the documentation +- **THEN** the structure SHALL be logical +- **AND** easy to follow + +### Requirement: Maintainability + +Documentation SHALL be easy to maintain. + +#### Scenario: Easy updates + +- **WHEN** a maintainer updates the documentation +- **THEN** the changes SHALL be straightforward +- **AND** the structure SHALL support easy updates diff --git a/pyproject.toml b/pyproject.toml index 4cb884b4..1117c278 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -4,7 +4,7 @@ build-backend = "hatchling.build" [project] name = "specfact-cli" -version = "0.43.1" +version = "0.43.2" description = "The swiss knife CLI for agile DevOps teams. Keep backlog, specs, tests, and code in sync with validation and contract enforcement for new projects and long-lived codebases." readme = "README.md" requires-python = ">=3.11" @@ -220,7 +220,8 @@ yaml-check-all = "bash scripts/yaml-tools.sh check-all {args}" # Docs validation (docs-12): command examples vs CLI; modules.specfact.io URLs in docs check-docs-commands = "python scripts/check-docs-commands.py" check-cross-site-links = "python scripts/check-cross-site-links.py" -docs-validate = "python scripts/check-docs-commands.py && python scripts/check-cross-site-links.py --warn-only" +doc-frontmatter-check = "python scripts/check_doc_frontmatter.py" +docs-validate = "python scripts/check-docs-commands.py && python scripts/check-cross-site-links.py --warn-only && python scripts/check_doc_frontmatter.py" # Legacy entry (kept for compatibility); prefer `workflows-lint` above lint-workflows = "bash scripts/run_actionlint.sh {args}" @@ -718,6 +719,14 @@ ignore = [ "SLF001", # private member accessed ] +"tests/unit/tools/test_smart_test_coverage.py" = [ + "E402", # imports after sys.path bootstrap for tool harness loading +] + +"tests/unit/tools/test_smart_test_coverage_enhanced.py" = [ + "E402", # imports after sys.path bootstrap for tool harness loading +] + # 
Tools and scripts can be more lenient "tools/**/*" = [ "T20", # print found diff --git a/scripts/check-cross-site-links.py b/scripts/check-cross-site-links.py old mode 100644 new mode 100755 diff --git a/scripts/check-docs-commands.py b/scripts/check-docs-commands.py old mode 100644 new mode 100755 diff --git a/scripts/check_doc_frontmatter.py b/scripts/check_doc_frontmatter.py new file mode 100755 index 00000000..98174eae --- /dev/null +++ b/scripts/check_doc_frontmatter.py @@ -0,0 +1,560 @@ +#!/usr/bin/env python3 +"""Validate YAML doc ownership frontmatter for Markdown documentation.""" + +from __future__ import annotations + +import datetime +import fnmatch +import functools +import os +import re +import subprocess +import sys +from pathlib import Path +from typing import Any + +import typer +import yaml +from beartype import beartype +from icontract import ensure, require +from pydantic import BaseModel, ConfigDict, Field, ValidationError, ValidationInfo, field_validator, model_validator +from rich.console import Console +from typer.main import get_command + +from specfact_cli.common import get_bridge_logger + + +_ERR = Console(stderr=True) +_LOG = get_bridge_logger(__name__) + +FRONTMATTER_RE = re.compile(r"^---\s*\n(.*?)\n---", re.DOTALL) +DOC_OWNER_RE = re.compile(r"^\s*doc_owner\s*:\s*['\"]?(.+?)['\"]?\s*$", re.MULTILINE) +LAST_REVIEWED_RE = re.compile(r"^\d{4}-\d{2}-\d{2}$") + +TRACKED_DOC_DIRS = ("docs",) +REQUIRED_ROOT_DOCS: tuple[str, ...] 
= ("USAGE-FAQ.md",) +EXEMPT_FILES = frozenset( + { + "docs/LICENSE.md", + "docs/TRADEMARKS.md", + "CHANGELOG.md", + "CLA.md", + "CODE_OF_CONDUCT.md", + "SECURITY.md", + "TRADEMARKS.md", + } +) +VALID_OWNER_TOKENS = frozenset({"specfact-cli", "nold-ai", "openspec"}) +SOURCE_ROOTS = (Path("src"), Path("openspec"), Path("modules"), Path("tools")) + +_OWNER_GLOB_METACHARS = frozenset("*?[]{}") +REQUIRED_KEYS = ("title", "doc_owner", "tracks", "last_reviewed", "exempt", "exempt_reason") + + +class DocFrontmatter(BaseModel): + """Validated doc-sync ownership record (YAML frontmatter subset).""" + + model_config = ConfigDict(extra="ignore", str_strip_whitespace=False) + + title: str = Field(..., description="Document title") + doc_owner: str = Field(..., description="Owner path or known token") + tracks: list[str] = Field(..., min_length=1, description="Glob patterns for tracked sources") + last_reviewed: datetime.date = Field(..., description="Last review date (YYYY-MM-DD)") + exempt: bool = Field(..., description="Whether the page is exempt from sync rules") + exempt_reason: str = Field(..., description="Reason when exempt; empty string when not exempt") + + @field_validator("tracks", mode="before") + @classmethod + def _tracks_must_be_string_list(cls, value: object) -> list[str]: + if not isinstance(value, list) or not all(isinstance(x, str) for x in value): + raise ValueError("`tracks` must be a list of strings") + return list(value) + + @field_validator("last_reviewed", mode="before") + @classmethod + def _normalize_last_reviewed_field(cls, value: object) -> datetime.date: + if isinstance(value, datetime.datetime): + return value.date() + if isinstance(value, datetime.date): + return value + if isinstance(value, str): + s = value.strip() + if LAST_REVIEWED_RE.match(s): + y, m, d = (int(x) for x in s.split("-")) + return datetime.date(y, m, d) + raise ValueError("`last_reviewed` must be YYYY-MM-DD") + + @field_validator("exempt", mode="before") + @classmethod + def 
_exempt_must_be_bool(cls, value: object) -> bool: + if not isinstance(value, bool): + raise ValueError("`exempt` must be a boolean") + return value + + @field_validator("doc_owner", "title", "exempt_reason", mode="before") + @classmethod + def _required_strings(cls, value: object, info: ValidationInfo) -> str: + if not isinstance(value, str): + raise ValueError(f"`{info.field_name}` must be a string") + return value + + @model_validator(mode="after") + def _tracks_globs_valid(self) -> DocFrontmatter: + if not validate_glob_patterns(list(self.tracks)): + raise ValueError("invalid glob pattern in `tracks`") + return self + + @model_validator(mode="after") + def _owner_and_exempt_rules(self) -> DocFrontmatter: + if not resolve_owner(self.doc_owner): + raise ValueError( + "doc_owner `" + + self.doc_owner + + "` does not resolve (tokens: " + + ", ".join(sorted(VALID_OWNER_TOKENS)) + + " or repo path)", + ) + if self.exempt and not self.exempt_reason.strip(): + raise ValueError("`exempt_reason` required when exempt is true") + return self + + +DocFrontmatter.model_rebuild(_types_namespace={"datetime": datetime}) + + +def _format_doc_frontmatter_errors(exc: ValidationError) -> list[str]: + """Turn Pydantic errors into the same style as legacy manual validation.""" + out: list[str] = [] + for err in exc.errors(): + loc = err.get("loc") or () + key = str(loc[0]) if loc else "record" + typ = err.get("type", "") + if typ == "missing": + out.append(f"missing `{key}`") + continue + msg = str(err.get("msg", "validation error")) + if msg.startswith("Value error, "): + msg = msg[len("Value error, ") :] + out.append(msg) + return out + + +def _root() -> Path: + """Repository root for validation (override with DOC_FRONTMATTER_ROOT in tests).""" + env = os.environ.get("DOC_FRONTMATTER_ROOT") + return Path(env).resolve() if env else Path(__file__).resolve().parents[1] + + +def _enforced_path() -> Path: + return _root() / "docs" / ".doc-frontmatter-enforced" + + +def 
_path_is_existing_file(path: Path) -> bool: + return path.is_file() + + +@beartype +@require(_path_is_existing_file, "Path must be an existing file") +@ensure(lambda result: isinstance(result, dict), "Must return dict") +def parse_frontmatter(path: Path) -> dict[str, Any]: + """Parse YAML frontmatter from a Markdown file.""" + content = path.read_text(encoding="utf-8") + match = FRONTMATTER_RE.match(content) + if not match: + return {} + loaded = yaml.safe_load(match.group(1)) + return loaded if isinstance(loaded, dict) else {} + + +@beartype +@require(lambda content: isinstance(content, str), "Content must be string") +@ensure(lambda result: result is None or isinstance(result, str), "Must return string or None") +def extract_doc_owner(content: str) -> str | None: + """Extract doc_owner value from raw Markdown content.""" + match = DOC_OWNER_RE.search(content) + return match.group(1).strip() if match else None + + +def _owner_literal_is_safe(owner: str) -> bool: + """Reject empty owners, traversal segments, and glob metacharacters.""" + if not isinstance(owner, str): + return False + s = owner.strip() + if not s: + return False + if any(ch in _OWNER_GLOB_METACHARS for ch in s): + return False + parts = Path(s).as_posix().split("/") + return all(part not in ("", ".", "..") for part in parts) + + +def _is_resolved_under(child: Path, root: Path) -> bool: + """True if ``child`` resolves to a path under ``root`` (same device).""" + try: + child.resolve().relative_to(root.resolve()) + except ValueError: + return False + return True + + +def _resolve_owner_absolute_path(owner: str, base_root: Path) -> bool: + """Return True if ``owner`` is an absolute path inside ``base_root`` that exists.""" + candidate = Path(owner) + if not candidate.is_absolute(): + return False + try: + resolved = candidate.resolve() + except OSError: + return False + if not _is_resolved_under(resolved, base_root): + return False + return resolved.exists() + + +def 
_resolve_owner_repo_relative(owner: str, base_root: Path) -> bool: + """Return True if ``base_root/owner`` resolves under ``base_root`` and exists.""" + rel = (base_root / owner).resolve() + if not _is_resolved_under(rel, base_root): + return False + return rel.exists() + + +def _glob_owner_dir_under_root(owner: str, root: Path) -> bool: + """Return True if a directory named ``owner`` exists anywhere under ``root`` (single-segment owners).""" + try: + for match in root.glob(f"**/{owner}"): + if match.is_dir(): + return True + except OSError: + return False + return False + + +def _find_owner_under_source_roots(owner: str, base_root: Path, owner_single_segment: bool) -> bool: + """Search ``SOURCE_ROOTS`` for a path or directory name matching ``owner``.""" + for rel_root in SOURCE_ROOTS: + root = (base_root / rel_root).resolve() + if not root.exists(): + continue + joined = (root / owner).resolve() + if _is_resolved_under(joined, root) and joined.exists(): + return True + if owner_single_segment and _glob_owner_dir_under_root(owner, root): + return True + return False + + +@beartype +@require(lambda owner: isinstance(owner, str), "Owner must be string") +@ensure(lambda result: isinstance(result, bool), "Must return bool") +def resolve_owner(owner: str) -> bool: + """Return True if owner is a known token or an existing path under the repo.""" + return _resolve_owner_impl(owner, str(_root().resolve())) + + +@functools.lru_cache(maxsize=256) +def _resolve_owner_impl(owner: str, root_key: str) -> bool: + """Memoized owner resolution keyed by owner and repository root (see ``resolve_owner``).""" + if owner in VALID_OWNER_TOKENS: + return True + if not _owner_literal_is_safe(owner): + return False + base_root = Path(root_key).resolve() + candidate = Path(owner) + if candidate.is_absolute(): + return _resolve_owner_absolute_path(owner, base_root) + if _resolve_owner_repo_relative(owner, base_root): + return True + owner_single_segment = "/" not in owner.strip().rstrip("/") 
+ return _find_owner_under_source_roots(owner, base_root, owner_single_segment) + + +@beartype +@require(lambda patterns: isinstance(patterns, list), "Patterns must be list") +@ensure(lambda result: isinstance(result, bool), "Must return bool") +def validate_glob_patterns(patterns: list[str]) -> bool: + """Return True if patterns are non-empty, balanced for `[]`/`{}`, and compile as fnmatch regex. + + Full glob semantics follow Python :func:`fnmatch.fnmatch`; this adds bracket/brace balance + checks and a best-effort :func:`fnmatch.translate` + :func:`re.compile` pass for invalid + metacharacter sequences. See ``docs/contributing/frontmatter-schema.md``. + """ + if not patterns: + return False + for pattern in patterns: + p = str(pattern).strip() + if not p: + return False + if p.count("[") != p.count("]"): + return False + if p.count("{") != p.count("}"): + return False + try: + re.compile(fnmatch.translate(p)) + except re.error: + return False + return True + + +@beartype +@require(lambda path: isinstance(path, Path), "Path must be Path object") +@ensure(lambda result: isinstance(result, str), "Must return string") +def suggest_frontmatter(path: Path) -> str: + """Return a suggested frontmatter block for a document.""" + return f"""--- +title: "{path.stem}" +doc_owner: specfact-cli +tracks: + - src/specfact_cli/** + - openspec/** +last_reviewed: {datetime.date.today().isoformat()} +exempt: false +exempt_reason: "" +--- +""" + + +def _rel_posix(path: Path) -> str: + try: + return path.resolve().relative_to(_root().resolve()).as_posix() + except ValueError: + return str(path).replace("\\", "/") + + +def _load_enforced_patterns() -> list[str] | None: + """Return path patterns from docs/.doc-frontmatter-enforced, or None if file missing.""" + path = _enforced_path() + if not path.exists(): + return None + lines: list[str] = [] + for raw in path.read_text(encoding="utf-8").splitlines(): + line = raw.strip() + if not line or line.startswith("#"): + continue + 
lines.append(line) + return lines + + +def _matches_enforced(rel: str, patterns: list[str]) -> bool: + return any(rel == p or fnmatch.fnmatch(rel, p) for p in patterns) + + +@beartype +@ensure(lambda result: isinstance(result, list), "Must return list") +def get_all_md_files() -> list[Path]: + """Discover Markdown files that are candidates for validation.""" + root = _root() + files: list[Path] = [] + for doc_dir in TRACKED_DOC_DIRS: + target = root / doc_dir + if target.exists(): + files.extend(target.rglob("*.md")) + for name in REQUIRED_ROOT_DOCS: + p = root / name + if p.exists(): + files.append(p) + + filtered: list[Path] = [] + for file_path in files: + rel = _rel_posix(file_path) + if rel in EXEMPT_FILES: + continue + try: + fm = parse_frontmatter(file_path) + except (OSError, yaml.YAMLError) as exc: + _LOG.debug("parse_frontmatter failed for %s during discovery: %r", file_path, exc, exc_info=exc) + filtered.append(file_path) + continue + exempt_val = fm.get("exempt") + if exempt_val is True and str(fm.get("exempt_reason", "")).strip(): + continue + filtered.append(file_path) + return filtered + + +def _missing_owner_by_scan(files: list[Path]) -> list[Path]: + out: list[Path] = [] + for file_path in files: + try: + content = file_path.read_text(encoding="utf-8") + if not DOC_OWNER_RE.search(content): + out.append(file_path) + except OSError: + out.append(file_path) + return out + + +def _missing_owner_via_rg(files: list[Path]) -> list[Path] | None: + """Return missing list, or None to fall back to line-by-line scan.""" + file_args = [str(f) for f in files] + try: + result = subprocess.run( + ["rg", "--files-without-match", r"^\s*doc_owner\s*:", *file_args], + capture_output=True, + text=True, + timeout=60, + ) + except subprocess.TimeoutExpired: + return None + # rg: 0 = listed files exist, 1 = no listings (e.g. 
all files match pattern), 2+ = error + if result.returncode >= 2: + return None + if result.returncode == 1: + return [] + missing: list[Path] = [] + for line in result.stdout.strip().splitlines(): + if line.strip(): + missing.append(Path(line)) + return missing + + +@beartype +@require(lambda files: isinstance(files, list), "Files must be list") +@ensure(lambda result: isinstance(result, list), "Must return list") +def rg_missing_doc_owner(files: list[Path]) -> list[Path]: + """Return paths whose raw content lacks a doc_owner field.""" + if not files: + return [] + try: + via_rg = _missing_owner_via_rg(files) + if via_rg is not None: + return via_rg + except (FileNotFoundError, subprocess.TimeoutExpired, OSError) as exc: + _ERR.print( + f"doc-frontmatter: rg-based doc_owner scan failed ({exc!r}); falling back to per-file scan.", + ) + return _missing_owner_by_scan(files) + + +def _validate_record(fm: dict[str, Any]) -> list[str]: + """Return human-readable errors for a frontmatter dict (non-exempt docs).""" + missing = [f"missing `{key}`" for key in REQUIRED_KEYS if key not in fm] + if missing: + return missing + try: + DocFrontmatter.model_validate(fm) + except ValidationError as exc: + return _format_doc_frontmatter_errors(exc) + return [] + + +def _discover_paths_to_check(all_docs: bool) -> list[Path] | None: + """Return files to validate, or None when the run should exit 0 early (skip).""" + if all_docs: + return get_all_md_files() + enforced = _load_enforced_patterns() + if enforced is None: + _ERR.print( + "doc-frontmatter: docs/.doc-frontmatter-enforced not found — skipping " + "(use --all-docs to validate every doc).", + ) + return None + if len(enforced) == 0: + _ERR.print("doc-frontmatter: enforced list is empty — nothing to check.") + return None + return [f for f in get_all_md_files() if _matches_enforced(_rel_posix(f), enforced)] + + +def _collect_failures( + all_files: list[Path], + files_without_owner: list[Path], + fix_hint: bool, +) -> list[str]: 
+ failures: list[str] = [] + for file_path in all_files: + rel = _rel_posix(file_path) + if file_path in files_without_owner: + if fix_hint: + failures.append( + f" ✗ MISSING doc_owner: {rel}\n\n Suggested frontmatter:\n{suggest_frontmatter(file_path)}" + ) + else: + failures.append(f" ✗ MISSING doc_owner: {rel}") + continue + try: + fm = parse_frontmatter(file_path) + except (OSError, yaml.YAMLError) as exc: + failures.append(f" ✗ YAML parse error: {rel}: {exc}") + continue + errs = _validate_record(fm) + if errs: + msg = f" ✗ INVALID frontmatter: {rel}\n - " + "\n - ".join(errs) + if fix_hint: + msg += f"\n\n Suggested frontmatter:\n{suggest_frontmatter(file_path)}" + failures.append(msg) + return failures + + +def _argv_ok(argv: list[str] | None) -> bool: + return argv is None or all(isinstance(x, str) for x in argv) + + +def _run_validation(fix_hint: bool, all_docs: bool) -> int: + all_files = _discover_paths_to_check(all_docs) + if all_files is None: + return 0 + if not all_files: + _ERR.print("✅ No documentation paths matched — nothing to check.") + return 0 + + files_without_owner = rg_missing_doc_owner(all_files) + failures = _collect_failures(all_files, files_without_owner, fix_hint) + + if failures: + _ERR.print(f"\n❌ Doc frontmatter validation failed ({len(failures)} issue(s)):\n") + for block in failures: + _ERR.print(block) + if not fix_hint: + _ERR.print("\n💡 Tip: run with --fix-hint for suggested frontmatter blocks.") + return 1 + + scope = "all docs" if all_docs else "enforced list" + _ERR.print(f"✅ Doc frontmatter OK — {len(all_files)} doc(s) checked (scope: {scope}).") + return 0 + + +app = typer.Typer( + add_completion=False, + no_args_is_help=False, + help="Validate documentation ownership frontmatter for Markdown files.", +) + + +@app.callback(invoke_without_command=True) +def _cli_root( + fix_hint: bool = typer.Option( + False, + "--fix-hint", + help="Print suggested YAML blocks for common failures.", + ), + all_docs: bool = typer.Option( + 
False, + "--all-docs", + help="Validate every discovered Markdown file (ignore docs/.doc-frontmatter-enforced).", + ), +) -> None: + """Validate doc frontmatter and terminate with the validator exit code.""" + raise typer.Exit(code=_run_validation(fix_hint, all_docs)) + + +@beartype +@require(_argv_ok, "argv must be None or a list of strings") +@ensure(lambda result: result in (0, 1), "exit code must be 0 or 1") +def main(argv: list[str] | None = None) -> int: + """CLI entry: validate doc frontmatter; exit 0 on success, 1 on failure.""" + args = list(sys.argv[1:]) if argv is None else list(argv) + cli = get_command(app) + try: + exit_code = cli.main(args=args, prog_name="doc-frontmatter-check", standalone_mode=False) + except SystemExit as exc: + code = exc.code + if isinstance(code, int): + return code + return 1 + if exit_code is None: + return 0 + return int(exit_code) + + +if __name__ == "__main__": + raise SystemExit(main()) diff --git a/scripts/pre_commit_code_review.py b/scripts/pre_commit_code_review.py old mode 100644 new mode 100755 index 859fafda..d7146d20 --- a/scripts/pre_commit_code_review.py +++ b/scripts/pre_commit_code_review.py @@ -1,22 +1,71 @@ -"""Run specfact code review as a staged-file pre-commit gate.""" +"""Run specfact code review as a staged-file pre-commit gate. + +Writes a machine-readable JSON report to ``.specfact/code-review.json`` (gitignored) +so IDEs and Copilot can read findings; exit code still reflects the governed CI verdict. + +CrossHair: skip (importlib/subprocess side effects; not amenable to full symbolic execution) +""" -# CrossHair: ignore # This helper shells out to the CLI and is intentionally side-effecting. 
from __future__ import annotations import importlib +import json import subprocess import sys from collections.abc import Sequence from pathlib import Path +from subprocess import TimeoutExpired +from beartype import beartype from icontract import ensure, require +from pydantic import BaseModel, ConfigDict, Field, ValidationError, field_validator PYTHON_SUFFIXES = {".py", ".pyi"} +# Default matches dogfood / OpenSpec: machine-readable report under ignored ``.specfact/``. +REVIEW_JSON_OUT = ".specfact/code-review.json" + + +class ReviewFinding(BaseModel): + """Minimal validated review finding for summary rendering.""" + + model_config = ConfigDict(extra="ignore") + + severity: str = Field(default="other") + + @field_validator("severity", mode="before") + @classmethod + def _normalize_severity(cls, value: object) -> str: + if not isinstance(value, str): + return "other" + key = value.lower().strip() + if key in ("error", "err"): + return "error" + if key in ("warning", "warn"): + return "warning" + if key in ("advisory", "advise"): + return "advisory" + if key == "info": + return "info" + return "other" + + +class CodeReviewReport(BaseModel): + """Minimal validated review report for summary rendering.""" + + model_config = ConfigDict(extra="ignore") + + findings: list[ReviewFinding] + overall_verdict: str | None = None + +CodeReviewReport.model_rebuild() + + +@beartype @require(lambda paths: paths is not None) @ensure(lambda result: len(result) == len(set(result))) @ensure(lambda result: all(Path(path).suffix.lower() in PYTHON_SUFFIXES for path in result)) @@ -34,42 +83,200 @@ def filter_review_files(paths: Sequence[str]) -> list[str]: return filtered +@beartype @require(lambda files: files is not None) @ensure(lambda result: result[:5] == [sys.executable, "-m", "specfact_cli.cli", "code", "review"]) -@ensure(lambda result: "--score-only" in result) +@ensure(lambda result: "--json" in result and "--out" in result) +@ensure(lambda result: REVIEW_JSON_OUT in result) 
def build_review_command(files: Sequence[str]) -> list[str]: - """Build the score-only review command used by the pre-commit gate.""" - return [sys.executable, "-m", "specfact_cli.cli", "code", "review", "run", "--score-only", *files] + """Build ``code review run --json --out …`` so findings are written for tooling.""" + return [ + sys.executable, + "-m", + "specfact_cli.cli", + "code", + "review", + "run", + "--json", + "--out", + REVIEW_JSON_OUT, + *files, + ] + + +def _repo_root() -> Path: + """Repository root (parent of ``scripts/``).""" + return Path(__file__).resolve().parents[1] + + +def _report_path(repo_root: Path) -> Path: + """Absolute path to the machine-readable review report.""" + return repo_root / REVIEW_JSON_OUT + + +def _count_findings_by_severity(findings: list[ReviewFinding]) -> dict[str, int]: + """Bucket validated review findings by normalized severity.""" + buckets = {"error": 0, "warning": 0, "advisory": 0, "info": 0, "other": 0} + for finding in findings: + buckets[finding.severity] += 1 + return buckets + + +def _load_review_report(report_path: Path) -> CodeReviewReport | None: + """Load and validate the review JSON report.""" + try: + data = json.loads(report_path.read_text(encoding="utf-8")) + except (OSError, UnicodeDecodeError) as exc: + sys.stderr.write(f"Code review: could not read {REVIEW_JSON_OUT}: {exc}\n") + return None + except json.JSONDecodeError as exc: + sys.stderr.write(f"Code review: invalid JSON in {REVIEW_JSON_OUT}: {exc}\n") + return None + if not isinstance(data, dict): + sys.stderr.write( + f"Code review: expected a JSON object at top level in {REVIEW_JSON_OUT} (got {type(data).__name__}).\n", + ) + return None -@ensure(lambda result: isinstance(result[0], bool)) + try: + return CodeReviewReport.model_validate(data) + except ValidationError as exc: + sys.stderr.write(f"Code review: invalid review report in {REVIEW_JSON_OUT}: {exc}\n") + return None + + +def _print_review_findings_summary(repo_root: Path) -> bool: + 
"""Parse ``REVIEW_JSON_OUT`` and print a one-line findings count (errors / warnings / etc.).""" + report_path = _report_path(repo_root) + if not report_path.is_file(): + sys.stderr.write(f"Code review: no report file at {REVIEW_JSON_OUT} (could not print findings summary).\n") + return False + report = _load_review_report(report_path) + if report is None: + return False + + counts = _count_findings_by_severity(report.findings) + total = len(report.findings) + verdict = report.overall_verdict or "?" + parts = [ + f"errors={counts['error']}", + f"warnings={counts['warning']}", + f"advisory={counts['advisory']}", + ] + if counts["info"]: + parts.append(f"info={counts['info']}") + if counts["other"]: + parts.append(f"other={counts['other']}") + summary = ", ".join(parts) + # stderr keeps the summary separate from nested `specfact code review run` stdout; enable hook + # `verbose: true` in .pre-commit-config.yaml so pre-commit prints hook output when the hook passes. + sys.stderr.write(f"Code review summary: {total} finding(s) ({summary}); overall_verdict={verdict!r}.\n") + abs_report = report_path.resolve() + sys.stderr.write(f"Code review report file: {REVIEW_JSON_OUT}\n") + sys.stderr.write(f" absolute path: {abs_report}\n") + sys.stderr.write("Copy-paste for Copilot or Cursor:\n") + sys.stderr.write( + f" Read `{REVIEW_JSON_OUT}` and fix every finding (errors first), using file and line from each entry.\n" + ) + sys.stderr.write(f" @workspace Open `{REVIEW_JSON_OUT}` and remediate each item in `findings`.\n") + return True + + +@beartype +@ensure(lambda result: isinstance(result, tuple) and len(result) == 2) +@ensure(lambda result: isinstance(result[0], bool) and (result[1] is None or isinstance(result[1], str))) def ensure_runtime_available() -> tuple[bool, str | None]: """Verify the current Python environment can import SpecFact CLI.""" try: importlib.import_module("specfact_cli.cli") - except ModuleNotFoundError: - return False, 'Install dev dependencies with 
`pip install -e ".[dev]"` or run `hatch env create`.' + except ModuleNotFoundError as exc: + if exc.name in ("specfact_cli", "specfact_cli.cli"): + return False, 'Install dev dependencies with `pip install -e ".[dev]"` or run `hatch env create`.' + raise return True, None +def _prepare_report_path(repo_root: Path) -> Path: + """Create the review-report directory and clear any stale report file.""" + report_path = _report_path(repo_root) + report_path.parent.mkdir(parents=True, exist_ok=True) + if report_path.is_file(): + report_path.unlink() + return report_path + + +def _run_review_subprocess( + cmd: Sequence[str], + repo_root: Path, + files: Sequence[str], +) -> subprocess.CompletedProcess[str] | None: + """Run the nested SpecFact review command and handle timeout reporting.""" + try: + return subprocess.run( + list(cmd), + check=False, + text=True, + capture_output=True, + cwd=str(repo_root), + timeout=300, + ) + except TimeoutExpired: + joined_cmd = " ".join(cmd) + sys.stderr.write(f"Code review gate timed out after 300s (command: {joined_cmd!r}, files: {list(files)!r}).\n") + return None + + +def _emit_completed_output(result: subprocess.CompletedProcess[str]) -> None: + """Forward captured subprocess output to stderr when the JSON report is missing.""" + if result.stdout: + sys.stderr.write(result.stdout if result.stdout.endswith("\n") else result.stdout + "\n") + if result.stderr: + sys.stderr.write(result.stderr if result.stderr.endswith("\n") else result.stderr + "\n") + + +def _missing_report_exit_code( + report_path: Path, + result: subprocess.CompletedProcess[str], +) -> int: + """Return the gate exit code when the nested review run failed to create its JSON report.""" + _emit_completed_output(result) + sys.stderr.write( + f"Code review: expected review report at {report_path.relative_to(_repo_root())} but it was not created.\n" + ) + return result.returncode if result.returncode != 0 else 1 + + +@beartype +@require(lambda argv: argv is None or 
isinstance(argv, (list, tuple))) @ensure(lambda result: isinstance(result, int)) def main(argv: Sequence[str] | None = None) -> int: - """Run the score-only review gate over staged Python files.""" - files = filter_review_files(list(argv or [])) - if not files: + """Run the code review gate; write JSON under ``.specfact/`` and return CLI exit code.""" + paths_arg = [] if argv is None else list(argv) + files = filter_review_files(paths_arg) + try: + files[0] + except IndexError: sys.stdout.write("No staged Python files to review; skipping code review gate.\n") return 0 available, guidance = ensure_runtime_available() - if not available: + if available is False: sys.stdout.write(f"Unable to run the code review gate. {guidance}\n") return 1 - result = subprocess.run(build_review_command(files), check=False, text=True, capture_output=True) - if result.stdout: - sys.stdout.write(result.stdout) - if result.stderr: - sys.stderr.write(result.stderr) + repo_root = _repo_root() + cmd = build_review_command(files) + report_path = _prepare_report_path(repo_root) + result = _run_review_subprocess(cmd, repo_root, files) + if result is None: + return 1 + if not report_path.is_file(): + return _missing_report_exit_code(report_path, result) + # Do not echo nested `specfact code review run` stdout/stderr (verbose tool banners); full report + # is in REVIEW_JSON_OUT; we print a short summary on stderr below. 
+ if not _print_review_findings_summary(repo_root): + return 1 return result.returncode diff --git a/scripts/publish-module.py b/scripts/publish-module.py old mode 100644 new mode 100755 diff --git a/scripts/validate-modules-repo-sync.py b/scripts/validate-modules-repo-sync.py old mode 100644 new mode 100755 diff --git a/scripts/verify-bundle-published.py b/scripts/verify-bundle-published.py old mode 100644 new mode 100755 diff --git a/setup.py b/setup.py index f5df4c72..525f4944 100644 --- a/setup.py +++ b/setup.py @@ -7,7 +7,7 @@ if __name__ == "__main__": _setup = setup( name="specfact-cli", - version="0.43.1", + version="0.43.2", description=( "The swiss knife CLI for agile DevOps teams. Keep backlog, specs, tests, and code in sync with " "validation and contract enforcement for new projects and long-lived codebases." diff --git a/src/__init__.py b/src/__init__.py index d44458c3..9678e8e8 100644 --- a/src/__init__.py +++ b/src/__init__.py @@ -3,4 +3,4 @@ """ # Package version: keep in sync with pyproject.toml, setup.py, src/specfact_cli/__init__.py -__version__ = "0.43.1" +__version__ = "0.43.2" diff --git a/src/specfact_cli/__init__.py b/src/specfact_cli/__init__.py index ef990799..bbbe0769 100644 --- a/src/specfact_cli/__init__.py +++ b/src/specfact_cli/__init__.py @@ -6,6 +6,9 @@ - Keeping backlog, specs, tests, and code in sync - Enforcing validation and contract checks before production - Supporting agile ceremonies and team workflows + +When a sibling ``specfact-cli-modules`` checkout exists, startup prepends each bundle's ``src`` +to ``sys.path`` so local development can load marketplace packages without installing wheels. 
""" from __future__ import annotations @@ -42,6 +45,6 @@ def _bootstrap_bundle_paths() -> None: _bootstrap_bundle_paths() -__version__ = "0.43.1" +__version__ = "0.43.2" __all__ = ["__version__"] diff --git a/tests/conftest.py b/tests/conftest.py index 44b4ee65..20434f96 100644 --- a/tests/conftest.py +++ b/tests/conftest.py @@ -139,3 +139,8 @@ def pytest_collection_modifyitems(config: object, items: list[object]) -> None: continue if _should_skip_migrated_test(rel): item.add_marker(skip_marker) + + +# Pytest 8+ / 9+: ``pytest_plugins`` is only allowed in the rootdir conftest (not in nested +# packages). Doc frontmatter fixtures live in ``tests.helpers.doc_frontmatter_fixtures``. +pytest_plugins = ("tests.helpers.doc_frontmatter_fixtures",) diff --git a/tests/helpers/__init__.py b/tests/helpers/__init__.py new file mode 100644 index 00000000..26787be5 --- /dev/null +++ b/tests/helpers/__init__.py @@ -0,0 +1 @@ +"""Shared test helpers (importable from unit and integration tests).""" diff --git a/tests/helpers/doc_frontmatter.py b/tests/helpers/doc_frontmatter.py new file mode 100644 index 00000000..357d740c --- /dev/null +++ b/tests/helpers/doc_frontmatter.py @@ -0,0 +1,67 @@ +"""Shared helpers for doc frontmatter unit and integration tests.""" + +from __future__ import annotations + +import datetime +import importlib.util +from collections.abc import Callable +from pathlib import Path +from typing import cast + +from tests.helpers.doc_frontmatter_types import CheckDocFrontmatterModule + + +def repo_root() -> Path: + """Repository root (parent of ``tests/``).""" + return Path(__file__).resolve().parents[2] + + +def load_check_doc_frontmatter_module() -> CheckDocFrontmatterModule: + """Load ``scripts/check_doc_frontmatter.py`` without mutating ``sys.path``.""" + script_path = repo_root() / "scripts" / "check_doc_frontmatter.py" + spec = importlib.util.spec_from_file_location("check_doc_frontmatter", script_path) + if spec is None or spec.loader is None: + raise 
AssertionError(f"Unable to load module from {script_path}") + module = importlib.util.module_from_spec(spec) + spec.loader.exec_module(module) + dfm = getattr(module, "DocFrontmatter", None) + if dfm is not None: + dfm.model_rebuild(_types_namespace={"datetime": datetime}) + return cast(CheckDocFrontmatterModule, module) + + +def validation_main_fn(mod: CheckDocFrontmatterModule) -> Callable[[list[str] | None], int]: + """Return ``check_doc_frontmatter.main`` with a concrete type for static analysis.""" + return mod.main + + +VALID_DOC_FRONTMATTER = """--- +title: "Valid Document" +doc_owner: specfact-cli +tracks: + - src/test/** +last_reviewed: 2026-03-20 +exempt: false +exempt_reason: "" +--- + +# Valid content""" + + +def write_enforced(root: Path, *relative_paths: str) -> None: + """Write ``docs/.doc-frontmatter-enforced`` with the given repo-relative paths.""" + enforced = root / "docs" / ".doc-frontmatter-enforced" + enforced.parent.mkdir(parents=True, exist_ok=True) + enforced.write_text("\n".join(relative_paths) + "\n", encoding="utf-8") + + +def enforce_all_markdown_under_docs(root: Path) -> None: + """Populate enforced list with every ``docs/**/*.md`` path (except the enforced file itself).""" + lines: list[str] = [] + docs = root / "docs" + if docs.exists(): + for p in docs.rglob("*.md"): + if p.name == ".doc-frontmatter-enforced": + continue + lines.append(p.relative_to(root).as_posix()) + write_enforced(root, *lines) diff --git a/tests/helpers/doc_frontmatter_fixtures.py b/tests/helpers/doc_frontmatter_fixtures.py new file mode 100644 index 00000000..f3833306 --- /dev/null +++ b/tests/helpers/doc_frontmatter_fixtures.py @@ -0,0 +1,45 @@ +"""Shared pytest fixtures for doc frontmatter unit and integration tests. + +Registered from ``tests/conftest.py`` via ``pytest_plugins`` (Pytest 8+ requires a +root-level conftest for plugin lists; nested ``conftest.py`` cannot use +``pytest_plugins``). 
+""" + +from __future__ import annotations + +import pytest + +from tests.helpers.doc_frontmatter import load_check_doc_frontmatter_module +from tests.helpers.doc_frontmatter_types import CheckDocFrontmatterModule + + +_DOC_FRONTMATTER_PATH = "test_doc_frontmatter" + + +def pytest_runtest_setup(item: pytest.Item) -> None: + """Clear owner-resolution cache between doc-frontmatter tests (lru_cache isolation).""" + nodeid = getattr(item, "nodeid", "") or "" + path_str = "" + path = getattr(item, "path", None) + if path is not None: + path_str = str(path) + if _DOC_FRONTMATTER_PATH not in nodeid and _DOC_FRONTMATTER_PATH not in path_str: + return + mod = load_check_doc_frontmatter_module() + mod._resolve_owner_impl.cache_clear() + + +@pytest.fixture(scope="session") +def check_doc_frontmatter_module() -> CheckDocFrontmatterModule: + """Single loaded instance of ``scripts/check_doc_frontmatter.py`` (no ``sys.path`` hacks).""" + return load_check_doc_frontmatter_module() + + +@pytest.fixture(autouse=True) +def _clear_doc_frontmatter_root(request: pytest.FixtureRequest, monkeypatch: pytest.MonkeyPatch) -> None: + """Isolate ``DOC_FRONTMATTER_ROOT`` for doc-frontmatter tests only.""" + nodeid = getattr(request.node, "nodeid", "") or "" + path_str = str(getattr(request.node, "path", "")) + if _DOC_FRONTMATTER_PATH not in nodeid and _DOC_FRONTMATTER_PATH not in path_str: + return + monkeypatch.delenv("DOC_FRONTMATTER_ROOT", raising=False) diff --git a/tests/helpers/doc_frontmatter_types.py b/tests/helpers/doc_frontmatter_types.py new file mode 100644 index 00000000..310eadff --- /dev/null +++ b/tests/helpers/doc_frontmatter_types.py @@ -0,0 +1,55 @@ +"""Typing helpers for dynamically loaded ``check_doc_frontmatter`` module.""" + +from __future__ import annotations + +from collections.abc import Callable +from datetime import date +from pathlib import Path +from typing import Any, Protocol, runtime_checkable + + +@runtime_checkable +class ResolveOwnerImplWithCache(Protocol): 
+ """Callable owner resolver exposing ``cache_clear`` like ``functools.lru_cache``.""" + + def __call__(self, owner: str, root_key: str) -> bool: + raise NotImplementedError + + def cache_clear(self) -> None: + raise NotImplementedError + + +@runtime_checkable +class CheckDocFrontmatterModule(Protocol): + """Structural type for ``scripts/check_doc_frontmatter.py`` loaded via importlib.""" + + DocFrontmatter: type[DocFrontmatterModel] + parse_frontmatter: Callable[[Path], dict[str, Any]] + resolve_owner: Callable[[str], bool] + validate_glob_patterns: Callable[[list[str]], bool] + suggest_frontmatter: Callable[[Path], str] + extract_doc_owner: Callable[[str], str | None] + get_all_md_files: Callable[[], list[Path]] + rg_missing_doc_owner: Callable[[list[Path]], list[Path]] + main: Callable[[list[str] | None], int] + datetime: Any + _resolve_owner_impl: ResolveOwnerImplWithCache + + +@runtime_checkable +class DocFrontmatterRecord(Protocol): + """Minimal validated model instance shape used by tests.""" + + title: str + doc_owner: str + tracks: list[str] + last_reviewed: date + + +@runtime_checkable +class DocFrontmatterModel(Protocol): + """Minimal model type exposing ``model_validate`` for tests.""" + + @classmethod + def model_validate(cls, data: dict[str, object]) -> DocFrontmatterRecord: + raise NotImplementedError diff --git a/tests/integration/scripts/test_doc_frontmatter/test_integration.py b/tests/integration/scripts/test_doc_frontmatter/test_integration.py new file mode 100644 index 00000000..0ce23333 --- /dev/null +++ b/tests/integration/scripts/test_doc_frontmatter/test_integration.py @@ -0,0 +1,254 @@ +#!/usr/bin/env python3 +"""Integration tests for doc frontmatter validation.""" + +from __future__ import annotations + +import re +import tempfile +from pathlib import Path + +import pytest + +from tests.helpers.doc_frontmatter import ( + VALID_DOC_FRONTMATTER, + enforce_all_markdown_under_docs, + validation_main_fn, + write_enforced, +) +from 
tests.helpers.doc_frontmatter_types import CheckDocFrontmatterModule + + +ANSI_ESCAPE_RE = re.compile(r"\x1b\[[0-9;]*[A-Za-z]") + + +class TestEndToEndWorkflow: + """Test complete end-to-end validation workflows.""" + + def test_complete_validation_workflow( + self, monkeypatch: pytest.MonkeyPatch, check_doc_frontmatter_module: CheckDocFrontmatterModule + ) -> None: + """Test the complete validation workflow with various file types.""" + validation_main = validation_main_fn(check_doc_frontmatter_module) + with tempfile.TemporaryDirectory() as temp_dir: + root = Path(temp_dir) + monkeypatch.setenv("DOC_FRONTMATTER_ROOT", str(root)) + docs_dir = root / "docs" + docs_dir.mkdir() + (docs_dir / "valid.md").write_text(VALID_DOC_FRONTMATTER) + (docs_dir / "invalid.md").write_text( + """--- +title: "Invalid Document" +--- + +# Missing required fields""" + ) + (docs_dir / "no_frontmatter.md").write_text("# No frontmatter at all") + (root / "src" / "test" / "module").mkdir(parents=True) + enforce_all_markdown_under_docs(root) + assert validation_main([]) == 1 + + def test_validation_with_all_valid_files( + self, monkeypatch: pytest.MonkeyPatch, check_doc_frontmatter_module: CheckDocFrontmatterModule + ) -> None: + """Test validation workflow when all files are valid.""" + validation_main = validation_main_fn(check_doc_frontmatter_module) + with tempfile.TemporaryDirectory() as temp_dir: + root = Path(temp_dir) + monkeypatch.setenv("DOC_FRONTMATTER_ROOT", str(root)) + docs_dir = root / "docs" + docs_dir.mkdir() + for i in range(3): + (docs_dir / f"valid{i}.md").write_text( + VALID_DOC_FRONTMATTER.replace("Valid Document", f"Valid Document {i}") + ) + (root / "src" / "test" / "module").mkdir(parents=True) + enforce_all_markdown_under_docs(root) + assert validation_main([]) == 0 + + +class TestMultipleFileScenarios: + """Test validation with multiple files and complex scenarios.""" + + def test_large_number_of_files( + self, monkeypatch: pytest.MonkeyPatch, 
check_doc_frontmatter_module: CheckDocFrontmatterModule + ) -> None: + """Test performance with a large number of files.""" + validation_main = validation_main_fn(check_doc_frontmatter_module) + with tempfile.TemporaryDirectory() as temp_dir: + root = Path(temp_dir) + monkeypatch.setenv("DOC_FRONTMATTER_ROOT", str(root)) + docs_dir = root / "docs" + docs_dir.mkdir() + for i in range(50): + if i % 3 == 0: + (docs_dir / f"file{i}.md").write_text( + f"""--- +title: "File {i}" +--- + +# Missing doc_owner""" + ) + else: + (docs_dir / f"file{i}.md").write_text(VALID_DOC_FRONTMATTER.replace("Valid Document", f"File {i}")) + (root / "src" / "test" / "module").mkdir(parents=True) + enforce_all_markdown_under_docs(root) + assert validation_main([]) == 1 + + def test_nested_directory_structure( + self, monkeypatch: pytest.MonkeyPatch, check_doc_frontmatter_module: CheckDocFrontmatterModule + ) -> None: + """Test with nested directory structures.""" + validation_main = validation_main_fn(check_doc_frontmatter_module) + with tempfile.TemporaryDirectory() as temp_dir: + root = Path(temp_dir) + monkeypatch.setenv("DOC_FRONTMATTER_ROOT", str(root)) + docs_dir = root / "docs" + docs_dir.mkdir() + (docs_dir / "level1" / "level2" / "level3").mkdir(parents=True) + for rel in ( + "root.md", + "level1/level1.md", + "level1/level2/level2.md", + "level1/level2/level3/level3.md", + ): + (docs_dir / rel).write_text(VALID_DOC_FRONTMATTER.replace("Valid Document", rel)) + (root / "src" / "test" / "module").mkdir(parents=True) + enforce_all_markdown_under_docs(root) + assert validation_main([]) == 0 + + +class TestScaleSmoke: + """Scale / smoke tests: many files and large bodies (not benchmark assertions).""" + + def test_execution_time_with_many_files( + self, monkeypatch: pytest.MonkeyPatch, check_doc_frontmatter_module: CheckDocFrontmatterModule + ) -> None: + """Many small valid docs complete successfully (smoke, not timing assertions).""" + validation_main = 
validation_main_fn(check_doc_frontmatter_module) + with tempfile.TemporaryDirectory() as temp_dir: + root = Path(temp_dir) + monkeypatch.setenv("DOC_FRONTMATTER_ROOT", str(root)) + docs_dir = root / "docs" + docs_dir.mkdir() + for i in range(100): + (docs_dir / f"file{i}.md").write_text(VALID_DOC_FRONTMATTER.replace("Valid Document", f"File {i}")) + (root / "src" / "test" / "module").mkdir(parents=True) + enforce_all_markdown_under_docs(root) + assert validation_main([]) == 0 + + def test_memory_usage_with_large_files( + self, monkeypatch: pytest.MonkeyPatch, check_doc_frontmatter_module: CheckDocFrontmatterModule + ) -> None: + """Large Markdown bodies still validate (smoke, not memory instrumentation).""" + validation_main = validation_main_fn(check_doc_frontmatter_module) + with tempfile.TemporaryDirectory() as temp_dir: + root = Path(temp_dir) + monkeypatch.setenv("DOC_FRONTMATTER_ROOT", str(root)) + docs_dir = root / "docs" + docs_dir.mkdir() + large_content = "# Large content\n" * 1000 + for i in range(10): + body = VALID_DOC_FRONTMATTER.replace("Valid Document", f"Large File {i}") + "\n" + large_content + (docs_dir / f"large{i}.md").write_text(body) + (root / "src" / "test" / "module").mkdir(parents=True) + enforce_all_markdown_under_docs(root) + assert validation_main([]) == 0 + + +class TestCommandLineInterface: + """Test command-line interface functionality.""" + + def test_cli_with_fix_hint_flag( + self, monkeypatch: pytest.MonkeyPatch, check_doc_frontmatter_module: CheckDocFrontmatterModule + ) -> None: + """Test CLI with --fix-hint flag.""" + validation_main = validation_main_fn(check_doc_frontmatter_module) + with tempfile.TemporaryDirectory() as temp_dir: + root = Path(temp_dir) + monkeypatch.setenv("DOC_FRONTMATTER_ROOT", str(root)) + docs_dir = root / "docs" + docs_dir.mkdir() + (docs_dir / "invalid.md").write_text("# Missing frontmatter") + write_enforced(root, "docs/invalid.md") + assert validation_main(["--fix-hint"]) == 1 + + def 
test_cli_help_output( + self, capsys: pytest.CaptureFixture[str], check_doc_frontmatter_module: CheckDocFrontmatterModule + ) -> None: + """Test CLI help output.""" + validation_main = validation_main_fn(check_doc_frontmatter_module) + assert validation_main(["--help"]) == 0 + captured = capsys.readouterr() + combined = ANSI_ESCAPE_RE.sub("", captured.out + captured.err).lower() + assert "usage" in combined or "help" in combined + assert "--fix-hint" in combined + + +class TestRealWorldScenarios: + """Test real-world usage scenarios.""" + + def test_mixed_exempt_and_regular_files( + self, monkeypatch: pytest.MonkeyPatch, check_doc_frontmatter_module: CheckDocFrontmatterModule + ) -> None: + """Test scenario with both exempt and regular files.""" + validation_main = validation_main_fn(check_doc_frontmatter_module) + with tempfile.TemporaryDirectory() as temp_dir: + root = Path(temp_dir) + monkeypatch.setenv("DOC_FRONTMATTER_ROOT", str(root)) + docs_dir = root / "docs" + docs_dir.mkdir() + (docs_dir / "LICENSE.md").write_text("# License") + (docs_dir / "exempt.md").write_text( + """--- +title: "Exempt" +exempt: true +exempt_reason: "Legal document" +--- + +# Exempt content""" + ) + (docs_dir / "regular.md").write_text(VALID_DOC_FRONTMATTER) + (docs_dir / "invalid.md").write_text( + """--- +title: "Invalid" +--- + +# Missing doc_owner""" + ) + (root / "src" / "test" / "module").mkdir(parents=True) + write_enforced(root, "docs/regular.md", "docs/invalid.md", "docs/exempt.md") + assert validation_main([]) == 1 + + def test_complex_tracking_patterns( + self, monkeypatch: pytest.MonkeyPatch, check_doc_frontmatter_module: CheckDocFrontmatterModule + ) -> None: + """Test with complex glob patterns in tracks field.""" + validation_main = validation_main_fn(check_doc_frontmatter_module) + with tempfile.TemporaryDirectory() as temp_dir: + root = Path(temp_dir) + monkeypatch.setenv("DOC_FRONTMATTER_ROOT", str(root)) + docs_dir = root / "docs" + docs_dir.mkdir() + (docs_dir / 
"complex1.md").write_text( + """--- +title: "Complex Patterns" +doc_owner: src/test/module +tracks: + - src/**/*.py + - tests/**/test_*.py + - docs/**/*.md + - "**/excluded/**" +last_reviewed: 2026-03-20 +exempt: false +exempt_reason: "" +--- + +# Complex tracking patterns""" + ) + (root / "src" / "test" / "module").mkdir(parents=True) + write_enforced(root, "docs/complex1.md") + assert validation_main([]) == 0 + + +if __name__ == "__main__": + pytest.main([__file__, "-v"]) diff --git a/tests/unit/scripts/test_code_review_module_docs.py b/tests/unit/scripts/test_code_review_module_docs.py index d2b90f6e..87542316 100644 --- a/tests/unit/scripts/test_code_review_module_docs.py +++ b/tests/unit/scripts/test_code_review_module_docs.py @@ -11,7 +11,12 @@ def test_code_review_docs_cover_pre_commit_gate_and_portable_adoption() -> None: docs = _docs_text() assert "## Pre-Commit Review Gate" in docs assert ".pre-commit-config.yaml" in docs - assert "specfact code review run --score-only" in docs + assert "specfact code review run" in docs + assert ".specfact/code-review.json" in docs + assert "verbose: true" in docs + assert "Verdict line, report file, and Copilot" in docs + assert "Code review summary" in docs + assert "copilot" in docs.lower() assert "## Add to Any Project" in docs diff --git a/tests/unit/scripts/test_doc_frontmatter/test_schema.py b/tests/unit/scripts/test_doc_frontmatter/test_schema.py new file mode 100644 index 00000000..7cd8c070 --- /dev/null +++ b/tests/unit/scripts/test_doc_frontmatter/test_schema.py @@ -0,0 +1,230 @@ +#!/usr/bin/env python3 +""" +Test suite for doc frontmatter schema functionality. +Tests the YAML frontmatter parsing and validation logic. 
+""" + +from __future__ import annotations + +import datetime +from pathlib import Path + +import pytest + +from tests.helpers.doc_frontmatter_types import CheckDocFrontmatterModule + + +class TestFrontmatterParsing: + """Test YAML frontmatter parsing functionality.""" + + def test_valid_frontmatter_parsing( + self, tmp_path: Path, check_doc_frontmatter_module: CheckDocFrontmatterModule + ) -> None: + """Test parsing valid YAML frontmatter.""" + parse_frontmatter = check_doc_frontmatter_module.parse_frontmatter + content = """--- +title: "Test Document" +doc_owner: src/test/module +tracks: + - src/test/** +last_reviewed: 2026-03-20 +exempt: false +exempt_reason: "" +--- + +# Document content here""" + + test_file = tmp_path / "test.md" + test_file.write_text(content, encoding="utf-8") + result = parse_frontmatter(test_file) + assert result is not None + assert "title" in result + assert "doc_owner" in result + assert "tracks" in result + assert result["title"] == "Test Document" + assert result["doc_owner"] == "src/test/module" + assert result["tracks"] == ["src/test/**"] + + def test_missing_required_fields( + self, tmp_path: Path, check_doc_frontmatter_module: CheckDocFrontmatterModule + ) -> None: + """Test detection of missing required fields.""" + parse_frontmatter = check_doc_frontmatter_module.parse_frontmatter + content = """--- +title: "Incomplete Document" +--- + +# Document content here""" + + test_file = tmp_path / "incomplete.md" + test_file.write_text(content, encoding="utf-8") + result = parse_frontmatter(test_file) + assert result is not None + assert result["title"] == "Incomplete Document" + assert "doc_owner" not in result + assert "tracks" not in result + assert "last_reviewed" not in result + assert "exempt" not in result + assert "exempt_reason" not in result + + def test_no_frontmatter(self, tmp_path: Path, check_doc_frontmatter_module: CheckDocFrontmatterModule) -> None: + """Test handling of files without frontmatter.""" + parse_frontmatter 
= check_doc_frontmatter_module.parse_frontmatter + content = "# Document without frontmatter" + + test_file = tmp_path / "plain.md" + test_file.write_text(content, encoding="utf-8") + result = parse_frontmatter(test_file) + assert result == {} + + +class TestOwnerResolution: + """Test owner identifier resolution functionality.""" + + def test_token_owner_resolution( + self, monkeypatch: pytest.MonkeyPatch, tmp_path: Path, check_doc_frontmatter_module: CheckDocFrontmatterModule + ) -> None: + """Known token ``openspec`` resolves without filesystem lookup.""" + monkeypatch.setenv("DOC_FRONTMATTER_ROOT", str(tmp_path)) + resolve_owner = check_doc_frontmatter_module.resolve_owner + result = resolve_owner("openspec") + assert result is True + + def test_filesystem_owner_resolution( + self, monkeypatch: pytest.MonkeyPatch, tmp_path: Path, check_doc_frontmatter_module: CheckDocFrontmatterModule + ) -> None: + """Path-like owner resolves when the directory exists under DOC_FRONTMATTER_ROOT.""" + monkeypatch.setenv("DOC_FRONTMATTER_ROOT", str(tmp_path)) + (tmp_path / "src" / "fs_owner_pkg").mkdir(parents=True) + resolve_owner = check_doc_frontmatter_module.resolve_owner + assert resolve_owner("src/fs_owner_pkg") is True + + def test_known_token_resolution( + self, monkeypatch: pytest.MonkeyPatch, tmp_path: Path, check_doc_frontmatter_module: CheckDocFrontmatterModule + ) -> None: + """Test resolution of known owner tokens.""" + monkeypatch.setenv("DOC_FRONTMATTER_ROOT", str(tmp_path)) + resolve_owner = check_doc_frontmatter_module.resolve_owner + result = resolve_owner("specfact-cli") + assert result is True + + def test_invalid_owner_resolution( + self, monkeypatch: pytest.MonkeyPatch, tmp_path: Path, check_doc_frontmatter_module: CheckDocFrontmatterModule + ) -> None: + """Test handling of invalid owner identifiers.""" + monkeypatch.setenv("DOC_FRONTMATTER_ROOT", str(tmp_path)) + resolve_owner = check_doc_frontmatter_module.resolve_owner + result = 
resolve_owner("nonexistent-owner") + assert result is False + + def test_owner_rejects_traversal_and_glob( + self, monkeypatch: pytest.MonkeyPatch, tmp_path: Path, check_doc_frontmatter_module: CheckDocFrontmatterModule + ) -> None: + """Unsafe owner literals do not resolve.""" + monkeypatch.setenv("DOC_FRONTMATTER_ROOT", str(tmp_path)) + (tmp_path / "safe").mkdir() + resolve_owner = check_doc_frontmatter_module.resolve_owner + assert resolve_owner("../safe") is False + assert resolve_owner("**") is False + assert resolve_owner("") is False + + +class TestDocFrontmatterModel: + """Pydantic model for validated ownership records.""" + + def test_model_validate_accepts_minimal_valid_dict( + self, check_doc_frontmatter_module: CheckDocFrontmatterModule + ) -> None: + doc_frontmatter_model = check_doc_frontmatter_module.DocFrontmatter + data = { + "title": "T", + "doc_owner": "specfact-cli", + "tracks": ["src/**"], + "last_reviewed": "2026-01-01", + "exempt": False, + "exempt_reason": "", + } + rec = doc_frontmatter_model.model_validate(data) + assert rec.title == "T" + assert rec.doc_owner == "specfact-cli" + assert rec.tracks == ["src/**"] + assert str(rec.last_reviewed) == "2026-01-01" + + +class TestGlobPatternValidation: + """Test glob pattern validation functionality.""" + + def test_valid_glob_patterns(self, check_doc_frontmatter_module: CheckDocFrontmatterModule) -> None: + """Test validation of valid glob patterns.""" + validate_glob_patterns = check_doc_frontmatter_module.validate_glob_patterns + patterns = ["src/test/**", "docs/**/*.md", "*.py"] + result = validate_glob_patterns(patterns) + assert result is True + + def test_invalid_glob_patterns(self, check_doc_frontmatter_module: CheckDocFrontmatterModule) -> None: + """Test detection of invalid glob patterns.""" + validate_glob_patterns = check_doc_frontmatter_module.validate_glob_patterns + patterns = ["src/test/[", "invalid{{pattern"] + result = validate_glob_patterns(patterns) + assert result is False + 
+ +class TestFrontmatterSuggestions: + """Test frontmatter suggestion generation.""" + + def test_suggest_frontmatter_template( + self, monkeypatch: pytest.MonkeyPatch, check_doc_frontmatter_module: CheckDocFrontmatterModule + ) -> None: + """Test generation of suggested frontmatter template.""" + fixed = datetime.date(2026, 3, 15) + + class _FakeDate: + @staticmethod + def today() -> datetime.date: + return fixed + + class _FakeDatetimeModule: + date = _FakeDate + + monkeypatch.setattr(check_doc_frontmatter_module, "datetime", _FakeDatetimeModule) + suggest_frontmatter = check_doc_frontmatter_module.suggest_frontmatter + path = Path("test-document.md") + suggestion = suggest_frontmatter(path) + + assert suggestion is not None + assert "---" in suggestion + assert "title:" in suggestion + assert "doc_owner:" in suggestion + assert "tracks:" in suggestion + assert "last_reviewed:" in suggestion + assert fixed.isoformat() in suggestion + assert "exempt:" in suggestion + + +class TestExtractDocOwner: + """Test doc_owner extraction functionality.""" + + def test_extract_valid_owner(self, check_doc_frontmatter_module: CheckDocFrontmatterModule) -> None: + """Test extraction of valid doc_owner from content.""" + extract_doc_owner = check_doc_frontmatter_module.extract_doc_owner + content = """--- +title: "Test" +doc_owner: src/test/module +---""" + + result = extract_doc_owner(content) + assert result == "src/test/module" + + def test_extract_missing_owner(self, check_doc_frontmatter_module: CheckDocFrontmatterModule) -> None: + """Test handling of missing doc_owner.""" + extract_doc_owner = check_doc_frontmatter_module.extract_doc_owner + content = """--- +title: "Test" +---""" + + result = extract_doc_owner(content) + assert result is None + + +if __name__ == "__main__": + pytest.main([__file__, "-v"]) diff --git a/tests/unit/scripts/test_doc_frontmatter/test_validation.py b/tests/unit/scripts/test_doc_frontmatter/test_validation.py new file mode 100644 index 
00000000..aa43fcaa --- /dev/null +++ b/tests/unit/scripts/test_doc_frontmatter/test_validation.py @@ -0,0 +1,266 @@ +#!/usr/bin/env python3 +"""Tests for doc frontmatter validation (scripts/check_doc_frontmatter.py).""" + +from __future__ import annotations + +import tempfile +from pathlib import Path + +import pytest + +from tests.helpers.doc_frontmatter import write_enforced +from tests.helpers.doc_frontmatter_types import CheckDocFrontmatterModule + + +class TestFileDiscovery: + """Test Markdown file discovery functionality.""" + + def test_discover_docs_directory_files( + self, monkeypatch: pytest.MonkeyPatch, check_doc_frontmatter_module: CheckDocFrontmatterModule + ) -> None: + """Test discovery of files in docs directory.""" + get_all_md_files = check_doc_frontmatter_module.get_all_md_files + with tempfile.TemporaryDirectory() as temp_dir: + root = Path(temp_dir) + monkeypatch.setenv("DOC_FRONTMATTER_ROOT", str(root)) + docs_dir = root / "docs" + docs_dir.mkdir() + (docs_dir / "test1.md").write_text("# Test 1") + (docs_dir / "test2.md").write_text("---\ntitle: Test\n---\n# Test 2") + (docs_dir / "subdir").mkdir() + (docs_dir / "subdir" / "test3.md").write_text("# Test 3") + files = get_all_md_files() + assert len(files) == 3 + + def test_exempt_files_exclusion( + self, monkeypatch: pytest.MonkeyPatch, check_doc_frontmatter_module: CheckDocFrontmatterModule + ) -> None: + """Test that exempt files are excluded from discovery.""" + get_all_md_files = check_doc_frontmatter_module.get_all_md_files + with tempfile.TemporaryDirectory() as temp_dir: + root = Path(temp_dir) + monkeypatch.setenv("DOC_FRONTMATTER_ROOT", str(root)) + docs_dir = root / "docs" + docs_dir.mkdir() + (docs_dir / "regular.md").write_text("# Regular") + (docs_dir / "LICENSE.md").write_text("# License") + (docs_dir / "exempt.md").write_text( + "---\ntitle: Test\nexempt: true\nexempt_reason: Test reason\n---\n# Exempt" + ) + files = get_all_md_files() + assert len(files) == 1 + assert 
"regular.md" in str(files[0]) + + +class TestMissingDocOwnerDetection: + """Test detection of missing doc_owner fields.""" + + def test_missing_doc_owner_detection(self, check_doc_frontmatter_module: CheckDocFrontmatterModule) -> None: + """Test detection of files missing doc_owner.""" + rg_missing_doc_owner = check_doc_frontmatter_module.rg_missing_doc_owner + with tempfile.TemporaryDirectory() as temp_dir: + docs_dir = Path(temp_dir) / "docs" + docs_dir.mkdir() + (docs_dir / "with_owner.md").write_text("---\ntitle: Test\ndoc_owner: test\n---\n# Content") + (docs_dir / "without_owner.md").write_text("---\ntitle: Test\n---\n# Content") + (docs_dir / "no_frontmatter.md").write_text("# No frontmatter") + files = [ + docs_dir / "with_owner.md", + docs_dir / "without_owner.md", + docs_dir / "no_frontmatter.md", + ] + missing_owner = rg_missing_doc_owner(files) + assert len(missing_owner) == 2 + assert docs_dir / "without_owner.md" in missing_owner + assert docs_dir / "no_frontmatter.md" in missing_owner + assert docs_dir / "with_owner.md" not in missing_owner + + def test_all_files_have_owner(self, check_doc_frontmatter_module: CheckDocFrontmatterModule) -> None: + """Test when all files have doc_owner.""" + rg_missing_doc_owner = check_doc_frontmatter_module.rg_missing_doc_owner + with tempfile.TemporaryDirectory() as temp_dir: + docs_dir = Path(temp_dir) / "docs" + docs_dir.mkdir() + (docs_dir / "file1.md").write_text("---\ntitle: Test\ndoc_owner: test\n---\n# Content") + (docs_dir / "file2.md").write_text("---\ntitle: Test\ndoc_owner: test\n---\n# Content") + files = [docs_dir / "file1.md", docs_dir / "file2.md"] + missing_owner = rg_missing_doc_owner(files) + assert len(missing_owner) == 0 + + +class TestValidationMainFunction: + """Test the main validation function.""" + + def test_validation_with_valid_files( + self, monkeypatch: pytest.MonkeyPatch, check_doc_frontmatter_module: CheckDocFrontmatterModule + ) -> None: + """Test validation with all valid files.""" 
+ validation_main = check_doc_frontmatter_module.main + with tempfile.TemporaryDirectory() as temp_dir: + root = Path(temp_dir) + monkeypatch.setenv("DOC_FRONTMATTER_ROOT", str(root)) + docs_dir = root / "docs" + docs_dir.mkdir() + (docs_dir / "valid1.md").write_text( + """--- +title: "Valid Document" +doc_owner: src/test/module +tracks: + - src/test/** +last_reviewed: 2026-03-20 +exempt: false +exempt_reason: "" +--- + +# Valid content""" + ) + (root / "src" / "test" / "module").mkdir(parents=True) + write_enforced(root, "docs/valid1.md") + result = validation_main([]) + assert result == 0 + + def test_validation_with_invalid_files( + self, monkeypatch: pytest.MonkeyPatch, check_doc_frontmatter_module: CheckDocFrontmatterModule + ) -> None: + """Test validation with invalid files.""" + validation_main = check_doc_frontmatter_module.main + with tempfile.TemporaryDirectory() as temp_dir: + root = Path(temp_dir) + monkeypatch.setenv("DOC_FRONTMATTER_ROOT", str(root)) + docs_dir = root / "docs" + docs_dir.mkdir() + (docs_dir / "invalid.md").write_text( + """--- +title: "Invalid Document" +--- + +# Missing doc_owner""" + ) + write_enforced(root, "docs/invalid.md") + result = validation_main([]) + assert result == 1 + + +class TestOwnerResolutionValidation: + """Test owner resolution validation.""" + + def test_valid_owner_resolution( + self, monkeypatch: pytest.MonkeyPatch, check_doc_frontmatter_module: CheckDocFrontmatterModule + ) -> None: + """Test validation with valid owner resolution.""" + validation_main = check_doc_frontmatter_module.main + with tempfile.TemporaryDirectory() as temp_dir: + root = Path(temp_dir) + monkeypatch.setenv("DOC_FRONTMATTER_ROOT", str(root)) + docs_dir = root / "docs" + docs_dir.mkdir() + (docs_dir / "valid.md").write_text( + """--- +title: "Valid Document" +doc_owner: src/test/module +tracks: + - src/test/** +last_reviewed: 2026-03-20 +exempt: false +exempt_reason: "" +--- + +# Content""" + ) + (root / "src" / "test" / 
"module").mkdir(parents=True) + write_enforced(root, "docs/valid.md") + result = validation_main([]) + assert result == 0 + + def test_invalid_owner_resolution( + self, monkeypatch: pytest.MonkeyPatch, check_doc_frontmatter_module: CheckDocFrontmatterModule + ) -> None: + """Test validation with invalid owner resolution.""" + validation_main = check_doc_frontmatter_module.main + with tempfile.TemporaryDirectory() as temp_dir: + root = Path(temp_dir) + monkeypatch.setenv("DOC_FRONTMATTER_ROOT", str(root)) + docs_dir = root / "docs" + docs_dir.mkdir() + (docs_dir / "invalid.md").write_text( + """--- +title: "Invalid Document" +doc_owner: nonexistent/owner +tracks: + - src/test/** +last_reviewed: 2026-03-20 +exempt: false +exempt_reason: "" +--- + +# Content""" + ) + write_enforced(root, "docs/invalid.md") + result = validation_main([]) + assert result == 1 + + +class TestFixHintGeneration: + """Test fix hint generation functionality.""" + + def test_fix_hint_for_missing_frontmatter( + self, + monkeypatch: pytest.MonkeyPatch, + capsys: pytest.CaptureFixture[str], + check_doc_frontmatter_module: CheckDocFrontmatterModule, + ) -> None: + """Test fix hint generation for missing frontmatter.""" + validation_main = check_doc_frontmatter_module.main + with tempfile.TemporaryDirectory() as temp_dir: + root = Path(temp_dir) + monkeypatch.setenv("DOC_FRONTMATTER_ROOT", str(root)) + docs_dir = root / "docs" + docs_dir.mkdir() + (docs_dir / "no_frontmatter.md").write_text("# Document without frontmatter") + write_enforced(root, "docs/no_frontmatter.md") + result = validation_main(["--fix-hint"]) + assert result == 1 + captured = capsys.readouterr() + assert "MISSING doc_owner" in captured.err + assert "Suggested frontmatter" in captured.err + assert "doc_owner:" in captured.err + assert "tracks:" in captured.err + assert "last_reviewed:" in captured.err + assert "exempt:" in captured.err + assert "exempt_reason:" in captured.err + + def test_fix_hint_for_invalid_owner( + self, + 
monkeypatch: pytest.MonkeyPatch, + capsys: pytest.CaptureFixture[str], + check_doc_frontmatter_module: CheckDocFrontmatterModule, + ) -> None: + """Test fix hint generation for invalid owner.""" + validation_main = check_doc_frontmatter_module.main + with tempfile.TemporaryDirectory() as temp_dir: + root = Path(temp_dir) + monkeypatch.setenv("DOC_FRONTMATTER_ROOT", str(root)) + docs_dir = root / "docs" + docs_dir.mkdir() + (docs_dir / "invalid.md").write_text( + """--- +title: "Invalid Document" +doc_owner: invalid/owner +tracks: + - src/** +last_reviewed: 2026-03-20 +exempt: false +exempt_reason: "" +--- + +# Content""" + ) + write_enforced(root, "docs/invalid.md") + result = validation_main(["--fix-hint"]) + assert result == 1 + captured = capsys.readouterr() + assert "INVALID" in captured.err or "does not resolve" in captured.err + + +if __name__ == "__main__": + pytest.main([__file__, "-v"]) diff --git a/tests/unit/scripts/test_pre_commit_code_review.py b/tests/unit/scripts/test_pre_commit_code_review.py index 03ec8c38..1293e45d 100644 --- a/tests/unit/scripts/test_pre_commit_code_review.py +++ b/tests/unit/scripts/test_pre_commit_code_review.py @@ -5,6 +5,7 @@ from __future__ import annotations import importlib.util +import json import subprocess import sys from pathlib import Path @@ -34,14 +35,16 @@ def test_filter_review_files_keeps_only_python_sources() -> None: ] -def test_build_review_command_uses_score_only_mode() -> None: - """Pre-commit gate should rely on score-only exit-code semantics.""" +def test_build_review_command_writes_json_report() -> None: + """Pre-commit gate should write ReviewReport JSON for IDE/Copilot and use exit verdict.""" module = _load_script_module() command = module.build_review_command(["src/app.py", "tests/test_app.py"]) assert command[:5] == [sys.executable, "-m", "specfact_cli.cli", "code", "review"] - assert "--score-only" in command + assert "--json" in command + assert "--out" in command + assert module.REVIEW_JSON_OUT in 
command assert command[-2:] == ["src/app.py", "tests/test_app.py"] @@ -55,23 +58,164 @@ def test_main_skips_when_no_relevant_files(capsys: pytest.CaptureFixture[str]) - assert "No staged Python files" in capsys.readouterr().out -def test_main_propagates_review_gate_exit_code(monkeypatch: pytest.MonkeyPatch) -> None: +def test_main_propagates_review_gate_exit_code( + monkeypatch: pytest.MonkeyPatch, tmp_path: Path, capsys: pytest.CaptureFixture[str] +) -> None: """Blocking review verdicts must block the commit by returning non-zero.""" module = _load_script_module() + repo_root = tmp_path + payload = { + "overall_verdict": "FAIL", + "findings": [ + {"severity": "error", "rule": "e1"}, + {"severity": "warning", "rule": "w1"}, + ], + } + + def _fake_root() -> Path: + return repo_root + + def _fake_ensure() -> tuple[bool, str | None]: + return True, None + + def _fake_run(cmd: list[str], **kwargs: object) -> subprocess.CompletedProcess[str]: + assert "--json" in cmd + assert module.REVIEW_JSON_OUT in cmd + assert kwargs.get("cwd") == str(repo_root) + assert kwargs.get("timeout") == 300 + _write_sample_review_report(repo_root, payload) + return subprocess.CompletedProcess(cmd, 1, stdout=".specfact/code-review.json\n", stderr="") + + monkeypatch.setattr(module, "_repo_root", _fake_root) + monkeypatch.setattr(module, "ensure_runtime_available", _fake_ensure) + monkeypatch.setattr(module.subprocess, "run", _fake_run) + + exit_code = module.main(["src/app.py"]) + + assert exit_code == 1 + captured = capsys.readouterr() + assert captured.out == "" + err = captured.err + assert "Code review summary: 2 finding(s)" in err + assert "errors=1" in err + assert "warnings=1" in err + assert "overall_verdict='FAIL'" in err + assert "Code review report file:" in err + assert "absolute path:" in err + assert "Copy-paste for Copilot or Cursor:" in err + assert "Read `.specfact/code-review.json`" in err + assert "@workspace Open `.specfact/code-review.json`" in err + + +def 
_write_sample_review_report(repo_root: Path, payload: dict[str, object]) -> None: + spec_dir = repo_root / ".specfact" + spec_dir.mkdir(parents=True, exist_ok=True) + (spec_dir / "code-review.json").write_text(json.dumps(payload), encoding="utf-8") + + +def test_count_findings_by_severity_buckets_unknown() -> None: + """Severities map to error/warning/advisory; others go to other.""" + module = _load_script_module() + counts = module._count_findings_by_severity( + [ + module.ReviewFinding(severity="error"), + module.ReviewFinding(severity="WARN"), + module.ReviewFinding(severity="advisory"), + module.ReviewFinding(severity="info"), + module.ReviewFinding(severity="custom"), + module.ReviewFinding.model_validate({}), + ] + ) + assert counts == {"error": 1, "warning": 1, "advisory": 1, "info": 1, "other": 2} + + +def test_main_missing_report_still_returns_exit_code_and_warns( + monkeypatch: pytest.MonkeyPatch, tmp_path: Path, capsys: pytest.CaptureFixture[str] +) -> None: + """If JSON is missing, the hook fails and surfaces subprocess output for debugging.""" + module = _load_script_module() + + def _fake_root() -> Path: + return tmp_path + + def _fake_ensure() -> tuple[bool, str | None]: + return True, None + + def _fake_run(cmd: list[str], **kwargs: object) -> subprocess.CompletedProcess[str]: + report_dir = tmp_path / ".specfact" + assert report_dir.is_dir() + return subprocess.CompletedProcess(cmd, 0, stdout="review stdout", stderr="review stderr") + + monkeypatch.setattr(module, "_repo_root", _fake_root) + monkeypatch.setattr(module, "ensure_runtime_available", _fake_ensure) + monkeypatch.setattr(module.subprocess, "run", _fake_run) + + exit_code = module.main(["src/app.py"]) + + assert exit_code == 1 + err = capsys.readouterr().err + assert "no report file" in err or "expected review report" in err + assert ".specfact/code-review.json" in err + assert "review stdout" in err + assert "review stderr" in err + + +def test_main_timeout_fails_hook(monkeypatch: 
pytest.MonkeyPatch, capsys: pytest.CaptureFixture[str]) -> None: + """Subprocess timeout must fail the hook with a clear message.""" + module = _load_script_module() + repo_root = Path(__file__).resolve().parents[3] + + def _fake_root() -> Path: + return repo_root + + def _fake_ensure() -> tuple[bool, str | None]: + return True, None + + def _fake_run(cmd: list[str], **kwargs: object) -> subprocess.CompletedProcess[str]: + assert kwargs.get("cwd") == str(repo_root) + assert kwargs.get("timeout") == 300 + raise subprocess.TimeoutExpired(cmd, 300) + + monkeypatch.setattr(module, "_repo_root", _fake_root) + monkeypatch.setattr(module, "ensure_runtime_available", _fake_ensure) + monkeypatch.setattr(module.subprocess, "run", _fake_run) + + exit_code = module.main(["src/app.py"]) + + assert exit_code == 1 + err = capsys.readouterr().err + assert "timed out after 300s" in err + assert "src/app.py" in err + + +def test_print_summary_rejects_non_object_json( + monkeypatch: pytest.MonkeyPatch, tmp_path: Path, capsys: pytest.CaptureFixture[str] +) -> None: + """Top-level JSON must be an object so findings can be summarized safely.""" + module = _load_script_module() + repo_root = tmp_path + + def _fake_root() -> Path: + return repo_root def _fake_ensure() -> tuple[bool, str | None]: return True, None - def _fake_run(cmd: list[str], **_: object) -> subprocess.CompletedProcess[str]: - assert "--score-only" in cmd - return subprocess.CompletedProcess(cmd, 1, stdout="-7\n", stderr="") + def _fake_run(cmd: list[str], **kwargs: object) -> subprocess.CompletedProcess[str]: + spec_dir = repo_root / ".specfact" + spec_dir.mkdir(parents=True, exist_ok=True) + (spec_dir / "code-review.json").write_text(json.dumps(["not", "an", "object"]), encoding="utf-8") + return subprocess.CompletedProcess(cmd, 0, stdout="", stderr="") + monkeypatch.setattr(module, "_repo_root", _fake_root) monkeypatch.setattr(module, "ensure_runtime_available", _fake_ensure) monkeypatch.setattr(module.subprocess, 
"run", _fake_run) exit_code = module.main(["src/app.py"]) assert exit_code == 1 + err = capsys.readouterr().err + assert "expected a JSON object" in err def test_main_prints_actionable_setup_guidance_when_runtime_missing( diff --git a/tests/unit/specfact_cli/registry/test_signing_artifacts.py b/tests/unit/specfact_cli/registry/test_signing_artifacts.py index 47d520f6..45feb6a4 100644 --- a/tests/unit/specfact_cli/registry/test_signing_artifacts.py +++ b/tests/unit/specfact_cli/registry/test_signing_artifacts.py @@ -6,8 +6,10 @@ import re from pathlib import Path +from typing import Any, cast import pytest +import yaml REPO_ROOT = Path(__file__).resolve().parents[4] @@ -19,6 +21,23 @@ PUBLISH_PYPI_SCRIPT = REPO_ROOT / ".github" / "workflows" / "scripts" / "check-and-publish-pypi.sh" +def _load_pr_orchestrator_jobs() -> dict[str, dict[str, Any]]: + """Return the parsed jobs mapping for the PR orchestrator workflow.""" + if not PR_ORCHESTRATOR_WORKFLOW.exists(): + pytest.skip("pr-orchestrator workflow not present") + data = yaml.safe_load(PR_ORCHESTRATOR_WORKFLOW.read_text(encoding="utf-8")) + assert isinstance(data, dict), "Expected mapping at pr-orchestrator workflow root" + workflow_root = cast(dict[str, Any], data) + jobs = workflow_root.get("jobs") + assert isinstance(jobs, dict), "Expected jobs mapping in pr-orchestrator workflow" + typed_jobs: dict[str, dict[str, Any]] = {} + for name, job in jobs.items(): + assert isinstance(name, str), "Expected string job names in pr-orchestrator workflow" + assert isinstance(job, dict), f"Expected mapping definition for job {name}" + typed_jobs[name] = job + return typed_jobs + + def test_sign_module_script_exists(): """Signing script scripts/sign-module.sh SHALL exist.""" assert SIGN_SCRIPT.exists(), "scripts/sign-module.sh must exist for signing automation" @@ -518,6 +537,49 @@ def test_pr_orchestrator_pins_virtualenv_below_21_for_hatch_jobs(): assert "virtualenv<21" in command, f"Missing virtualenv<21 pin in command: 
{command}" +@pytest.mark.parametrize( + ("job_name", "required_needs"), + ( + ("compat-py311", {"changes", "verify-module-signatures"}), + ("contract-first-ci", {"changes", "verify-module-signatures"}), + ("type-checking", {"changes", "verify-module-signatures"}), + ("linting", {"changes", "verify-module-signatures"}), + ("cli-validation", {"changes", "verify-module-signatures"}), + ), +) +def test_pr_orchestrator_independent_jobs_do_not_wait_for_tests( + job_name: str, + required_needs: set[str], +) -> None: + """Independent validation jobs SHALL start after the shared signature gate, not after tests.""" + jobs = _load_pr_orchestrator_jobs() + job = jobs.get(job_name) + assert job is not None, f"Missing {job_name} job" + needs = job.get("needs") + assert isinstance(needs, list), f"Expected list needs for {job_name}" + assert set(needs) == required_needs + assert "tests" not in needs + + +def test_pr_orchestrator_quality_gates_still_depends_on_tests_for_coverage() -> None: + """Coverage-based advisory gate SHALL retain the tests dependency.""" + jobs = _load_pr_orchestrator_jobs() + job = jobs.get("quality-gates") + assert job is not None, "Missing quality-gates job" + needs = job.get("needs") + assert isinstance(needs, list), "Expected list needs for quality-gates" + assert set(needs) == {"changes", "tests"} + + +def test_pr_orchestrator_cache_paths_do_not_restore_hatch_virtualenvs() -> None: + """PR orchestrator SHALL cache package downloads, not Hatch virtualenv directories.""" + if not PR_ORCHESTRATOR_WORKFLOW.exists(): + pytest.skip("pr-orchestrator workflow not present") + content = PR_ORCHESTRATOR_WORKFLOW.read_text(encoding="utf-8") + assert "~/.cache/uv" in content + assert "~/.local/share/hatch" not in content + + def test_publish_script_pins_virtualenv_below_21_for_hatch_build(): """PyPI publish script SHALL pin virtualenv<21 when installing hatch.""" if not PUBLISH_PYPI_SCRIPT.exists():