diff --git a/.coderabbit.yaml b/.coderabbit.yaml new file mode 100644 index 00000000..ab666bf1 --- /dev/null +++ b/.coderabbit.yaml @@ -0,0 +1,105 @@ +# yaml-language-server: $schema=https://coderabbit.ai/integrations/schema.v2.json +# +# CodeRabbit aligns with AGENTS.md: contract-first APIs, OpenSpec, Hatch quality gates. +# `reviews.auto_review.base_branches` includes `dev` so PRs into `dev` are auto-reviewed (not only the +# repo default branch). See https://docs.coderabbit.ai/reference/configuration (auto_review). +# Pre-push / finalize: run `cr --base dev` (or `coderabbit review`) from repo root; see +# https://docs.coderabbit.ai/cli/overview +# PR description: include `@coderabbitai summary` (default placeholder) for the high-level summary. +# +language: "en-US" +early_access: false +tone_instructions: >- + Provide architectural insights on system design, scalability, and long-term maintainability. + Prioritize contract-first public APIs (@icontract, @beartype), Typer/Rich CLI boundaries, OpenSpec + traceability, and cross-repo impact for bundled modules. + +reviews: + profile: assertive + request_changes_workflow: false + high_level_summary: true + high_level_summary_in_walkthrough: true + review_details: true + sequence_diagrams: true + estimate_code_review_effort: true + assess_linked_issues: true + related_issues: true + related_prs: true + poem: false + collapse_walkthrough: true + changed_files_summary: true + review_status: true + commit_status: true + high_level_summary_instructions: | + Structure the summary for SpecFact CLI maintainers: + - User-visible behavior and CLI surface (commands, options, defaults). + - Contract/API impact: public functions, Pydantic models, module package boundaries. + - Testing and quality gates: contracts, hatch run contract-test, coverage implications. + - If applicable: OpenSpec change ID, docs/ and CHANGELOG touchpoints, module signing / version bumps. 
+ auto_review: + enabled: true + drafts: false + auto_incremental_review: true + # PRs targeting `dev` (not only the GitHub default branch, e.g. `main`) get automatic reviews. + base_branches: + - "^dev$" + path_instructions: + - path: "src/specfact_cli/**/*.py" + instructions: | + Focus on modular CLI architecture: lazy module loading, registry/bootstrap patterns, and + dependency direction. Flag breaking changes to public APIs, Pydantic models, and resource + bundling. Verify @icontract + @beartype on public surfaces; prefer centralized logging + (get_bridge_logger) over print(). + - path: "openspec/**/*.md" + instructions: | + Treat as specification source of truth: proposal/tasks/spec deltas vs. code behavior, + CHANGE_ORDER consistency, and scenario coverage. Surface drift between OpenSpec and + implementation. + - path: "tests/**/*.py" + instructions: | + Contract-first testing: meaningful scenarios, not redundant assertions already covered by + contracts. Flag flakiness, environment coupling, and missing coverage for changed behavior. + - path: ".github/workflows/**" + instructions: | + CI safety: secrets usage, workflow dependencies, alignment with hatch test / contract-test + gates, and action versions. + - path: "scripts/**/*.py" + instructions: | + Deterministic tooling: subprocess safety, Hatch integration, and parity with documented + quality gates (format, type-check, module signing). + - path: "docs/**/*.md" + instructions: | + User-facing accuracy: CLI examples match current behavior; preserve Jekyll front matter; + call out when README/docs index need sync. + + tools: + ruff: + enabled: true + semgrep: + enabled: true + yamllint: + enabled: true + actionlint: + enabled: true + shellcheck: + enabled: true + + pre_merge_checks: + title: + mode: warning + requirements: "Prefer Conventional Commits-style prefixes (feat:, fix:, docs:, test:, refactor:, chore:)." 
+ issue_assessment: + mode: warning + +knowledge_base: + learnings: + scope: local + linked_repositories: + - repository: "nold-ai/specfact-cli-modules" + instructions: >- + Companion repo: bundled module packages (module-package.yaml), IDE prompts, and + modules.specfact.io content. When CLI surface, imports, or bundled module contracts change, + flag required updates, version bumps, and signature verification impacts. + +chat: + auto_reply: true diff --git a/.github/workflows/docs-review.yml b/.github/workflows/docs-review.yml index b722c4d5..3afa28db 100644 --- a/.github/workflows/docs-review.yml +++ b/.github/workflows/docs-review.yml @@ -9,9 +9,16 @@ on: - "**/*.md" - "**/*.mdc" - "docs/**" + - "docs/.doc-frontmatter-enforced" - "tests/unit/docs/**" + - "tests/unit/scripts/test_doc_frontmatter/**" + - "tests/integration/scripts/test_doc_frontmatter/**" + - "tests/helpers/doc_frontmatter.py" + - "tests/helpers/doc_frontmatter_fixtures.py" + - "tests/helpers/doc_frontmatter_types.py" - "scripts/check-docs-commands.py" - "scripts/check-cross-site-links.py" + - "scripts/check_doc_frontmatter.py" - "pyproject.toml" - ".github/workflows/docs-review.yml" push: @@ -20,9 +27,16 @@ on: - "**/*.md" - "**/*.mdc" - "docs/**" + - "docs/.doc-frontmatter-enforced" - "tests/unit/docs/**" + - "tests/unit/scripts/test_doc_frontmatter/**" + - "tests/integration/scripts/test_doc_frontmatter/**" + - "tests/helpers/doc_frontmatter.py" + - "tests/helpers/doc_frontmatter_fixtures.py" + - "tests/helpers/doc_frontmatter_types.py" - "scripts/check-docs-commands.py" - "scripts/check-cross-site-links.py" + - "scripts/check_doc_frontmatter.py" - "pyproject.toml" - ".github/workflows/docs-review.yml" workflow_dispatch: @@ -62,11 +76,14 @@ jobs: continue-on-error: true run: hatch run check-cross-site-links --warn-only - - name: Run docs review suite + - name: Validate documentation frontmatter (enforced paths; aligns with local docs-validate / pre-commit) + run: hatch run 
doc-frontmatter-check + + - name: Run docs review test suites run: | mkdir -p logs/docs-review DOCS_REVIEW_LOG="logs/docs-review/docs-review_$(date -u +%Y%m%d_%H%M%S).log" - hatch run pytest tests/unit/docs/ -q 2>&1 | tee "$DOCS_REVIEW_LOG" + hatch run pytest tests/unit/docs/ tests/unit/scripts/test_doc_frontmatter/ tests/integration/scripts/test_doc_frontmatter/ -q 2>&1 | tee "$DOCS_REVIEW_LOG" exit "${PIPESTATUS[0]:-$?}" - name: Upload docs review logs diff --git a/.github/workflows/pr-orchestrator.yml b/.github/workflows/pr-orchestrator.yml index f95470e6..be22944a 100644 --- a/.github/workflows/pr-orchestrator.yml +++ b/.github/workflows/pr-orchestrator.yml @@ -151,7 +151,6 @@ jobs: uses: actions/cache@v4 with: path: | - ~/.local/share/hatch ~/.cache/uv key: ${{ runner.os }}-hatch-tests-py312-${{ hashFiles('pyproject.toml', 'src/specfact_cli/modules/*/__init__.py') }} restore-keys: | @@ -219,7 +218,7 @@ jobs: compat-py311: name: Compatibility (Python 3.11) runs-on: ubuntu-latest - needs: [changes, tests] + needs: [changes, verify-module-signatures] if: needs.changes.outputs.skip_tests_dev_to_main != 'true' permissions: contents: read @@ -248,7 +247,6 @@ jobs: uses: actions/cache@v4 with: path: | - ~/.local/share/hatch ~/.cache/uv key: ${{ runner.os }}-hatch-compat-py311-${{ hashFiles('pyproject.toml', 'src/specfact_cli/modules/*/__init__.py') }} restore-keys: | @@ -274,7 +272,7 @@ jobs: contract-first-ci: name: Contract-First CI runs-on: ubuntu-latest - needs: [changes, tests, compat-py311] + needs: [changes, verify-module-signatures] if: needs.changes.outputs.skip_tests_dev_to_main != 'true' permissions: contents: read @@ -303,7 +301,6 @@ jobs: uses: actions/cache@v4 with: path: | - ~/.local/share/hatch ~/.cache/uv key: ${{ runner.os }}-hatch-contract-first-py312-${{ hashFiles('pyproject.toml', 'src/specfact_cli/modules/*/__init__.py') }} restore-keys: | @@ -336,7 +333,7 @@ jobs: cli-validation: name: CLI Command Validation runs-on: ubuntu-latest - needs: 
[changes, contract-first-ci] + needs: [changes, verify-module-signatures] if: needs.changes.outputs.skip_tests_dev_to_main != 'true' permissions: contents: read @@ -414,7 +411,7 @@ jobs: type-checking: name: Type Checking (basedpyright) runs-on: ubuntu-latest - needs: [changes, tests] + needs: [changes, verify-module-signatures] if: needs.changes.outputs.skip_tests_dev_to_main != 'true' permissions: contents: read @@ -449,7 +446,7 @@ jobs: linting: name: Linting (ruff, pylint) runs-on: ubuntu-latest - needs: [changes, tests] + needs: [changes, verify-module-signatures] if: needs.changes.outputs.skip_tests_dev_to_main != 'true' permissions: contents: read diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index e92d81bf..cd0ffafd 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -1,14 +1,23 @@ repos: - repo: local hooks: - - id: specfact-code-review-gate - name: Run code review gate on staged Python files - entry: hatch run python scripts/pre_commit_code_review.py - language: system - files: \.pyi?$ - id: verify-module-signatures name: Verify module signatures and version bumps entry: hatch run ./scripts/verify-modules-signature.py --require-signature --enforce-version-bump language: system pass_filenames: false always_run: true + - id: check-doc-frontmatter + name: Check documentation ownership frontmatter (enforced paths) + entry: hatch run doc-frontmatter-check + language: system + files: ^(docs/.*\.md|USAGE-FAQ\.md)$ + pass_filenames: false + always_run: false + - id: specfact-code-review-gate + name: Run code review gate on staged Python files + entry: hatch run python scripts/pre_commit_code_review.py + language: system + files: \.pyi?$ + # Show summary + copy-paste lines on success; pre-commit hides hook output otherwise. 
+      verbose: true diff --git a/AGENTS.md b/AGENTS.md index e1ffc3cd..988e01cf 100644 --- a/AGENTS.md +++ b/AGENTS.md @@ -37,6 +37,9 @@ hatch run lint-workflows # GitHub Actions actionlint # Code scanning hatch run scan-all # semgrep analysis + +# SpecFact code review JSON (dogfood; see "SpecFact Code Review JSON" below and openspec/config.yaml) +hatch run specfact code review run --json --out .specfact/code-review.json ``` ## Architecture @@ -172,6 +175,36 @@ Run all steps in order before committing. Every step must pass with no errors. 5. `hatch run contract-test` # contract-first validation 6. `hatch run smart-test` # targeted test run (use `smart-test-full` for larger modifications) +With `pre-commit` installed (`pre-commit install`), staged `*.py` / `*.pyi` files also run the local code review gate (`scripts/pre_commit_code_review.py`), which writes the same `.specfact/code-review.json` path. That hook is fast feedback; it does not replace the **PR / change-completion** review rules in the next section when your OpenSpec tasks require a full-scope run. + +### SpecFact Code Review JSON (Dogfood, Quality Gate) + +This matches **`openspec/config.yaml`** (project `context` and **`rules.tasks`** for code review): treat **`.specfact/code-review.json`** as mandatory evidence before an OpenSpec change is considered complete and before you rely on “all gates green” for a PR. + +**When to (re)run the review** + +- The file is **missing**, or +- It is **stale**: the report’s last-modified time is older than any file you changed for this work under `src/`, `scripts/`, `tools/`, `tests/`, or under `openspec/changes/<change-id>/` **except** `openspec/changes/<change-id>/TDD_EVIDENCE.md` — evidence-only edits there do **not** by themselves invalidate the review; re-run when proposal, specs, tasks, design, or code change. 
+ +**Command** + +```bash +hatch run specfact code review run --json --out .specfact/code-review.json +``` + +- While iterating on a branch, prefer a **changed-files scope** when available (e.g. `--scope changed`) so feedback stays fast. +- Before the **final PR** for a change, run a **full** (or equivalent) scope so the report covers the whole quality surface your tasks expect (e.g. `--scope full`). + +**Remediation** + +- Read the JSON report and fix **every** finding at any severity (warning, advisory, error, or equivalent in the schema) unless the change proposal documents a **rare, explicit, justified** exception. +- After substantive edits, re-run until the report shows a **passing** outcome from the review module (e.g. overall verdict PASS / CI exit 0 per schema). +- Record the review command(s) and timestamp in `openspec/changes/<change-id>/TDD_EVIDENCE.md` or in the PR description when the change touches behavior or quality gates. + +**Consistency** + +- OpenSpec change **`tasks.md`** should include explicit tasks for generating/updating this file and clearing findings (see `openspec/config.yaml` → `rules.tasks` → “SpecFact code review JSON”). Agent runs should treat those tasks and this section as the same bar. + ### Module Signature Gate (Required for Change Finalization) Before PR creation, every change MUST pass bundled module signature verification: diff --git a/CHANGELOG.md b/CHANGELOG.md index 74212e47..e28cb74b 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -9,6 +9,49 @@ All notable changes to this project will be documented in this file. --- +## [0.43.2] - 2026-03-29 + +### Added + +- Documentation ownership frontmatter rollout: + - `scripts/check_doc_frontmatter.py` + - `docs/.doc-frontmatter-enforced` (rollout scope) + - `hatch run doc-frontmatter-check` + - Pre-commit hook integration + - Guide: `docs/contributing/docs-sync.md` + - Sample pages include the extended schema; use `--all-docs` for a full-site check. 
+ +### Fixed + +- **Tests:** Register doc-frontmatter shared fixtures via root ``tests/conftest.py`` + ``pytest_plugins`` (Pytest 8+ forbids ``pytest_plugins`` in nested conftests); scope env/cache + isolation to ``test_doc_frontmatter`` paths only. +- OpenSpec (`openspec/config.yaml`): **SpecFact code review JSON** dogfood tasks — require a fresh + `.specfact/code-review.json`, remediate review findings before merge, and record TDD evidence; **freshness** + excludes `openspec/changes/<change-id>/TDD_EVIDENCE.md` from the staleness comparison so evidence-only edits + there do not by themselves force a new review run. +- `scripts/pre_commit_code_review.py`: invoke review from repo root (`cwd`), emit JSON as above, and enforce a + 300s subprocess timeout with `TimeoutExpired` handling so the hook cannot hang indefinitely. + +### Changed + +- **CI:** `Docs Review` GitHub Actions workflow runs `hatch run doc-frontmatter-check` + and includes doc-frontmatter unit/integration tests alongside `tests/unit/docs/`, with path + filters for frontmatter scripts and helpers. +- Pre-commit `specfact-code-review-gate`: switched from `specfact code review run` with `--score-only` to + `specfact code review run --json --out .specfact/code-review.json`, writing governed `ReviewReport` JSON under + gitignored `.specfact/` for IDE and Copilot workflows; the hook now prints severity counts on stderr, a short + summary with the report path (absolute path hint), copy-paste prompts for Copilot/Cursor, and relies on + `verbose: true` so successful runs still surface that feedback next to the nested CLI output. +- Documentation: `docs/modules/code-review.md` updated for the JSON report path and the pre-commit hook behavior. +- Tests: `tests/unit/scripts/test_pre_commit_code_review.py` and `test_code_review_module_docs.py` updated for + `--json`/`--out`, repo-root `cwd`, and timeout handling. 
+- Doc frontmatter validation now uses a Pydantic `DocFrontmatter` model, stricter `tracks` glob checks + (`fnmatch.translate` + `re.compile` after bracket/brace balance), and a reference page at + `docs/contributing/frontmatter-schema.md`. + +--- + ## [0.43.1] - 2026-03-28 ### Changed diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index c8fea83e..e4b3b031 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -70,6 +70,7 @@ hatch test --cover -v - **Type Hints**: Use type hints with `@beartype` runtime checking - **Testing**: Contract-first testing with scenario tests - **Documentation**: Update relevant documentation +- **Docs site (`docs/`)**: Jekyll pages keep existing frontmatter (`layout`, `title`, `permalink`, …). Pages listed in `docs/.doc-frontmatter-enforced` must also include the doc-sync ownership fields (`doc_owner`, `tracks`, `last_reviewed`, `exempt`, `exempt_reason`). See [docs/contributing/docs-sync.md](docs/contributing/docs-sync.md). - **Docstrings**: Follow Google Python Style Guide ### Pre-commit Checks diff --git a/docs/.doc-frontmatter-enforced b/docs/.doc-frontmatter-enforced new file mode 100644 index 00000000..bfca20a4 --- /dev/null +++ b/docs/.doc-frontmatter-enforced @@ -0,0 +1,8 @@ +# Paths (relative to repo root) that must carry full doc-sync frontmatter. +# Expand this list as the rollout progresses; use `scripts/check_doc_frontmatter.py --all-docs` for a full-site check. 
+docs/index.md +docs/getting-started/quickstart.md +docs/core-cli/init.md +docs/reference/documentation-url-contract.md +docs/contributing/docs-sync.md +docs/contributing/frontmatter-schema.md diff --git a/docs/contributing/docs-sync.md b/docs/contributing/docs-sync.md new file mode 100644 index 00000000..5db92b5d --- /dev/null +++ b/docs/contributing/docs-sync.md @@ -0,0 +1,50 @@ +--- +layout: default +title: Documentation ownership and frontmatter +permalink: /contributing/docs-sync/ +description: YAML frontmatter schema for documentation ownership, tracking, and validation on docs.specfact.io. +keywords: [documentation, frontmatter, ownership, validation] +audience: [solo, team, enterprise] +expertise_level: [intermediate, advanced] +doc_owner: specfact-cli +tracks: + - scripts/check_doc_frontmatter.py + - docs/** +last_reviewed: 2026-03-29 +exempt: false +exempt_reason: "" +--- + +# Documentation ownership and frontmatter + +Core documentation uses **YAML frontmatter** for Jekyll (layout, title, permalink) and for **ownership** fields that drive the `scripts/check_doc_frontmatter.py` checker. + +## Schema (ownership) + +For field definitions, validation rules, and cross-repo notes, see **[Doc-sync frontmatter reference](frontmatter-schema.md)**. + +Add these keys alongside existing Jekyll fields: + +| Field | Required | Description | +| --- | --- | --- | +| `doc_owner` | yes | Repo path (e.g. `src/specfact_cli`) or a known token (`specfact-cli`, `nold-ai`, `openspec`). | +| `tracks` | yes | List of glob patterns for sources this page should stay aligned with. | +| `last_reviewed` | yes | `YYYY-MM-DD` date of last substantive review. | +| `exempt` | yes | `true` only for stable or legal pages that skip sync rules; use with `exempt_reason`. | +| `exempt_reason` | yes | Empty string when `exempt` is `false`; non-empty when `exempt` is `true`. 
| + +## Validation + +From the repository root: + +```bash +hatch run doc-frontmatter-check +``` + +Use `--fix-hint` for suggested YAML blocks. During rollout, only paths listed in `docs/.doc-frontmatter-enforced` are checked unless you pass `--all-docs`. + +## Troubleshooting + +- **Missing `doc_owner`**: add the field and a sensible `tracks` list for the code or specs this page describes. +- **Owner does not resolve**: use a path that exists under the repo or one of the known tokens. +- **Invalid `tracks`**: ensure balanced `[]` and `{}` in glob patterns. diff --git a/docs/contributing/frontmatter-schema.md b/docs/contributing/frontmatter-schema.md new file mode 100644 index 00000000..a886161e --- /dev/null +++ b/docs/contributing/frontmatter-schema.md @@ -0,0 +1,46 @@ +--- +layout: default +title: Doc-sync frontmatter reference +permalink: /contributing/frontmatter-schema/ +description: Required ownership fields, validation rules, and cross-site documentation contracts for core docs. +keywords: [documentation, frontmatter, schema, validation] +audience: [solo, team, enterprise] +expertise_level: [intermediate, advanced] +doc_owner: specfact-cli +tracks: + - scripts/check_doc_frontmatter.py + - docs/contributing/** +last_reviewed: 2026-03-29 +exempt: false +exempt_reason: "" +--- + +# Doc-sync frontmatter reference + +This page is the **authoritative schema** for the ownership and tracking fields consumed by `scripts/check_doc_frontmatter.py`. Jekyll fields (`layout`, `title`, `permalink`, etc.) stay unchanged; the rows below are **additional** keys for doc-sync. + +## Required fields + +| Field | Type | Rules | +| --- | --- | --- | +| `doc_owner` | string | Repo-relative path that exists, or a token from `VALID_OWNER_TOKENS` in the script (`specfact-cli`, `nold-ai`, `openspec`). | +| `tracks` | list of strings | Non-empty; each entry is a [fnmatch](https://docs.python.org/3/library/fnmatch.html)-style glob. 
Brackets and braces must balance; patterns must compile as fnmatch regex (see `validate_glob_patterns` in the script). | +| `last_reviewed` | `YYYY-MM-DD` | ISO date; YAML may load as a string or date. | +| `exempt` | boolean | `true` only for pages that intentionally skip sync rules. | +| `exempt_reason` | string | Empty when `exempt` is `false`; non-empty when `exempt` is `true`. | + +Runtime validation uses a Pydantic model (`DocFrontmatter` in `scripts/check_doc_frontmatter.py`): missing keys, wrong types, bad globs, unresolved `doc_owner`, and invalid exempt/reason pairs fail the check. + +## Cross-repo notes + +- **Core vs modules:** Canonical user-facing docs for bundles and marketplace content live on [modules.specfact.io](https://modules.specfact.io/). Linking and URL rules are described in [Documentation URL contract](../reference/documentation-url-contract.md). +- **Bundled modules:** When CLI or module surface changes, update both this repo’s docs (as tracked by `tracks`) and the sibling **specfact-cli-modules** documentation where applicable; CodeRabbit linked-repo analysis can flag cross-repo drift. + +## Commands + +```bash +hatch run doc-frontmatter-check +hatch run doc-frontmatter-check -- --all-docs +``` + +Rollout scope is controlled by `docs/.doc-frontmatter-enforced`. See [Documentation ownership and frontmatter](/contributing/docs-sync/) for workflow. 
diff --git a/docs/core-cli/init.md b/docs/core-cli/init.md index b87c67b7..61488d21 100644 --- a/docs/core-cli/init.md +++ b/docs/core-cli/init.md @@ -6,6 +6,13 @@ description: Reference for the specfact init command - bootstrap SpecFact in a r keywords: [init, bootstrap, ide, profiles] audience: [solo, team, enterprise] expertise_level: [beginner, intermediate] +doc_owner: specfact-cli +tracks: + - src/specfact_cli/** + - openspec/** +last_reviewed: 2026-03-29 +exempt: false +exempt_reason: "" --- # specfact init diff --git a/docs/getting-started/quickstart.md b/docs/getting-started/quickstart.md index 21185bc2..1d35e081 100644 --- a/docs/getting-started/quickstart.md +++ b/docs/getting-started/quickstart.md @@ -8,6 +8,13 @@ description: Get SpecFact CLI running in under 5 minutes - install, bootstrap, a keywords: [quickstart, first-run, bootstrap, analysis] audience: [solo, team] expertise_level: [beginner] +doc_owner: specfact-cli +tracks: + - src/specfact_cli/** + - openspec/** +last_reviewed: 2026-03-29 +exempt: false +exempt_reason: "" --- # 5-Minute Quickstart diff --git a/docs/index.md b/docs/index.md index ca916fa4..0895533e 100644 --- a/docs/index.md +++ b/docs/index.md @@ -6,6 +6,13 @@ permalink: / keywords: [specfact, core-cli, runtime, module-system, architecture] audience: [solo, team, enterprise] expertise_level: [beginner, intermediate, advanced] +doc_owner: specfact-cli +tracks: + - src/specfact_cli/** + - openspec/** +last_reviewed: 2026-03-29 +exempt: false +exempt_reason: "" --- # SpecFact CLI Documentation diff --git a/docs/modules/code-review.md b/docs/modules/code-review.md index 2ea7a5cb..97aaa120 100644 --- a/docs/modules/code-review.md +++ b/docs/modules/code-review.md @@ -116,14 +116,42 @@ repos: entry: hatch run python scripts/pre_commit_code_review.py language: system files: \.pyi?$ + # Needed so you still see hook output when the gate passes (pre-commit hides it otherwise). 
+ verbose: true ``` The helper script scopes the gate to staged Python files only and then runs: ```bash -specfact code review run --score-only +specfact code review run --json --out .specfact/code-review.json ``` +The JSON report is written under ``.specfact/`` (ignored by git via ``.specfact/`` in ``.gitignore``) so local tools and Copilot can read structured findings. The pre-commit helper does **not** print the nested review CLI’s full stdout (tool banners and runner lines); it only prints the short summary and copy-paste lines on stderr after the run. + +### Verdict line, report file, and Copilot + +After each run, the helper prints a **one-line summary on stderr** (with ``verbose: true`` on the hook so it is visible even when the commit is allowed), for example: + +```text +Code review summary: 3 finding(s) (errors=0, warnings=3, advisory=0); overall_verdict='PASS'. +``` + +Immediately after that line, the script prints the **report path** (repo-relative and absolute), then **ready-to-paste prompts** for Copilot or Cursor so you can copy them without opening this page. + +The **authoritative machine-readable report** is always at the repository root: + +```text +.specfact/code-review.json +``` + +Open that file to see ``overall_verdict``, ``score``, ``findings`` (with ``file``, ``line``, ``rule``, ``severity``, ``message``), and the text ``summary``. Regenerate it anytime with the same ``specfact code review run --json --out .specfact/...`` command (or by letting pre-commit run the gate again). + +**Do not forget:** when you want help from GitHub Copilot or Cursor, point the assistant at the JSON file so it can remediate findings with full context. 
Example prompts you can reuse: + +- *Read ``.specfact/code-review.json`` from the latest review run and fix every finding, starting with errors then warnings.* +- *Using the review report at ``.specfact/code-review.json``, apply fixes in the listed files at the given line numbers.* +- *@workspace Attach or open ``.specfact/code-review.json`` and address each ``findings[]`` entry.* + Commit behavior: - `PASS` keeps the commit green @@ -148,7 +176,7 @@ repos: hooks: - id: specfact-code-review name: specfact code review gate - entry: specfact code review run --score-only + entry: specfact code review run --json --out .specfact/code-review.json language: system files: \.pyi?$ ``` diff --git a/docs/reference/documentation-url-contract.md b/docs/reference/documentation-url-contract.md index 121fdff3..fbc82bf7 100644 --- a/docs/reference/documentation-url-contract.md +++ b/docs/reference/documentation-url-contract.md @@ -6,6 +6,13 @@ description: Rules for linking between docs.specfact.io and modules.specfact.io; keywords: [docs contract, handoff, core vs modules] audience: [team, enterprise] expertise_level: [advanced] +doc_owner: specfact-cli +tracks: + - src/specfact_cli/** + - openspec/** +last_reviewed: 2026-03-29 +exempt: false +exempt_reason: "" --- # Documentation URL contract (core and modules) diff --git a/openspec/CHANGE_ORDER.md b/openspec/CHANGE_ORDER.md index 43f32509..03899865 100644 --- a/openspec/CHANGE_ORDER.md +++ b/openspec/CHANGE_ORDER.md @@ -109,7 +109,7 @@ The 2026-03-22 clean-code plan adds one new cross-repo change pair and re-sequen | Module | Order | Change folder | GitHub # | Blocked by | |--------|-------|---------------|----------|------------| | docs | 01 | docs-01-core-modules-docs-alignment | [#348](https://github.com/nold-ai/specfact-cli/issues/348) | module-migration-01 ✅; module-migration-02 ✅; module-migration-03 ✅; module-migration-05 ✅; module-migration-06/07 outputs inform residual cleanup wording | -| docs | 02 | 
doc-frontmatter-schema | pending | — | +| docs | 02 | doc-frontmatter-schema | [#461](https://github.com/nold-ai/specfact-cli/issues/461) | Parent Feature: [#356](https://github.com/nold-ai/specfact-cli/issues/356) | | docs | 03 | ✅ docs-03-command-syntax-parity (archived 2026-03-18) | pending | docs-01 ✅; docs-02 ✅ | | docs | 04 | docs-04-docs-review-gate-and-link-integrity | pending | docs-03 ✅ | | docs | 05 | ci-docs-sync-check | pending | docs-02 (doc-frontmatter-schema) | @@ -378,8 +378,8 @@ Set these in GitHub so issue dependencies are explicit. Optional dependencies ar | [#255](https://github.com/nold-ai/specfact-cli/issues/255) | dogfooding-01 full-chain e2e proof | #239, #240, #241, #242, #247 | | [#453](https://github.com/nold-ai/specfact-cli/issues/453) | speckit-02 v0.4.x adapter alignment | — | | [modules#116](https://github.com/nold-ai/specfact-cli-modules/issues/116) | speckit-03 change proposal bridge *(modules repo)* | speckit-02 (#453) | -| TBD | doc-frontmatter-schema | — | -| TBD | ci-docs-sync-check | doc-frontmatter-schema | +| [#461](https://github.com/nold-ai/specfact-cli/issues/461) | doc-frontmatter-schema | Parent Feature: [#356](https://github.com/nold-ai/specfact-cli/issues/356) | +| TBD | ci-docs-sync-check | doc-frontmatter-schema ([#461](https://github.com/nold-ai/specfact-cli/issues/461)) | | TBD | code-review-02 ruff/radon runners | code-review-01 | | TBD | code-review-03 type/governance runners | code-review-01 | diff --git a/openspec/changes/doc-frontmatter-schema/TDD_EVIDENCE.md b/openspec/changes/doc-frontmatter-schema/TDD_EVIDENCE.md index d11de092..e05fbd3b 100644 --- a/openspec/changes/doc-frontmatter-schema/TDD_EVIDENCE.md +++ b/openspec/changes/doc-frontmatter-schema/TDD_EVIDENCE.md @@ -91,4 +91,84 @@ cd /home/dom/git/nold-ai/specfact-cli-worktrees/feature/doc-frontmatter-schema & **Error:** - `TestPerformance::test_execution_time_with_many_files` - fixture 'benchmark' not found (expected - benchmark fixture not available) 
-**Status:** ✅ TDD workflow confirmed - integration tests fail before implementation \ No newline at end of file +**Status:** ✅ TDD workflow confirmed - integration tests fail before implementation + +--- + +## Post-Implementation (2026-03-29) + +**Commands:** +```bash +cd /home/dom/git/nold-ai/specfact-cli-worktrees/feature/doc-frontmatter-schema +hatch run pytest tests/unit/scripts/test_doc_frontmatter tests/integration/scripts/test_doc_frontmatter -q +``` + +**Result:** 31 passed (unit + integration). `last_reviewed` validation accepts YAML date objects via `isoformat()`. Rollout uses `docs/.doc-frontmatter-enforced`; default CLI skips when file missing; `--all-docs` validates entire `docs/` tree. + +**Status:** ✅ Green + +--- + +## Quality gates (2026-03-30) + +Recorded for `tasks.md` §5 checklist evidence (commands run from the feature worktree). + +**Timestamp:** 2026-03-30 (local) — see session `date` when re-running. + +**Commands (success unless noted):** + +| Step | Command | Result summary | +|------|---------|------------------| +| Format | `hatch run format` | Ruff format + fix applied; exit 0 | +| Type check | `hatch run type-check` | basedpyright strict; exit 0 | +| Lint | `hatch run lint` | Full lint suite; exit 0 | +| Contract | `hatch run contract-test` | Contract-first validation; exit 0 | +| Tests | `hatch run smart-test-full` or `hatch test --cover -v` | Full suite green | +| OpenSpec | `openspec validate doc-frontmatter-schema --strict` | Validation passed | + +**Note:** Re-run the rows above after substantive edits and refresh this table if outputs change. 
+ +--- + +## PR Orchestrator Parallelization Delta (2026-03-30) + +### Pre-Implementation Test Failure (Expected) + +**Timestamp:** 2026-03-30T01:09:14+02:00 + +**Command:** +```bash +cd /home/dom/git/nold-ai/specfact-cli-worktrees/feature/doc-frontmatter-schema +/home/dom/git/nold-ai/specfact-cli/.venv/bin/pytest tests/unit/specfact_cli/registry/test_signing_artifacts.py -q -k 'independent_jobs_do_not_wait_for_tests or quality_gates_still_depends_on_tests_for_coverage' +``` + +**Result:** ✅ 5 tests failed as expected before workflow changes; 1 coverage-dependency guard test passed. + +**Failure summary:** +- `compat-py311` currently depends on `changes` and `tests` instead of `changes` and `verify-module-signatures` +- `contract-first-ci` currently depends on `changes`, `tests`, and `compat-py311` +- `type-checking` currently depends on `changes` and `tests` +- `linting` currently depends on `changes` and `tests` +- `cli-validation` currently depends on `changes` and `contract-first-ci` + +**Status:** ✅ TDD workflow confirmed for the PR-orchestrator dependency delta + +### Post-Implementation Test Success + +**Command:** +```bash +cd /home/dom/git/nold-ai/specfact-cli-worktrees/feature/doc-frontmatter-schema +/home/dom/git/nold-ai/specfact-cli/.venv/bin/pytest tests/unit/specfact_cli/registry/test_signing_artifacts.py -q -k 'independent_jobs_do_not_wait_for_tests or quality_gates_still_depends_on_tests_for_coverage' +/home/dom/git/nold-ai/specfact-cli/.venv/bin/pytest tests/unit/scripts/test_doc_frontmatter tests/integration/scripts/test_doc_frontmatter -q +``` + +**Result:** ✅ Workflow dependency tests passed (`6 passed, 20 deselected`) and doc-frontmatter +regression slice remained green (`34 passed`). 
+ +**Passing summary:** +- `compat-py311`, `contract-first-ci`, `type-checking`, `linting`, and `cli-validation` now depend + on `changes` plus `verify-module-signatures` +- `quality-gates` still depends on `tests` because it consumes coverage artifacts +- Existing doc-frontmatter unit/integration coverage remained unchanged by the workflow delta + +**Status:** ✅ Delta implemented and verified diff --git a/openspec/changes/doc-frontmatter-schema/proposal.md b/openspec/changes/doc-frontmatter-schema/proposal.md index cccc9eef..5d742039 100644 --- a/openspec/changes/doc-frontmatter-schema/proposal.md +++ b/openspec/changes/doc-frontmatter-schema/proposal.md @@ -16,7 +16,7 @@ This change introduces a frontmatter-based documentation ownership and validatio ### New Components - **YAML Frontmatter Schema**: Standardized metadata format for documentation files -- **Validation Script**: `scripts/check-doc-frontmatter.py` to validate doc ownership +- **Validation Script**: `scripts/check_doc_frontmatter.py` to validate doc ownership - **Pre-commit Hook**: Integration with existing pre-commit workflow - **Documentation**: Updated contributing guidelines with frontmatter requirements @@ -24,6 +24,9 @@ This change introduces a frontmatter-based documentation ownership and validatio - **Documentation Files**: Add frontmatter to existing docs in `docs/` directory - **Pre-commit Config**: `.pre-commit-config.yaml` updated with new validation hook - **Contributing Guidelines**: `docs/contributing/` updated with frontmatter documentation +- **PR Orchestrator Workflow**: `.github/workflows/pr-orchestrator.yml` dependency graph reduced so + independent validation jobs can start after signature verification instead of waiting for the + full Python 3.12 test suite ## Capabilities @@ -33,23 +36,30 @@ This change introduces a frontmatter-based documentation ownership and validatio - `docs-contributing-updates`: Updated contributing guidelines with frontmatter documentation ### Modified 
Capabilities -None - this is a new feature that doesn't modify existing capabilities +- `pr-orchestrator-parallelization`: PR workflow jobs that do not consume test artifacts run in + parallel after the shared signature gate ## Impact ### Files to Create -- `scripts/check-doc-frontmatter.py` - Validation script +- `scripts/check_doc_frontmatter.py` - Validation script - `docs/contributing/docs-sync.md` - Frontmatter documentation ### Files to Modify - `.pre-commit-config.yaml` - Add validation hook - `docs/**/*.md` - Add frontmatter metadata - `USAGE-FAQ.md` - Add frontmatter (root-level doc) +- `.github/workflows/pr-orchestrator.yml` - remove unnecessary `needs: tests` / downstream + serialization for independent CI jobs +- `tests/unit/specfact_cli/registry/test_signing_artifacts.py` - lock PR orchestrator dependency + graph for parallel execution ### Development Workflow - Developers must add frontmatter to new documentation files - Pre-commit hooks validate doc metadata on every commit - CI workflows can use metadata for automated sync checking +- PR validation jobs that do not require coverage artifacts or prior test completion should not wait + for the `Tests (Python 3.12)` job ### Quality Gates - Zero errors policy: All validation must pass @@ -58,9 +68,12 @@ None - this is a new feature that doesn't modify existing capabilities - Git worktree patterns: Use git worktrees for isolated development ### GitHub Integration -- GitHub issue sync via specfact after openspec change creation -- Proper labels: `documentation`, `quality`, `automation` -- Link to parent epic: `feature/docs-sync-epic` + +- **Issue**: [#461](https://github.com/nold-ai/specfact-cli/issues/461) — `[Change] Doc Frontmatter Schema & Validation` +- **Labels**: `enhancement`, `documentation`, `change-proposal`, `openspec` +- **Parent Feature**: [#356](https://github.com/nold-ai/specfact-cli/issues/356) — Documentation & Discrepancy Remediation (cross-linked in thread on #356) +- **Repository**: 
`nold-ai/specfact-cli` +- **Status**: synced 2026-03-29 ## Success Criteria @@ -88,4 +101,4 @@ None - this is a new feature that doesn't modify existing capabilities - CI Docs Sync Check (separate change: `ci-docs-sync-check`) - Automated doc update suggestions - Impact analysis for source changes -- Ownership visualization tools \ No newline at end of file +- Ownership visualization tools diff --git a/openspec/changes/doc-frontmatter-schema/specs/doc-frontmatter-validation/spec.md b/openspec/changes/doc-frontmatter-schema/specs/doc-frontmatter-validation/spec.md index 7cb7015f..b233cf26 100644 --- a/openspec/changes/doc-frontmatter-schema/specs/doc-frontmatter-validation/spec.md +++ b/openspec/changes/doc-frontmatter-schema/specs/doc-frontmatter-validation/spec.md @@ -3,16 +3,18 @@ ## ADDED Requirements ### Requirement: Validation Script Implementation -The system SHALL provide a validation script `scripts/check-doc-frontmatter.py` that enforces frontmatter requirements. +The system SHALL provide a validation script `scripts/check_doc_frontmatter.py` that enforces frontmatter requirements. #### Scenario: Script execution with valid docs -- **WHEN** `check-doc-frontmatter.py` is executed + +- **WHEN** `check_doc_frontmatter.py` is executed - **AND** all tracked docs have valid frontmatter - **THEN** script SHALL exit with code 0 - **AND** output SHALL show success message #### Scenario: Script execution with invalid docs -- **WHEN** `check-doc-frontmatter.py` is executed + +- **WHEN** `check_doc_frontmatter.py` is executed - **AND** some docs have invalid frontmatter - **THEN** script SHALL exit with code 1 - **AND** output SHALL list all validation failures @@ -21,11 +23,13 @@ The system SHALL provide a validation script `scripts/check-doc-frontmatter.py` The system SHALL detect documentation files that are missing the `doc_owner` field. 
#### Scenario: Missing doc_owner in tracked file + - **WHEN** a tracked Markdown file lacks `doc_owner` field - **THEN** validation SHALL fail - **AND** error SHALL specify missing field #### Scenario: Valid doc_owner present + - **WHEN** a tracked Markdown file has valid `doc_owner` - **THEN** validation SHALL pass for owner requirement @@ -33,14 +37,17 @@ The system SHALL detect documentation files that are missing the `doc_owner` fie The system SHALL validate that `doc_owner` values resolve to existing paths or known tokens. #### Scenario: Owner resolves to existing path + - **WHEN** `doc_owner` is a path that exists in repository - **THEN** validation SHALL pass for owner resolution #### Scenario: Owner is valid known token + - **WHEN** `doc_owner` is in `VALID_OWNER_TOKENS` - **THEN** validation SHALL pass for owner resolution #### Scenario: Owner cannot be resolved + - **WHEN** `doc_owner` doesn't resolve to path or token - **THEN** validation SHALL fail - **AND** error SHALL suggest valid alternatives @@ -49,11 +56,13 @@ The system SHALL validate that `doc_owner` values resolve to existing paths or k The system SHALL provide helpful fix hints when validation fails. #### Scenario: Fix hint for missing frontmatter + - **WHEN** validation fails due to missing frontmatter - **AND** `--fix-hint` flag is used - **THEN** output SHALL include suggested frontmatter template #### Scenario: Fix hint for invalid owner + - **WHEN** validation fails due to invalid owner - **AND** `--fix-hint` flag is used - **THEN** output SHALL suggest valid owner alternatives @@ -62,11 +71,13 @@ The system SHALL provide helpful fix hints when validation fails. The system SHALL discover all Markdown files that should be tracked for frontmatter validation. 
#### Scenario: Discover files in docs directory + - **WHEN** script runs - **THEN** all `docs/**/*.md` files SHALL be discovered - **AND** exempt files SHALL be excluded #### Scenario: Discover root-level docs + - **WHEN** script runs - **THEN** configured root-level docs SHALL be discovered - **AND** exempt files SHALL be excluded @@ -75,10 +86,12 @@ The system SHALL discover all Markdown files that should be tracked for frontmat The system SHALL properly handle files marked as exempt. #### Scenario: Exempt file with valid reason + - **WHEN** file has `exempt: true` with valid reason - **THEN** file SHALL be excluded from validation #### Scenario: Non-exempt file validation + - **WHEN** file has `exempt: false` or no exempt field - **THEN** file SHALL undergo full validation @@ -90,6 +103,7 @@ The validation script SHALL use `@icontract` decorators for validation logic: - `@ensure` for validation results #### Scenario: Invalid file path input + - **WHEN** script receives invalid file path - **THEN** `@require` contract SHALL raise appropriate exception @@ -97,6 +111,7 @@ The validation script SHALL use `@icontract` decorators for validation logic: The script SHALL handle errors gracefully with appropriate contracts. #### Scenario: File read error + - **WHEN** script encounters file read error - **THEN** error SHALL be caught and handled - **AND** script SHALL continue with other files @@ -107,6 +122,7 @@ The script SHALL handle errors gracefully with appropriate contracts. The validation script SHALL process files efficiently. #### Scenario: Large documentation set + - **WHEN** script processes 100+ documentation files - **THEN** execution SHALL complete in < 2 seconds @@ -114,5 +130,24 @@ The validation script SHALL process files efficiently. The script SHALL be memory efficient. 
#### Scenario: Memory usage with many files + - **WHEN** script processes large number of files -- **THEN** memory usage SHALL remain under 100MB \ No newline at end of file +- **THEN** memory usage SHALL remain under 100MB + +### Requirement: PR Orchestrator Parallel Job Graph +The PR orchestrator workflow SHALL not serialize independent validation jobs behind the Python 3.12 +test suite when those jobs do not consume test artifacts. + +#### Scenario: Independent jobs start after shared signature gate + +- **WHEN** `.github/workflows/pr-orchestrator.yml` defines `compat-py311`, `contract-first-ci`, + `type-checking`, `linting`, and `cli-validation` +- **THEN** each of those jobs SHALL depend on `changes` and the shared signature gate +- **AND** none of those jobs SHALL list `tests` as a required predecessor unless they consume test + artifacts from that job + +#### Scenario: Coverage-based advisory gate still depends on tests + +- **WHEN** `quality-gates` reads the `coverage-reports` artifact +- **THEN** `quality-gates` SHALL keep `tests` as an explicit dependency +- **AND** the workflow SHALL continue to gate that job on unit-coverage availability diff --git a/openspec/changes/doc-frontmatter-schema/tasks.md b/openspec/changes/doc-frontmatter-schema/tasks.md index a7494cff..98cd24c0 100644 --- a/openspec/changes/doc-frontmatter-schema/tasks.md +++ b/openspec/changes/doc-frontmatter-schema/tasks.md @@ -28,11 +28,12 @@ Do not implement production code until tests exist and have been run (expecting - [x] 2.2 Add test dependencies - [x] 2.2.1 Ensure PyYAML is in requirements: `pip install pyyaml` - - [ ] 2.2.2 Add to project dependencies if needed + - [x] 2.2.2 Add to project dependencies if needed (PyYAML already in `pyproject.toml`) ## 3. 
Test Implementation (TDD - Create Tests First) ### 3.1 Frontmatter Schema Tests + - [x] 3.1.1 Create `tests/unit/scripts/test_doc_frontmatter/test_schema.py` - [x] 3.1.1.1 Test valid frontmatter parsing - [x] 3.1.1.2 Test missing required fields detection @@ -45,6 +46,7 @@ Do not implement production code until tests exist and have been run (expecting - [x] 3.1.2.2 Capture failure output in `openspec/changes/doc-frontmatter-schema/TDD_EVIDENCE.md` ### 3.2 Validation Logic Tests + - [x] 3.2.1 Create `tests/unit/scripts/test_doc_frontmatter/test_validation.py` - [x] 3.2.1.1 Test missing doc_owner detection - [x] 3.2.1.2 Test invalid owner resolution @@ -57,6 +59,7 @@ Do not implement production code until tests exist and have been run (expecting - [x] 3.2.2.2 Capture failure output ### 3.3 Integration Tests + - [x] 3.3.1 Create `tests/integration/scripts/test_doc_frontmatter/test_integration.py` - [x] 3.3.1.1 Test end-to-end validation workflow - [x] 3.3.1.2 Test multiple file scenarios @@ -69,7 +72,8 @@ Do not implement production code until tests exist and have been run (expecting ## 4. 
Implementation (Create Code to Pass Tests) ### 4.1 Frontmatter Schema Implementation -- [x] 4.1.1 Create `scripts/check-doc-frontmatter.py` + +- [x] 4.1.1 Create `scripts/check_doc_frontmatter.py` - [x] 4.1.1.1 Implement `parse_frontmatter()` function with `@icontract` and `@beartype` - [x] 4.1.1.2 Implement `extract_doc_owner()` function - [x] 4.1.1.3 Implement `resolve_owner()` function @@ -77,92 +81,120 @@ Do not implement production code until tests exist and have been run (expecting - [x] 4.1.1.5 Implement `suggest_frontmatter()` function ### 4.2 Validation Logic Implementation -- [x] 4.2.1 Complete `check-doc-frontmatter.py` validation logic + +- [x] 4.2.1 Complete `check_doc_frontmatter.py` validation logic - [x] 4.2.1.1 Implement `get_all_md_files()` function - [x] 4.2.1.2 Implement `rg_missing_doc_owner()` function - [x] 4.2.1.3 Implement main validation loop - [x] 4.2.1.4 Implement error reporting with color coding ### 4.3 Command-Line Interface -- [x] 4.3.1 Add argument parsing with `--fix-hint` flag - - [x] 4.3.1.1 Use `argparse` for CLI arguments + +- [x] 4.3.1 Add CLI with `--fix-hint` and `--all-docs` flags + - [x] 4.3.1.1 Use Typer for CLI arguments (Rich-backed help via Typer) - [x] 4.3.1.2 Implement help text and usage examples ### 4.4 Test Execution and Evidence -- [ ] 4.4.1 Run all tests and verify they now pass - - [ ] 4.4.1.1 `pytest tests/unit/scripts/test_doc_frontmatter/ -v` - - [ ] 4.4.1.2 `pytest tests/integration/scripts/test_doc_frontmatter/ -v` - - [ ] 4.4.1.3 Capture passing results in `TDD_EVIDENCE.md` + +- [x] 4.4.1 Run all tests and verify they now pass + - [x] 4.4.1.1 `pytest tests/unit/scripts/test_doc_frontmatter/ -v` + - [x] 4.4.1.2 `pytest tests/integration/scripts/test_doc_frontmatter/ -v` + - [x] 4.4.1.3 Capture passing results in `TDD_EVIDENCE.md` ## 5. 
Quality Gates and Validation ### 5.1 Code Quality Checks -- [ ] 5.1.1 Run formatting: `hatch run format` -- [ ] 5.1.2 Run type checking: `hatch run type-check` -- [ ] 5.1.3 Run linting: `hatch run lint` -- [ ] 5.1.4 Fix any issues found + +- [x] 5.1.1 Run formatting: `hatch run format` +- [x] 5.1.2 Run type checking: `hatch run type-check` +- [x] 5.1.3 Run linting: `hatch run lint` +- [x] 5.1.4 Fix any issues found ### 5.2 Contract Validation -- [ ] 5.2.1 Run contract tests: `hatch run contract-test` -- [ ] 5.2.2 Ensure all `@icontract` and `@beartype` decorators work correctly + +- [x] 5.2.1 Run contract tests: `hatch run contract-test` +- [x] 5.2.2 Ensure all `@icontract` and `@beartype` decorators work correctly ### 5.3 OpenSpec Validation -- [ ] 5.3.1 Run `openspec validate doc-frontmatter-schema --strict` -- [ ] 5.3.2 Fix any validation issues + +- [x] 5.3.1 Run `openspec validate doc-frontmatter-schema --strict` +- [x] 5.3.2 Fix any validation issues + +### 5.4 PR Orchestrator Parallelization Delta + +- [x] 5.4.1 Extend spec/tests to cover PR orchestrator dependency parallelization + - [x] 5.4.1.1 Add spec delta for independent CI job dependencies + - [x] 5.4.1.2 Add workflow dependency tests in `tests/unit/specfact_cli/registry/test_signing_artifacts.py` + - [x] 5.4.1.3 Run the new workflow dependency tests and record the expected failure in `TDD_EVIDENCE.md` +- [x] 5.4.2 Update `.github/workflows/pr-orchestrator.yml` + - [x] 5.4.2.1 Remove unnecessary `tests` / downstream dependencies for jobs that do not consume test artifacts + - [x] 5.4.2.2 Keep real artifact dependencies intact (`quality-gates`, main-branch packaging) +- [x] 5.4.3 Re-run workflow dependency tests and doc-frontmatter regression slice + - [x] 5.4.3.1 Capture passing evidence in `TDD_EVIDENCE.md` ## 6. 
Documentation Updates ### 6.1 Frontmatter Schema Documentation -- [ ] 6.1.1 Create `docs/contributing/docs-sync.md` - - [ ] 6.1.1.1 Document frontmatter schema with examples - - [ ] 6.1.1.2 Explain each field and its purpose - - [ ] 6.1.1.3 Provide common patterns and best practices + +- [x] 6.1.1 Create `docs/contributing/docs-sync.md` + - [x] 6.1.1.1 Document frontmatter schema with examples + - [x] 6.1.1.2 Explain each field and its purpose + - [x] 6.1.1.3 Provide common patterns and best practices ### 6.2 Validation Workflow Documentation -- [ ] 6.2.1 Add validation workflow section to docs - - [ ] 6.2.1.1 Explain how validation works - - [ ] 6.2.1.2 Document error messages and fixes - - [ ] 6.2.1.3 Provide troubleshooting guide + +- [x] 6.2.1 Add validation workflow section to docs + - [x] 6.2.1.1 Explain how validation works + - [x] 6.2.1.2 Document error messages and fixes + - [x] 6.2.1.3 Provide troubleshooting guide ### 6.3 Update Contributing Guidelines -- [ ] 6.3.1 Update `CONTRIBUTING.md` with frontmatter requirements -- [ ] 6.3.2 Add frontmatter section to documentation standards + +- [x] 6.3.1 Update `CONTRIBUTING.md` with frontmatter requirements +- [x] 6.3.2 Add frontmatter section to documentation standards (`docs/contributing/docs-sync.md` is the canonical standards page for this rollout) ## 7. 
Pre-commit Integration ### 7.1 Update Pre-commit Configuration -- [ ] 7.1.1 Modify `.pre-commit-config.yaml` - - [ ] 7.1.1.1 Add `check-doc-frontmatter` hook - - [ ] 7.1.1.2 Configure appropriate file patterns - - [ ] 7.1.1.3 Set `always_run: false` + +- [x] 7.1.1 Modify `.pre-commit-config.yaml` + - [x] 7.1.1.1 Add `check-doc-frontmatter` hook + - [x] 7.1.1.2 Configure appropriate file patterns + - [x] 7.1.1.3 Set `always_run: false` ### 7.2 Test Pre-commit Hook -- [ ] 7.2.1 Run `pre-commit install` to install hooks -- [ ] 7.2.2 Test hook with sample files -- [ ] 7.2.3 Verify hook works correctly + +- [x] 7.2.1 Run `pre-commit install` to install hooks +- [x] 7.2.2 Test hook with sample files +- [x] 7.2.3 Verify hook works correctly ## 8. Sample Implementation and Testing ### 8.1 Add Frontmatter to Sample Files -- [ ] 8.1.1 Add frontmatter to 3-5 sample documentation files -- [ ] 8.1.2 Test validation with sample files -- [ ] 8.1.3 Verify no validation errors + +- [x] 8.1.1 Add frontmatter to 3-5 sample documentation files +- [x] 8.1.2 Test validation with sample files +- [x] 8.1.3 Verify no validation errors ### 8.2 Test Error Scenarios -- [ ] 8.2.1 Create files with invalid frontmatter -- [ ] 8.2.2 Test validation error detection -- [ ] 8.2.3 Verify error messages are helpful + +- [x] 8.2.1 Create files with invalid frontmatter +- [x] 8.2.2 Test validation error detection +- [x] 8.2.3 Verify error messages are helpful ## 9. 
Create GitHub Issue and PR ### 9.1 Create GitHub Issue -- [ ] 9.1.1 Create issue in nold-ai/specfact-cli - - [ ] 9.1.1.1 Title: `[Change] Doc Frontmatter Schema & Validation` - - [ ] 9.1.1.2 Labels: `enhancement`, `change-proposal`, `documentation`, `quality` - - [ ] 9.1.1.3 Body: Summary from proposal.md - - [ ] 9.1.1.4 Link to parent epic: `feature/docs-sync-epic` + +- [x] 9.1.1 Create issue in nold-ai/specfact-cli + - [x] 9.1.1.1 Title: `[Change] Doc Frontmatter Schema & Validation` + - [x] 9.1.1.2 Labels: `enhancement`, `change-proposal`, `documentation`, `openspec` (repo-standard; `quality` not a label — use `QA` if needed later) + - [x] 9.1.1.3 Body: Summary from proposal.md + OpenSpec paths + scope + - [x] 9.1.1.4 Parent Feature: [#356](https://github.com/nold-ai/specfact-cli/issues/356) (linked in issue body; tracking comment on #356) + - [x] 9.1.1.5 **Issue number**: [#461](https://github.com/nold-ai/specfact-cli/issues/461) ### 9.2 Create Pull Request + - [ ] 9.2.1 Prepare commit with all changes - [ ] 9.2.1.1 `git add .` - [ ] 9.2.1.2 `git commit -m "feat: add doc frontmatter schema and validation"` @@ -183,4 +215,4 @@ Do not implement production code until tests exist and have been run (expecting - [ ] 10.3 `git worktree remove ../specfact-cli-worktrees/feature/doc-frontmatter-schema` - [ ] 10.4 `git branch -d feature/doc-frontmatter-schema` - [ ] 10.5 `git worktree prune` -- [ ] 10.6 (Optional) `git push origin --delete feature/doc-frontmatter-schema` \ No newline at end of file +- [ ] 10.6 (Optional) `git push origin --delete feature/doc-frontmatter-schema` diff --git a/openspec/config.yaml b/openspec/config.yaml index 524f18ed..bd1f5118 100644 --- a/openspec/config.yaml +++ b/openspec/config.yaml @@ -61,6 +61,11 @@ context: | `openspec/changes//TDD_EVIDENCE.md` for every behavior change. - If the pattern does not work in practice, adjust the process until it does. 
+
+  Code review JSON (dogfood): Treat `.specfact/code-review.json` as mandatory evidence before an OpenSpec
+  change is complete. If the file is missing or stale (see tasks), run
+  `specfact code review run --json --out .specfact/code-review.json` and resolve **every** finding at any
+  severity (warning or error) unless the proposal documents a rare, explicit exception.
+
 # Per-artifact rules (only injected into matching artifacts)
 rules:
   proposal:
@@ -158,3 +163,19 @@ rules:
       - body following `.github/ISSUE_TEMPLATE/change_proposal.md` template (Why, What Changes sections from proposal)
       - footer `*OpenSpec Change Proposal: *`
       - After creation, update proposal.md Source Tracking section with issue number, URL, repository, and status.
+      - |-
+        SpecFact code review JSON (dogfood, required before PR): Include tasks to
+        - (1) Ensure `.specfact/code-review.json` is present and **fresh**: if the file is missing, or its
+          last-modified time is older than any file you changed in this change under `src/`, `scripts/`,
+          `tools/`, `tests/`, or under `openspec/changes/<change-id>/` **excluding**
+          `openspec/changes/<change-id>/TDD_EVIDENCE.md` (evidence-only updates there do not by themselves
+          invalidate the review file; re-run when proposal, specs, tasks, design, or code change). Run a new
+          review, e.g.
+          `hatch run specfact code review run --json --out .specfact/code-review.json` with `--scope changed`
+          during iteration and `--scope full` (or equivalent coverage) before the final PR.
+        - (2) Read the JSON report and remediate **all** findings regardless of severity (warning, advisory,
+          error, or equivalent in the schema): treat them as blocking until fixed or the proposal explicitly
+          documents a justified exception approved in the change.
+        - (3) Re-run the review after substantive edits until the report shows a passing outcome per the
+          review module (e.g.
overall verdict PASS / CI exit 0); record the review command(s) and timestamp + in `TDD_EVIDENCE.md` or the PR description when the change touches behavior or quality gates. diff --git a/pyproject.toml b/pyproject.toml index 4cb884b4..95873e27 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -4,7 +4,7 @@ build-backend = "hatchling.build" [project] name = "specfact-cli" -version = "0.43.1" +version = "0.43.2" description = "The swiss knife CLI for agile DevOps teams. Keep backlog, specs, tests, and code in sync with validation and contract enforcement for new projects and long-lived codebases." readme = "README.md" requires-python = ">=3.11" @@ -220,7 +220,8 @@ yaml-check-all = "bash scripts/yaml-tools.sh check-all {args}" # Docs validation (docs-12): command examples vs CLI; modules.specfact.io URLs in docs check-docs-commands = "python scripts/check-docs-commands.py" check-cross-site-links = "python scripts/check-cross-site-links.py" -docs-validate = "python scripts/check-docs-commands.py && python scripts/check-cross-site-links.py --warn-only" +doc-frontmatter-check = "python scripts/check_doc_frontmatter.py" +docs-validate = "python scripts/check-docs-commands.py && python scripts/check-cross-site-links.py --warn-only && python scripts/check_doc_frontmatter.py" # Legacy entry (kept for compatibility); prefer `workflows-lint` above lint-workflows = "bash scripts/run_actionlint.sh {args}" @@ -716,6 +717,7 @@ ignore = [ "PLR2004", # magic value comparison "T20", # print found "SLF001", # private member accessed + "E402", # imports after sys.path bootstrap in targeted test modules ] # Tools and scripts can be more lenient diff --git a/scripts/check-cross-site-links.py b/scripts/check-cross-site-links.py old mode 100644 new mode 100755 diff --git a/scripts/check-docs-commands.py b/scripts/check-docs-commands.py old mode 100644 new mode 100755 diff --git a/scripts/check_doc_frontmatter.py b/scripts/check_doc_frontmatter.py new file mode 100755 index 
00000000..83391511 --- /dev/null +++ b/scripts/check_doc_frontmatter.py @@ -0,0 +1,554 @@ +#!/usr/bin/env python3 +"""Validate YAML doc ownership frontmatter for Markdown documentation.""" + +from __future__ import annotations + +import datetime +import fnmatch +import functools +import os +import re +import subprocess +import sys +from pathlib import Path +from typing import Any + +import typer +import yaml +from beartype import beartype +from icontract import ensure, require +from pydantic import BaseModel, ConfigDict, Field, ValidationError, ValidationInfo, field_validator, model_validator +from rich.console import Console +from typer.main import get_command + + +_ERR = Console(stderr=True) + +FRONTMATTER_RE = re.compile(r"^---\s*\n(.*?)\n---", re.DOTALL) +DOC_OWNER_RE = re.compile(r"^\s*doc_owner\s*:\s*['\"]?(.+?)['\"]?\s*$", re.MULTILINE) +LAST_REVIEWED_RE = re.compile(r"^\d{4}-\d{2}-\d{2}$") + +TRACKED_DOC_DIRS = ("docs",) +REQUIRED_ROOT_DOCS: tuple[str, ...] = ("USAGE-FAQ.md",) +EXEMPT_FILES = frozenset( + { + "docs/LICENSE.md", + "docs/TRADEMARKS.md", + "CHANGELOG.md", + "CLA.md", + "CODE_OF_CONDUCT.md", + "SECURITY.md", + "TRADEMARKS.md", + } +) +VALID_OWNER_TOKENS = frozenset({"specfact-cli", "nold-ai", "openspec"}) +SOURCE_ROOTS = (Path("src"), Path("openspec"), Path("modules"), Path("tools")) + +_OWNER_GLOB_METACHARS = frozenset("*?[]{}") +REQUIRED_KEYS = ("title", "doc_owner", "tracks", "last_reviewed", "exempt", "exempt_reason") + + +class DocFrontmatter(BaseModel): + """Validated doc-sync ownership record (YAML frontmatter subset).""" + + model_config = ConfigDict(extra="ignore", str_strip_whitespace=False) + + title: str = Field(..., description="Document title") + doc_owner: str = Field(..., description="Owner path or known token") + tracks: list[str] = Field(..., min_length=1, description="Glob patterns for tracked sources") + last_reviewed: datetime.date = Field(..., description="Last review date (YYYY-MM-DD)") + exempt: bool = Field(..., 
description="Whether the page is exempt from sync rules") + exempt_reason: str = Field(..., description="Reason when exempt; empty string when not exempt") + + @field_validator("tracks", mode="before") + @classmethod + def _tracks_must_be_string_list(cls, value: object) -> list[str]: + if not isinstance(value, list) or not all(isinstance(x, str) for x in value): + raise ValueError("`tracks` must be a list of strings") + return list(value) + + @field_validator("last_reviewed", mode="before") + @classmethod + def _normalize_last_reviewed_field(cls, value: object) -> datetime.date: + if isinstance(value, datetime.datetime): + return value.date() + if isinstance(value, datetime.date): + return value + if isinstance(value, str): + s = value.strip() + if LAST_REVIEWED_RE.match(s): + y, m, d = (int(x) for x in s.split("-")) + return datetime.date(y, m, d) + raise ValueError("`last_reviewed` must be YYYY-MM-DD") + + @field_validator("exempt", mode="before") + @classmethod + def _exempt_must_be_bool(cls, value: object) -> bool: + if not isinstance(value, bool): + raise ValueError("`exempt` must be a boolean") + return value + + @field_validator("doc_owner", "title", "exempt_reason", mode="before") + @classmethod + def _required_strings(cls, value: object, info: ValidationInfo) -> str: + if not isinstance(value, str): + raise ValueError(f"`{info.field_name}` must be a string") + return value + + @model_validator(mode="after") + def _tracks_globs_valid(self) -> DocFrontmatter: + if not validate_glob_patterns(list(self.tracks)): + raise ValueError("invalid glob pattern in `tracks`") + return self + + @model_validator(mode="after") + def _owner_and_exempt_rules(self) -> DocFrontmatter: + if not resolve_owner(self.doc_owner): + raise ValueError( + "doc_owner `" + + self.doc_owner + + "` does not resolve (tokens: " + + ", ".join(sorted(VALID_OWNER_TOKENS)) + + " or repo path)", + ) + if self.exempt and not self.exempt_reason.strip(): + raise ValueError("`exempt_reason` required 
when exempt is true") + return self + + +DocFrontmatter.model_rebuild(_types_namespace={"datetime": datetime}) + + +def _format_doc_frontmatter_errors(exc: ValidationError) -> list[str]: + """Turn Pydantic errors into the same style as legacy manual validation.""" + out: list[str] = [] + for err in exc.errors(): + loc = err.get("loc") or () + key = str(loc[0]) if loc else "record" + typ = err.get("type", "") + if typ == "missing": + out.append(f"missing `{key}`") + continue + msg = str(err.get("msg", "validation error")) + if msg.startswith("Value error, "): + msg = msg[len("Value error, ") :] + out.append(msg) + return out + + +def _root() -> Path: + """Repository root for validation (override with DOC_FRONTMATTER_ROOT in tests).""" + env = os.environ.get("DOC_FRONTMATTER_ROOT") + return Path(env).resolve() if env else Path(__file__).resolve().parents[1] + + +def _enforced_path() -> Path: + return _root() / "docs" / ".doc-frontmatter-enforced" + + +def _path_is_existing_file(path: Path) -> bool: + return path.is_file() + + +@beartype +@require(_path_is_existing_file, "Path must be an existing file") +@ensure(lambda result: isinstance(result, dict), "Must return dict") +def parse_frontmatter(path: Path) -> dict[str, Any]: + """Parse YAML frontmatter from a Markdown file.""" + content = path.read_text(encoding="utf-8") + match = FRONTMATTER_RE.match(content) + if not match: + return {} + loaded = yaml.safe_load(match.group(1)) + return loaded if isinstance(loaded, dict) else {} + + +@beartype +@require(lambda content: isinstance(content, str), "Content must be string") +@ensure(lambda result: result is None or isinstance(result, str), "Must return string or None") +def extract_doc_owner(content: str) -> str | None: + """Extract doc_owner value from raw Markdown content.""" + match = DOC_OWNER_RE.search(content) + return match.group(1).strip() if match else None + + +def _owner_literal_is_safe(owner: str) -> bool: + """Reject empty owners, traversal segments, and 
glob metacharacters.""" + if not isinstance(owner, str): + return False + s = owner.strip() + if not s: + return False + if any(ch in _OWNER_GLOB_METACHARS for ch in s): + return False + parts = Path(s).as_posix().split("/") + return all(part not in ("", ".", "..") for part in parts) + + +def _is_resolved_under(child: Path, root: Path) -> bool: + """True if ``child`` resolves to a path under ``root`` (same device).""" + try: + child.resolve().relative_to(root.resolve()) + except ValueError: + return False + return True + + +def _resolve_owner_absolute_path(owner: str, base_root: Path) -> bool: + """Return True if ``owner`` is an absolute path inside ``base_root`` that exists.""" + candidate = Path(owner) + if not candidate.is_absolute(): + return False + try: + resolved = candidate.resolve() + except OSError: + return False + if not _is_resolved_under(resolved, base_root): + return False + return resolved.exists() + + +def _resolve_owner_repo_relative(owner: str, base_root: Path) -> bool: + """Return True if ``base_root/owner`` resolves under ``base_root`` and exists.""" + rel = (base_root / owner).resolve() + if not _is_resolved_under(rel, base_root): + return False + return rel.exists() + + +def _glob_owner_dir_under_root(owner: str, root: Path) -> bool: + """Return True if a directory named ``owner`` exists anywhere under ``root`` (single-segment owners).""" + try: + for match in root.glob(f"**/{owner}"): + if match.is_dir(): + return True + except OSError: + return False + return False + + +def _find_owner_under_source_roots(owner: str, base_root: Path, owner_single_segment: bool) -> bool: + """Search ``SOURCE_ROOTS`` for a path or directory name matching ``owner``.""" + for rel_root in SOURCE_ROOTS: + root = (base_root / rel_root).resolve() + if not root.exists(): + continue + joined = (root / owner).resolve() + if _is_resolved_under(joined, root) and joined.exists(): + return True + if owner_single_segment and _glob_owner_dir_under_root(owner, root): + return 
True + return False + + +@beartype +@require(lambda owner: isinstance(owner, str), "Owner must be string") +@ensure(lambda result: isinstance(result, bool), "Must return bool") +def resolve_owner(owner: str) -> bool: + """Return True if owner is a known token or an existing path under the repo.""" + return _resolve_owner_impl(owner, str(_root().resolve())) + + +@functools.lru_cache(maxsize=256) +def _resolve_owner_impl(owner: str, root_key: str) -> bool: + """Memoized owner resolution keyed by owner and repository root (see ``resolve_owner``).""" + if owner in VALID_OWNER_TOKENS: + return True + if not _owner_literal_is_safe(owner): + return False + base_root = Path(root_key).resolve() + candidate = Path(owner) + if candidate.is_absolute(): + return _resolve_owner_absolute_path(owner, base_root) + if _resolve_owner_repo_relative(owner, base_root): + return True + owner_single_segment = "/" not in owner.strip().rstrip("/") + return _find_owner_under_source_roots(owner, base_root, owner_single_segment) + + +@beartype +@require(lambda patterns: isinstance(patterns, list), "Patterns must be list") +@ensure(lambda result: isinstance(result, bool), "Must return bool") +def validate_glob_patterns(patterns: list[str]) -> bool: + """Return True if patterns are non-empty, balanced for `[]`/`{}`, and compile as fnmatch regex. + + Full glob semantics follow Python :func:`fnmatch.fnmatch`; this adds bracket/brace balance + checks and a best-effort :func:`fnmatch.translate` + :func:`re.compile` pass for invalid + metacharacter sequences. See ``docs/contributing/frontmatter-schema.md``. 
+ """ + if not patterns: + return False + for pattern in patterns: + p = str(pattern).strip() + if not p: + return False + if p.count("[") != p.count("]"): + return False + if p.count("{") != p.count("}"): + return False + try: + re.compile(fnmatch.translate(p)) + except re.error: + return False + return True + + +@beartype +@require(lambda path: isinstance(path, Path), "Path must be Path object") +@ensure(lambda result: isinstance(result, str), "Must return string") +def suggest_frontmatter(path: Path) -> str: + """Return a suggested frontmatter block for a document.""" + return f"""--- +title: "{path.stem}" +doc_owner: specfact-cli +tracks: + - src/specfact_cli/** + - openspec/** +last_reviewed: {datetime.date.today().isoformat()} +exempt: false +exempt_reason: "" +--- +""" + + +def _rel_posix(path: Path) -> str: + try: + return path.resolve().relative_to(_root().resolve()).as_posix() + except ValueError: + return str(path).replace("\\", "/") + + +def _load_enforced_patterns() -> list[str] | None: + """Return path patterns from docs/.doc-frontmatter-enforced, or None if file missing.""" + path = _enforced_path() + if not path.exists(): + return None + lines: list[str] = [] + for raw in path.read_text(encoding="utf-8").splitlines(): + line = raw.strip() + if not line or line.startswith("#"): + continue + lines.append(line) + return lines + + +def _matches_enforced(rel: str, patterns: list[str]) -> bool: + return any(rel == p or fnmatch.fnmatch(rel, p) for p in patterns) + + +@beartype +@ensure(lambda result: isinstance(result, list), "Must return list") +def get_all_md_files() -> list[Path]: + """Discover Markdown files that are candidates for validation.""" + root = _root() + files: list[Path] = [] + for doc_dir in TRACKED_DOC_DIRS: + target = root / doc_dir + if target.exists(): + files.extend(target.rglob("*.md")) + for name in REQUIRED_ROOT_DOCS: + p = root / name + if p.exists(): + files.append(p) + + filtered: list[Path] = [] + for file_path in files: + rel 
= _rel_posix(file_path) + if rel in EXEMPT_FILES: + continue + try: + fm = parse_frontmatter(file_path) + except (OSError, yaml.YAMLError): + filtered.append(file_path) + continue + exempt_val = fm.get("exempt") + if exempt_val is True and str(fm.get("exempt_reason", "")).strip(): + continue + filtered.append(file_path) + return filtered + + +def _missing_owner_by_scan(files: list[Path]) -> list[Path]: + out: list[Path] = [] + for file_path in files: + try: + content = file_path.read_text(encoding="utf-8") + if not DOC_OWNER_RE.search(content): + out.append(file_path) + except OSError: + out.append(file_path) + return out + + +def _missing_owner_via_rg(files: list[Path]) -> list[Path] | None: + """Return missing list, or None to fall back to line-by-line scan.""" + file_args = [str(f) for f in files] + try: + result = subprocess.run( + ["rg", "--files-without-match", r"^\s*doc_owner\s*:", *file_args], + capture_output=True, + text=True, + timeout=60, + ) + except subprocess.TimeoutExpired: + return None + # rg: 0 = listed files exist, 1 = no listings (e.g. 
all files match pattern), 2+ = error + if result.returncode >= 2: + return None + if result.returncode == 1: + return [] + missing: list[Path] = [] + for line in result.stdout.strip().splitlines(): + if line.strip(): + missing.append(Path(line)) + return missing + + +@beartype +@require(lambda files: isinstance(files, list), "Files must be list") +@ensure(lambda result: isinstance(result, list), "Must return list") +def rg_missing_doc_owner(files: list[Path]) -> list[Path]: + """Return paths whose raw content lacks a doc_owner field.""" + if not files: + return [] + try: + via_rg = _missing_owner_via_rg(files) + if via_rg is not None: + return via_rg + except (FileNotFoundError, subprocess.TimeoutExpired, OSError): + pass + return _missing_owner_by_scan(files) + + +def _validate_record(fm: dict[str, Any]) -> list[str]: + """Return human-readable errors for a frontmatter dict (non-exempt docs).""" + missing = [f"missing `{key}`" for key in REQUIRED_KEYS if key not in fm] + if missing: + return missing + try: + DocFrontmatter.model_validate(fm) + except ValidationError as exc: + return _format_doc_frontmatter_errors(exc) + return [] + + +def _discover_paths_to_check(all_docs: bool) -> list[Path] | None: + """Return files to validate, or None when the run should exit 0 early (skip).""" + if all_docs: + return get_all_md_files() + enforced = _load_enforced_patterns() + if enforced is None: + _ERR.print( + "doc-frontmatter: docs/.doc-frontmatter-enforced not found — skipping " + "(use --all-docs to validate every doc).", + ) + return None + if len(enforced) == 0: + _ERR.print("doc-frontmatter: enforced list is empty — nothing to check.") + return None + return [f for f in get_all_md_files() if _matches_enforced(_rel_posix(f), enforced)] + + +def _collect_failures( + all_files: list[Path], + files_without_owner: list[Path], + fix_hint: bool, +) -> list[str]: + failures: list[str] = [] + for file_path in all_files: + rel = _rel_posix(file_path) + if file_path in 
files_without_owner: + if fix_hint: + failures.append( + f" ✗ MISSING doc_owner: {rel}\n\n Suggested frontmatter:\n{suggest_frontmatter(file_path)}" + ) + else: + failures.append(f" ✗ MISSING doc_owner: {rel}") + continue + try: + fm = parse_frontmatter(file_path) + except (OSError, yaml.YAMLError) as exc: + failures.append(f" ✗ YAML parse error: {rel}: {exc}") + continue + errs = _validate_record(fm) + if errs: + msg = f" ✗ INVALID frontmatter: {rel}\n - " + "\n - ".join(errs) + if fix_hint: + msg += f"\n\n Suggested frontmatter:\n{suggest_frontmatter(file_path)}" + failures.append(msg) + return failures + + +def _argv_ok(argv: list[str] | None) -> bool: + return argv is None or all(isinstance(x, str) for x in argv) + + +def _run_validation(fix_hint: bool, all_docs: bool) -> int: + all_files = _discover_paths_to_check(all_docs) + if all_files is None: + return 0 + if not all_files: + _ERR.print("✅ No documentation paths matched — nothing to check.") + return 0 + + files_without_owner = rg_missing_doc_owner(all_files) + failures = _collect_failures(all_files, files_without_owner, fix_hint) + + if failures: + _ERR.print(f"\n❌ Doc frontmatter validation failed ({len(failures)} issue(s)):\n") + for block in failures: + _ERR.print(block) + if not fix_hint: + _ERR.print("\n💡 Tip: run with --fix-hint for suggested frontmatter blocks.") + return 1 + + scope = "all docs" if all_docs else "enforced list" + _ERR.print(f"✅ Doc frontmatter OK — {len(all_files)} doc(s) checked (scope: {scope}).") + return 0 + + +app = typer.Typer( + add_completion=False, + no_args_is_help=False, + help="Validate documentation ownership frontmatter for Markdown files.", +) + + +@app.callback(invoke_without_command=True) +def _cli_root( + fix_hint: bool = typer.Option( + False, + "--fix-hint", + help="Print suggested YAML blocks for common failures.", + ), + all_docs: bool = typer.Option( + False, + "--all-docs", + help="Validate every discovered Markdown file (ignore 
docs/.doc-frontmatter-enforced).", + ), +) -> int: + """Validate doc frontmatter; exit code 0 on success, 1 on failure.""" + return _run_validation(fix_hint, all_docs) + + +@beartype +@require(_argv_ok, "argv must be None or a list of strings") +@ensure(lambda result: result in (0, 1), "exit code must be 0 or 1") +def main(argv: list[str] | None = None) -> int: + """CLI entry: validate doc frontmatter; exit 0 on success, 1 on failure.""" + args = list(sys.argv[1:]) if argv is None else list(argv) + cli = get_command(app) + try: + exit_code = cli.main(args=args, prog_name="doc-frontmatter-check", standalone_mode=False) + except SystemExit as exc: + code = exc.code + if isinstance(code, int): + return code + return 1 + if exit_code is None: + return 0 + return int(exit_code) + + +if __name__ == "__main__": + raise SystemExit(main()) diff --git a/scripts/pre_commit_code_review.py b/scripts/pre_commit_code_review.py old mode 100644 new mode 100755 index 859fafda..33830869 --- a/scripts/pre_commit_code_review.py +++ b/scripts/pre_commit_code_review.py @@ -1,22 +1,35 @@ -"""Run specfact code review as a staged-file pre-commit gate.""" +"""Run specfact code review as a staged-file pre-commit gate. + +Writes a machine-readable JSON report to ``.specfact/code-review.json`` (gitignored) +so IDEs and Copilot can read findings; exit code still reflects the governed CI verdict. + +CrossHair: skip (importlib/subprocess side effects; not amenable to full symbolic execution) +""" -# CrossHair: ignore # This helper shells out to the CLI and is intentionally side-effecting. 
from __future__ import annotations import importlib +import json import subprocess import sys from collections.abc import Sequence from pathlib import Path +from subprocess import TimeoutExpired +from typing import Any, cast +from beartype import beartype from icontract import ensure, require PYTHON_SUFFIXES = {".py", ".pyi"} +# Default matches dogfood / OpenSpec: machine-readable report under ignored ``.specfact/``. +REVIEW_JSON_OUT = ".specfact/code-review.json" + +@beartype @require(lambda paths: paths is not None) @ensure(lambda result: len(result) == len(set(result))) @ensure(lambda result: all(Path(path).suffix.lower() in PYTHON_SUFFIXES for path in result)) @@ -34,42 +47,163 @@ def filter_review_files(paths: Sequence[str]) -> list[str]: return filtered +@beartype @require(lambda files: files is not None) @ensure(lambda result: result[:5] == [sys.executable, "-m", "specfact_cli.cli", "code", "review"]) -@ensure(lambda result: "--score-only" in result) +@ensure(lambda result: "--json" in result and "--out" in result) +@ensure(lambda result: REVIEW_JSON_OUT in result) def build_review_command(files: Sequence[str]) -> list[str]: - """Build the score-only review command used by the pre-commit gate.""" - return [sys.executable, "-m", "specfact_cli.cli", "code", "review", "run", "--score-only", *files] - - -@ensure(lambda result: isinstance(result[0], bool)) + """Build ``code review run --json --out …`` so findings are written for tooling.""" + return [ + sys.executable, + "-m", + "specfact_cli.cli", + "code", + "review", + "run", + "--json", + "--out", + REVIEW_JSON_OUT, + *files, + ] + + +def _repo_root() -> Path: + """Repository root (parent of ``scripts/``).""" + return Path(__file__).resolve().parents[1] + + +def _count_findings_by_severity(findings: list[object]) -> dict[str, int]: + """Bucket review findings by severity (unknown severities go to ``other``).""" + buckets = {"error": 0, "warning": 0, "advisory": 0, "info": 0, "other": 0} + for item in 
findings: + if not isinstance(item, dict): + buckets["other"] += 1 + continue + row = cast(dict[str, Any], item) + raw = row.get("severity") + if not isinstance(raw, str): + buckets["other"] += 1 + continue + key = raw.lower().strip() + if key in ("error", "err"): + buckets["error"] += 1 + elif key in ("warning", "warn"): + buckets["warning"] += 1 + elif key in ("advisory", "advise"): + buckets["advisory"] += 1 + elif key == "info": + buckets["info"] += 1 + else: + buckets["other"] += 1 + return buckets + + +def _print_review_findings_summary(repo_root: Path) -> None: + """Parse ``REVIEW_JSON_OUT`` and print a one-line findings count (errors / warnings / etc.).""" + report_path = repo_root / REVIEW_JSON_OUT + if not report_path.is_file(): + sys.stderr.write(f"Code review: no report file at {REVIEW_JSON_OUT} (could not print findings summary).\n") + return + try: + data = json.loads(report_path.read_text(encoding="utf-8")) + except (OSError, UnicodeDecodeError) as exc: + sys.stderr.write(f"Code review: could not read {REVIEW_JSON_OUT}: {exc}\n") + return + except json.JSONDecodeError as exc: + sys.stderr.write(f"Code review: invalid JSON in {REVIEW_JSON_OUT}: {exc}\n") + return + + if not isinstance(data, dict): + sys.stderr.write( + f"Code review: expected a JSON object at top level in {REVIEW_JSON_OUT} (got {type(data).__name__}).\n", + ) + return + + report: dict[str, Any] = cast(dict[str, Any], data) + findings_raw = report.get("findings") + if not isinstance(findings_raw, list): + sys.stderr.write(f"Code review: report has no findings list in {REVIEW_JSON_OUT}.\n") + return + + counts = _count_findings_by_severity(findings_raw) + total = len(findings_raw) + verdict = report.get("overall_verdict", "?") + parts = [ + f"errors={counts['error']}", + f"warnings={counts['warning']}", + f"advisory={counts['advisory']}", + ] + if counts["info"]: + parts.append(f"info={counts['info']}") + if counts["other"]: + parts.append(f"other={counts['other']}") + summary = ", 
".join(parts) + # stderr keeps the summary separate from nested `specfact code review run` stdout; enable hook + # `verbose: true` in .pre-commit-config.yaml so pre-commit prints hook output when the hook passes. + sys.stderr.write(f"Code review summary: {total} finding(s) ({summary}); overall_verdict={verdict!r}.\n") + abs_report = report_path.resolve() + sys.stderr.write(f"Code review report file: {REVIEW_JSON_OUT}\n") + sys.stderr.write(f" absolute path: {abs_report}\n") + sys.stderr.write("Copy-paste for Copilot or Cursor:\n") + sys.stderr.write( + f" Read `{REVIEW_JSON_OUT}` and fix every finding (errors first), using file and line from each entry.\n" + ) + sys.stderr.write(f" @workspace Open `{REVIEW_JSON_OUT}` and remediate each item in `findings`.\n") + + +@beartype +@ensure(lambda result: isinstance(result, tuple) and len(result) == 2) +@ensure(lambda result: isinstance(result[0], bool) and (result[1] is None or isinstance(result[1], str))) def ensure_runtime_available() -> tuple[bool, str | None]: """Verify the current Python environment can import SpecFact CLI.""" try: importlib.import_module("specfact_cli.cli") - except ModuleNotFoundError: - return False, 'Install dev dependencies with `pip install -e ".[dev]"` or run `hatch env create`.' + except ModuleNotFoundError as exc: + if exc.name in ("specfact_cli", "specfact_cli.cli"): + return False, 'Install dev dependencies with `pip install -e ".[dev]"` or run `hatch env create`.' 
+ raise return True, None +@beartype +@require(lambda argv: argv is None or isinstance(argv, (list, tuple))) @ensure(lambda result: isinstance(result, int)) def main(argv: Sequence[str] | None = None) -> int: - """Run the score-only review gate over staged Python files.""" - files = filter_review_files(list(argv or [])) - if not files: + """Run the code review gate; write JSON under ``.specfact/`` and return CLI exit code.""" + paths_arg = [] if argv is None else list(argv) + files = filter_review_files(paths_arg) + try: + files[0] + except IndexError: sys.stdout.write("No staged Python files to review; skipping code review gate.\n") return 0 available, guidance = ensure_runtime_available() - if not available: + if available is False: sys.stdout.write(f"Unable to run the code review gate. {guidance}\n") return 1 - result = subprocess.run(build_review_command(files), check=False, text=True, capture_output=True) - if result.stdout: - sys.stdout.write(result.stdout) - if result.stderr: - sys.stderr.write(result.stderr) + cmd = build_review_command(files) + report_path = _repo_root() / REVIEW_JSON_OUT + if report_path.is_file(): + report_path.unlink() + try: + result = subprocess.run( + cmd, + check=False, + text=True, + capture_output=True, + cwd=str(_repo_root()), + timeout=300, + ) + except TimeoutExpired: + joined_cmd = " ".join(cmd) + sys.stderr.write(f"Code review gate timed out after 300s (command: {joined_cmd!r}, files: {files!r}).\n") + return 1 + # Do not echo nested `specfact code review run` stdout/stderr (verbose tool banners); full report + # is in REVIEW_JSON_OUT; we print a short summary on stderr below. 
+ _print_review_findings_summary(_repo_root()) return result.returncode diff --git a/scripts/publish-module.py b/scripts/publish-module.py old mode 100644 new mode 100755 diff --git a/scripts/validate-modules-repo-sync.py b/scripts/validate-modules-repo-sync.py old mode 100644 new mode 100755 diff --git a/scripts/verify-bundle-published.py b/scripts/verify-bundle-published.py old mode 100644 new mode 100755 diff --git a/setup.py b/setup.py index f5df4c72..525f4944 100644 --- a/setup.py +++ b/setup.py @@ -7,7 +7,7 @@ if __name__ == "__main__": _setup = setup( name="specfact-cli", - version="0.43.1", + version="0.43.2", description=( "The swiss knife CLI for agile DevOps teams. Keep backlog, specs, tests, and code in sync with " "validation and contract enforcement for new projects and long-lived codebases." diff --git a/src/__init__.py b/src/__init__.py index d44458c3..9678e8e8 100644 --- a/src/__init__.py +++ b/src/__init__.py @@ -3,4 +3,4 @@ """ # Package version: keep in sync with pyproject.toml, setup.py, src/specfact_cli/__init__.py -__version__ = "0.43.1" +__version__ = "0.43.2" diff --git a/src/specfact_cli/__init__.py b/src/specfact_cli/__init__.py index ef990799..bbbe0769 100644 --- a/src/specfact_cli/__init__.py +++ b/src/specfact_cli/__init__.py @@ -6,6 +6,9 @@ - Keeping backlog, specs, tests, and code in sync - Enforcing validation and contract checks before production - Supporting agile ceremonies and team workflows + +When a sibling ``specfact-cli-modules`` checkout exists, startup prepends each bundle's ``src`` +to ``sys.path`` so local development can load marketplace packages without installing wheels. 
""" from __future__ import annotations @@ -42,6 +45,6 @@ def _bootstrap_bundle_paths() -> None: _bootstrap_bundle_paths() -__version__ = "0.43.1" +__version__ = "0.43.2" __all__ = ["__version__"] diff --git a/tests/conftest.py b/tests/conftest.py index 44b4ee65..20434f96 100644 --- a/tests/conftest.py +++ b/tests/conftest.py @@ -139,3 +139,8 @@ def pytest_collection_modifyitems(config: object, items: list[object]) -> None: continue if _should_skip_migrated_test(rel): item.add_marker(skip_marker) + + +# Pytest 8+ / 9+: ``pytest_plugins`` is only allowed in the rootdir conftest (not in nested +# packages). Doc frontmatter fixtures live in ``tests.helpers.doc_frontmatter_fixtures``. +pytest_plugins = ("tests.helpers.doc_frontmatter_fixtures",) diff --git a/tests/helpers/__init__.py b/tests/helpers/__init__.py new file mode 100644 index 00000000..26787be5 --- /dev/null +++ b/tests/helpers/__init__.py @@ -0,0 +1 @@ +"""Shared test helpers (importable from unit and integration tests).""" diff --git a/tests/helpers/doc_frontmatter.py b/tests/helpers/doc_frontmatter.py new file mode 100644 index 00000000..a033374c --- /dev/null +++ b/tests/helpers/doc_frontmatter.py @@ -0,0 +1,67 @@ +"""Shared helpers for doc frontmatter unit and integration tests.""" + +from __future__ import annotations + +import datetime +import importlib.util +from collections.abc import Callable +from pathlib import Path +from typing import cast + +from tests.helpers.doc_frontmatter_types import CheckDocFrontmatterModule + + +def repo_root() -> Path: + """Repository root (parent of ``tests/``).""" + return Path(__file__).resolve().parents[2] + + +def load_check_doc_frontmatter_module() -> CheckDocFrontmatterModule: + """Load ``scripts/check_doc_frontmatter.py`` without mutating ``sys.path``.""" + script_path = repo_root() / "scripts" / "check_doc_frontmatter.py" + spec = importlib.util.spec_from_file_location("check_doc_frontmatter", script_path) + if spec is None or spec.loader is None: + raise 
AssertionError(f"Unable to load module from {script_path}") + module = importlib.util.module_from_spec(spec) + spec.loader.exec_module(module) + dfm = getattr(module, "DocFrontmatter", None) + if dfm is not None: + dfm.model_rebuild(_types_namespace={"datetime": datetime}) + return cast(CheckDocFrontmatterModule, module) + + +def validation_main_fn(mod: CheckDocFrontmatterModule) -> Callable[[list[str] | None], int]: + """Return ``check_doc_frontmatter.main`` with a concrete type for static analysis.""" + return mod.main + + +VALID_DOC_FRONTMATTER = """--- +title: "Valid Document" +doc_owner: src/test/module +tracks: + - src/test/** +last_reviewed: 2026-03-20 +exempt: false +exempt_reason: "" +--- + +# Valid content""" + + +def write_enforced(root: Path, *relative_paths: str) -> None: + """Write ``docs/.doc-frontmatter-enforced`` with the given repo-relative paths.""" + enforced = root / "docs" / ".doc-frontmatter-enforced" + enforced.parent.mkdir(parents=True, exist_ok=True) + enforced.write_text("\n".join(relative_paths) + "\n", encoding="utf-8") + + +def enforce_all_markdown_under_docs(root: Path) -> None: + """Populate enforced list with every ``docs/**/*.md`` path (except the enforced file itself).""" + lines: list[str] = [] + docs = root / "docs" + if docs.exists(): + for p in docs.rglob("*.md"): + if p.name == ".doc-frontmatter-enforced": + continue + lines.append(p.relative_to(root).as_posix()) + write_enforced(root, *lines) diff --git a/tests/helpers/doc_frontmatter_fixtures.py b/tests/helpers/doc_frontmatter_fixtures.py new file mode 100644 index 00000000..f3833306 --- /dev/null +++ b/tests/helpers/doc_frontmatter_fixtures.py @@ -0,0 +1,45 @@ +"""Shared pytest fixtures for doc frontmatter unit and integration tests. + +Registered from ``tests/conftest.py`` via ``pytest_plugins`` (Pytest 8+ requires a +root-level conftest for plugin lists; nested ``conftest.py`` cannot use +``pytest_plugins``). 
+""" + +from __future__ import annotations + +import pytest + +from tests.helpers.doc_frontmatter import load_check_doc_frontmatter_module +from tests.helpers.doc_frontmatter_types import CheckDocFrontmatterModule + + +_DOC_FRONTMATTER_PATH = "test_doc_frontmatter" + + +def pytest_runtest_setup(item: pytest.Item) -> None: + """Clear owner-resolution cache between doc-frontmatter tests (lru_cache isolation).""" + nodeid = getattr(item, "nodeid", "") or "" + path_str = "" + path = getattr(item, "path", None) + if path is not None: + path_str = str(path) + if _DOC_FRONTMATTER_PATH not in nodeid and _DOC_FRONTMATTER_PATH not in path_str: + return + mod = load_check_doc_frontmatter_module() + mod._resolve_owner_impl.cache_clear() + + +@pytest.fixture(scope="session") +def check_doc_frontmatter_module() -> CheckDocFrontmatterModule: + """Single loaded instance of ``scripts/check_doc_frontmatter.py`` (no ``sys.path`` hacks).""" + return load_check_doc_frontmatter_module() + + +@pytest.fixture(autouse=True) +def _clear_doc_frontmatter_root(request: pytest.FixtureRequest, monkeypatch: pytest.MonkeyPatch) -> None: + """Isolate ``DOC_FRONTMATTER_ROOT`` for doc-frontmatter tests only.""" + nodeid = getattr(request.node, "nodeid", "") or "" + path_str = str(getattr(request.node, "path", "")) + if _DOC_FRONTMATTER_PATH not in nodeid and _DOC_FRONTMATTER_PATH not in path_str: + return + monkeypatch.delenv("DOC_FRONTMATTER_ROOT", raising=False) diff --git a/tests/helpers/doc_frontmatter_types.py b/tests/helpers/doc_frontmatter_types.py new file mode 100644 index 00000000..2291a231 --- /dev/null +++ b/tests/helpers/doc_frontmatter_types.py @@ -0,0 +1,33 @@ +"""Typing helpers for dynamically loaded ``check_doc_frontmatter`` module.""" + +from __future__ import annotations + +from collections.abc import Callable +from pathlib import Path +from typing import Any, Protocol, runtime_checkable + + +@runtime_checkable +class ResolveOwnerImplWithCache(Protocol): + """Callable owner 
resolver exposing ``cache_clear`` like ``functools.lru_cache``.""" + + def __call__(self, owner: str, root_key: str) -> bool: ... + + def cache_clear(self) -> None: ... + + +@runtime_checkable +class CheckDocFrontmatterModule(Protocol): + """Structural type for ``scripts/check_doc_frontmatter.py`` loaded via importlib.""" + + DocFrontmatter: type + parse_frontmatter: Callable[[Path], dict[str, Any]] + resolve_owner: Callable[[str], bool] + validate_glob_patterns: Callable[[list[str]], bool] + suggest_frontmatter: Callable[[Path], str] + extract_doc_owner: Callable[[str], str | None] + get_all_md_files: Callable[[], list[Path]] + rg_missing_doc_owner: Callable[[list[Path]], list[Path]] + main: Callable[[list[str] | None], int] + datetime: Any + _resolve_owner_impl: ResolveOwnerImplWithCache diff --git a/tests/integration/scripts/test_doc_frontmatter/test_integration.py b/tests/integration/scripts/test_doc_frontmatter/test_integration.py new file mode 100644 index 00000000..0ce23333 --- /dev/null +++ b/tests/integration/scripts/test_doc_frontmatter/test_integration.py @@ -0,0 +1,254 @@ +#!/usr/bin/env python3 +"""Integration tests for doc frontmatter validation.""" + +from __future__ import annotations + +import re +import tempfile +from pathlib import Path + +import pytest + +from tests.helpers.doc_frontmatter import ( + VALID_DOC_FRONTMATTER, + enforce_all_markdown_under_docs, + validation_main_fn, + write_enforced, +) +from tests.helpers.doc_frontmatter_types import CheckDocFrontmatterModule + + +ANSI_ESCAPE_RE = re.compile(r"\x1b\[[0-9;]*[A-Za-z]") + + +class TestEndToEndWorkflow: + """Test complete end-to-end validation workflows.""" + + def test_complete_validation_workflow( + self, monkeypatch: pytest.MonkeyPatch, check_doc_frontmatter_module: CheckDocFrontmatterModule + ) -> None: + """Test the complete validation workflow with various file types.""" + validation_main = validation_main_fn(check_doc_frontmatter_module) + with tempfile.TemporaryDirectory() as 
temp_dir: + root = Path(temp_dir) + monkeypatch.setenv("DOC_FRONTMATTER_ROOT", str(root)) + docs_dir = root / "docs" + docs_dir.mkdir() + (docs_dir / "valid.md").write_text(VALID_DOC_FRONTMATTER) + (docs_dir / "invalid.md").write_text( + """--- +title: "Invalid Document" +--- + +# Missing required fields""" + ) + (docs_dir / "no_frontmatter.md").write_text("# No frontmatter at all") + (root / "src" / "test" / "module").mkdir(parents=True) + enforce_all_markdown_under_docs(root) + assert validation_main([]) == 1 + + def test_validation_with_all_valid_files( + self, monkeypatch: pytest.MonkeyPatch, check_doc_frontmatter_module: CheckDocFrontmatterModule + ) -> None: + """Test validation workflow when all files are valid.""" + validation_main = validation_main_fn(check_doc_frontmatter_module) + with tempfile.TemporaryDirectory() as temp_dir: + root = Path(temp_dir) + monkeypatch.setenv("DOC_FRONTMATTER_ROOT", str(root)) + docs_dir = root / "docs" + docs_dir.mkdir() + for i in range(3): + (docs_dir / f"valid{i}.md").write_text( + VALID_DOC_FRONTMATTER.replace("Valid Document", f"Valid Document {i}") + ) + (root / "src" / "test" / "module").mkdir(parents=True) + enforce_all_markdown_under_docs(root) + assert validation_main([]) == 0 + + +class TestMultipleFileScenarios: + """Test validation with multiple files and complex scenarios.""" + + def test_large_number_of_files( + self, monkeypatch: pytest.MonkeyPatch, check_doc_frontmatter_module: CheckDocFrontmatterModule + ) -> None: + """Test performance with a large number of files.""" + validation_main = validation_main_fn(check_doc_frontmatter_module) + with tempfile.TemporaryDirectory() as temp_dir: + root = Path(temp_dir) + monkeypatch.setenv("DOC_FRONTMATTER_ROOT", str(root)) + docs_dir = root / "docs" + docs_dir.mkdir() + for i in range(50): + if i % 3 == 0: + (docs_dir / f"file{i}.md").write_text( + f"""--- +title: "File {i}" +--- + +# Missing doc_owner""" + ) + else: + (docs_dir / 
f"file{i}.md").write_text(VALID_DOC_FRONTMATTER.replace("Valid Document", f"File {i}")) + (root / "src" / "test" / "module").mkdir(parents=True) + enforce_all_markdown_under_docs(root) + assert validation_main([]) == 1 + + def test_nested_directory_structure( + self, monkeypatch: pytest.MonkeyPatch, check_doc_frontmatter_module: CheckDocFrontmatterModule + ) -> None: + """Test with nested directory structures.""" + validation_main = validation_main_fn(check_doc_frontmatter_module) + with tempfile.TemporaryDirectory() as temp_dir: + root = Path(temp_dir) + monkeypatch.setenv("DOC_FRONTMATTER_ROOT", str(root)) + docs_dir = root / "docs" + docs_dir.mkdir() + (docs_dir / "level1" / "level2" / "level3").mkdir(parents=True) + for rel in ( + "root.md", + "level1/level1.md", + "level1/level2/level2.md", + "level1/level2/level3/level3.md", + ): + (docs_dir / rel).write_text(VALID_DOC_FRONTMATTER.replace("Valid Document", rel)) + (root / "src" / "test" / "module").mkdir(parents=True) + enforce_all_markdown_under_docs(root) + assert validation_main([]) == 0 + + +class TestScaleSmoke: + """Scale / smoke tests: many files and large bodies (not benchmark assertions).""" + + def test_execution_time_with_many_files( + self, monkeypatch: pytest.MonkeyPatch, check_doc_frontmatter_module: CheckDocFrontmatterModule + ) -> None: + """Many small valid docs complete successfully (smoke, not timing assertions).""" + validation_main = validation_main_fn(check_doc_frontmatter_module) + with tempfile.TemporaryDirectory() as temp_dir: + root = Path(temp_dir) + monkeypatch.setenv("DOC_FRONTMATTER_ROOT", str(root)) + docs_dir = root / "docs" + docs_dir.mkdir() + for i in range(100): + (docs_dir / f"file{i}.md").write_text(VALID_DOC_FRONTMATTER.replace("Valid Document", f"File {i}")) + (root / "src" / "test" / "module").mkdir(parents=True) + enforce_all_markdown_under_docs(root) + assert validation_main([]) == 0 + + def test_memory_usage_with_large_files( + self, monkeypatch: pytest.MonkeyPatch, 
check_doc_frontmatter_module: CheckDocFrontmatterModule + ) -> None: + """Large Markdown bodies still validate (smoke, not memory instrumentation).""" + validation_main = validation_main_fn(check_doc_frontmatter_module) + with tempfile.TemporaryDirectory() as temp_dir: + root = Path(temp_dir) + monkeypatch.setenv("DOC_FRONTMATTER_ROOT", str(root)) + docs_dir = root / "docs" + docs_dir.mkdir() + large_content = "# Large content\n" * 1000 + for i in range(10): + body = VALID_DOC_FRONTMATTER.replace("Valid Document", f"Large File {i}") + "\n" + large_content + (docs_dir / f"large{i}.md").write_text(body) + (root / "src" / "test" / "module").mkdir(parents=True) + enforce_all_markdown_under_docs(root) + assert validation_main([]) == 0 + + +class TestCommandLineInterface: + """Test command-line interface functionality.""" + + def test_cli_with_fix_hint_flag( + self, monkeypatch: pytest.MonkeyPatch, check_doc_frontmatter_module: CheckDocFrontmatterModule + ) -> None: + """Test CLI with --fix-hint flag.""" + validation_main = validation_main_fn(check_doc_frontmatter_module) + with tempfile.TemporaryDirectory() as temp_dir: + root = Path(temp_dir) + monkeypatch.setenv("DOC_FRONTMATTER_ROOT", str(root)) + docs_dir = root / "docs" + docs_dir.mkdir() + (docs_dir / "invalid.md").write_text("# Missing frontmatter") + write_enforced(root, "docs/invalid.md") + assert validation_main(["--fix-hint"]) == 1 + + def test_cli_help_output( + self, capsys: pytest.CaptureFixture[str], check_doc_frontmatter_module: CheckDocFrontmatterModule + ) -> None: + """Test CLI help output.""" + validation_main = validation_main_fn(check_doc_frontmatter_module) + assert validation_main(["--help"]) == 0 + captured = capsys.readouterr() + combined = ANSI_ESCAPE_RE.sub("", captured.out + captured.err).lower() + assert "usage" in combined or "help" in combined + assert "--fix-hint" in combined + + +class TestRealWorldScenarios: + """Test real-world usage scenarios.""" + + def 
test_mixed_exempt_and_regular_files( + self, monkeypatch: pytest.MonkeyPatch, check_doc_frontmatter_module: CheckDocFrontmatterModule + ) -> None: + """Test scenario with both exempt and regular files.""" + validation_main = validation_main_fn(check_doc_frontmatter_module) + with tempfile.TemporaryDirectory() as temp_dir: + root = Path(temp_dir) + monkeypatch.setenv("DOC_FRONTMATTER_ROOT", str(root)) + docs_dir = root / "docs" + docs_dir.mkdir() + (docs_dir / "LICENSE.md").write_text("# License") + (docs_dir / "exempt.md").write_text( + """--- +title: "Exempt" +exempt: true +exempt_reason: "Legal document" +--- + +# Exempt content""" + ) + (docs_dir / "regular.md").write_text(VALID_DOC_FRONTMATTER) + (docs_dir / "invalid.md").write_text( + """--- +title: "Invalid" +--- + +# Missing doc_owner""" + ) + (root / "src" / "test" / "module").mkdir(parents=True) + write_enforced(root, "docs/regular.md", "docs/invalid.md", "docs/exempt.md") + assert validation_main([]) == 1 + + def test_complex_tracking_patterns( + self, monkeypatch: pytest.MonkeyPatch, check_doc_frontmatter_module: CheckDocFrontmatterModule + ) -> None: + """Test with complex glob patterns in tracks field.""" + validation_main = validation_main_fn(check_doc_frontmatter_module) + with tempfile.TemporaryDirectory() as temp_dir: + root = Path(temp_dir) + monkeypatch.setenv("DOC_FRONTMATTER_ROOT", str(root)) + docs_dir = root / "docs" + docs_dir.mkdir() + (docs_dir / "complex1.md").write_text( + """--- +title: "Complex Patterns" +doc_owner: src/test/module +tracks: + - src/**/*.py + - tests/**/test_*.py + - docs/**/*.md + - "**/excluded/**" +last_reviewed: 2026-03-20 +exempt: false +exempt_reason: "" +--- + +# Complex tracking patterns""" + ) + (root / "src" / "test" / "module").mkdir(parents=True) + write_enforced(root, "docs/complex1.md") + assert validation_main([]) == 0 + + +if __name__ == "__main__": + pytest.main([__file__, "-v"]) diff --git a/tests/unit/scripts/test_code_review_module_docs.py 
b/tests/unit/scripts/test_code_review_module_docs.py index d2b90f6e..87542316 100644 --- a/tests/unit/scripts/test_code_review_module_docs.py +++ b/tests/unit/scripts/test_code_review_module_docs.py @@ -11,7 +11,12 @@ def test_code_review_docs_cover_pre_commit_gate_and_portable_adoption() -> None: docs = _docs_text() assert "## Pre-Commit Review Gate" in docs assert ".pre-commit-config.yaml" in docs - assert "specfact code review run --score-only" in docs + assert "specfact code review run" in docs + assert ".specfact/code-review.json" in docs + assert "verbose: true" in docs + assert "Verdict line, report file, and Copilot" in docs + assert "Code review summary" in docs + assert "copilot" in docs.lower() assert "## Add to Any Project" in docs diff --git a/tests/unit/scripts/test_doc_frontmatter/test_schema.py b/tests/unit/scripts/test_doc_frontmatter/test_schema.py new file mode 100644 index 00000000..0f0e2029 --- /dev/null +++ b/tests/unit/scripts/test_doc_frontmatter/test_schema.py @@ -0,0 +1,224 @@ +#!/usr/bin/env python3 +""" +Test suite for doc frontmatter schema functionality. +Tests the YAML frontmatter parsing and validation logic. 
+""" + +from __future__ import annotations + +import datetime +import types +from pathlib import Path + +import pytest + +from tests.helpers.doc_frontmatter_types import CheckDocFrontmatterModule + + +class TestFrontmatterParsing: + """Test YAML frontmatter parsing functionality.""" + + def test_valid_frontmatter_parsing( + self, tmp_path: Path, check_doc_frontmatter_module: CheckDocFrontmatterModule + ) -> None: + """Test parsing valid YAML frontmatter.""" + parse_frontmatter = check_doc_frontmatter_module.parse_frontmatter + content = """--- +title: "Test Document" +doc_owner: src/test/module +tracks: + - src/test/** +last_reviewed: 2026-03-20 +exempt: false +exempt_reason: "" +--- + +# Document content here""" + + test_file = tmp_path / "test.md" + test_file.write_text(content, encoding="utf-8") + result = parse_frontmatter(test_file) + assert result is not None + assert "title" in result + assert "doc_owner" in result + assert "tracks" in result + assert result["title"] == "Test Document" + assert result["doc_owner"] == "src/test/module" + assert result["tracks"] == ["src/test/**"] + + def test_missing_required_fields( + self, tmp_path: Path, check_doc_frontmatter_module: CheckDocFrontmatterModule + ) -> None: + """Test detection of missing required fields.""" + parse_frontmatter = check_doc_frontmatter_module.parse_frontmatter + content = """--- +title: "Incomplete Document" +--- + +# Document content here""" + + test_file = tmp_path / "incomplete.md" + test_file.write_text(content, encoding="utf-8") + result = parse_frontmatter(test_file) + assert result is not None + + def test_no_frontmatter(self, tmp_path: Path, check_doc_frontmatter_module: CheckDocFrontmatterModule) -> None: + """Test handling of files without frontmatter.""" + parse_frontmatter = check_doc_frontmatter_module.parse_frontmatter + content = "# Document without frontmatter" + + test_file = tmp_path / "plain.md" + test_file.write_text(content, encoding="utf-8") + result = 
parse_frontmatter(test_file) + assert result == {} + + +class TestOwnerResolution: + """Test owner identifier resolution functionality.""" + + def test_token_owner_resolution( + self, monkeypatch: pytest.MonkeyPatch, tmp_path: Path, check_doc_frontmatter_module: CheckDocFrontmatterModule + ) -> None: + """Known token ``openspec`` resolves without filesystem lookup.""" + monkeypatch.setenv("DOC_FRONTMATTER_ROOT", str(tmp_path)) + resolve_owner = check_doc_frontmatter_module.resolve_owner + result = resolve_owner("openspec") + assert result is True + + def test_filesystem_owner_resolution( + self, monkeypatch: pytest.MonkeyPatch, tmp_path: Path, check_doc_frontmatter_module: CheckDocFrontmatterModule + ) -> None: + """Path-like owner resolves when the directory exists under DOC_FRONTMATTER_ROOT.""" + monkeypatch.setenv("DOC_FRONTMATTER_ROOT", str(tmp_path)) + (tmp_path / "src" / "fs_owner_pkg").mkdir(parents=True) + resolve_owner = check_doc_frontmatter_module.resolve_owner + assert resolve_owner("src/fs_owner_pkg") is True + + def test_known_token_resolution( + self, monkeypatch: pytest.MonkeyPatch, tmp_path: Path, check_doc_frontmatter_module: CheckDocFrontmatterModule + ) -> None: + """Test resolution of known owner tokens.""" + monkeypatch.setenv("DOC_FRONTMATTER_ROOT", str(tmp_path)) + resolve_owner = check_doc_frontmatter_module.resolve_owner + result = resolve_owner("specfact-cli") + assert result is True + + def test_invalid_owner_resolution( + self, monkeypatch: pytest.MonkeyPatch, tmp_path: Path, check_doc_frontmatter_module: CheckDocFrontmatterModule + ) -> None: + """Test handling of invalid owner identifiers.""" + monkeypatch.setenv("DOC_FRONTMATTER_ROOT", str(tmp_path)) + resolve_owner = check_doc_frontmatter_module.resolve_owner + result = resolve_owner("nonexistent-owner") + assert result is False + + def test_owner_rejects_traversal_and_glob( + self, monkeypatch: pytest.MonkeyPatch, tmp_path: Path, check_doc_frontmatter_module: 
CheckDocFrontmatterModule + ) -> None: + """Unsafe owner literals do not resolve.""" + monkeypatch.setenv("DOC_FRONTMATTER_ROOT", str(tmp_path)) + (tmp_path / "safe").mkdir() + resolve_owner = check_doc_frontmatter_module.resolve_owner + assert resolve_owner("../safe") is False + assert resolve_owner("**") is False + assert resolve_owner("") is False + + +class TestDocFrontmatterModel: + """Pydantic model for validated ownership records.""" + + def test_model_validate_accepts_minimal_valid_dict( + self, check_doc_frontmatter_module: CheckDocFrontmatterModule + ) -> None: + doc_frontmatter_model = check_doc_frontmatter_module.DocFrontmatter + data = { + "title": "T", + "doc_owner": "specfact-cli", + "tracks": ["src/**"], + "last_reviewed": "2026-01-01", + "exempt": False, + "exempt_reason": "", + } + rec = doc_frontmatter_model.model_validate(data) + assert rec.title == "T" + assert rec.doc_owner == "specfact-cli" + assert rec.tracks == ["src/**"] + assert str(rec.last_reviewed) == "2026-01-01" + + +class TestGlobPatternValidation: + """Test glob pattern validation functionality.""" + + def test_valid_glob_patterns(self, check_doc_frontmatter_module: CheckDocFrontmatterModule) -> None: + """Test validation of valid glob patterns.""" + validate_glob_patterns = check_doc_frontmatter_module.validate_glob_patterns + patterns = ["src/test/**", "docs/**/*.md", "*.py"] + result = validate_glob_patterns(patterns) + assert result is True + + def test_invalid_glob_patterns(self, check_doc_frontmatter_module: CheckDocFrontmatterModule) -> None: + """Test detection of invalid glob patterns.""" + validate_glob_patterns = check_doc_frontmatter_module.validate_glob_patterns + patterns = ["src/test/[", "invalid{{pattern"] + result = validate_glob_patterns(patterns) + assert result is False + + +class TestFrontmatterSuggestions: + """Test frontmatter suggestion generation.""" + + def test_suggest_frontmatter_template( + self, monkeypatch: pytest.MonkeyPatch, 
check_doc_frontmatter_module: CheckDocFrontmatterModule + ) -> None: + """Test generation of suggested frontmatter template.""" + fixed = datetime.date(2026, 3, 15) + + class _FakeDate: + @staticmethod + def today() -> datetime.date: + return fixed + + _fake_dt = types.ModuleType("datetime") + _fake_dt.date = _FakeDate + monkeypatch.setattr(check_doc_frontmatter_module, "datetime", _fake_dt) + suggest_frontmatter = check_doc_frontmatter_module.suggest_frontmatter + path = Path("test-document.md") + suggestion = suggest_frontmatter(path) + + assert suggestion is not None + assert "---" in suggestion + assert "title:" in suggestion + assert "doc_owner:" in suggestion + assert "tracks:" in suggestion + assert "last_reviewed:" in suggestion + assert fixed.isoformat() in suggestion + assert "exempt:" in suggestion + + +class TestExtractDocOwner: + """Test doc_owner extraction functionality.""" + + def test_extract_valid_owner(self, check_doc_frontmatter_module: CheckDocFrontmatterModule) -> None: + """Test extraction of valid doc_owner from content.""" + extract_doc_owner = check_doc_frontmatter_module.extract_doc_owner + content = """--- +title: "Test" +doc_owner: src/test/module +---""" + + result = extract_doc_owner(content) + assert result == "src/test/module" + + def test_extract_missing_owner(self, check_doc_frontmatter_module: CheckDocFrontmatterModule) -> None: + """Test handling of missing doc_owner.""" + extract_doc_owner = check_doc_frontmatter_module.extract_doc_owner + content = """--- +title: "Test" +---""" + + result = extract_doc_owner(content) + assert result is None + + +if __name__ == "__main__": + pytest.main([__file__, "-v"]) diff --git a/tests/unit/scripts/test_doc_frontmatter/test_validation.py b/tests/unit/scripts/test_doc_frontmatter/test_validation.py new file mode 100644 index 00000000..c04244d8 --- /dev/null +++ b/tests/unit/scripts/test_doc_frontmatter/test_validation.py @@ -0,0 +1,261 @@ +#!/usr/bin/env python3 +"""Tests for doc frontmatter 
validation (scripts/check_doc_frontmatter.py).""" + +from __future__ import annotations + +import tempfile +from pathlib import Path + +import pytest + +from tests.helpers.doc_frontmatter import write_enforced +from tests.helpers.doc_frontmatter_types import CheckDocFrontmatterModule + + +class TestFileDiscovery: + """Test Markdown file discovery functionality.""" + + def test_discover_docs_directory_files( + self, monkeypatch: pytest.MonkeyPatch, check_doc_frontmatter_module: CheckDocFrontmatterModule + ) -> None: + """Test discovery of files in docs directory.""" + get_all_md_files = check_doc_frontmatter_module.get_all_md_files + with tempfile.TemporaryDirectory() as temp_dir: + root = Path(temp_dir) + monkeypatch.setenv("DOC_FRONTMATTER_ROOT", str(root)) + docs_dir = root / "docs" + docs_dir.mkdir() + (docs_dir / "test1.md").write_text("# Test 1") + (docs_dir / "test2.md").write_text("---\ntitle: Test\n---\n# Test 2") + (docs_dir / "subdir").mkdir() + (docs_dir / "subdir" / "test3.md").write_text("# Test 3") + files = get_all_md_files() + assert len(files) == 3 + + def test_exempt_files_exclusion( + self, monkeypatch: pytest.MonkeyPatch, check_doc_frontmatter_module: CheckDocFrontmatterModule + ) -> None: + """Test that exempt files are excluded from discovery.""" + get_all_md_files = check_doc_frontmatter_module.get_all_md_files + with tempfile.TemporaryDirectory() as temp_dir: + root = Path(temp_dir) + monkeypatch.setenv("DOC_FRONTMATTER_ROOT", str(root)) + docs_dir = root / "docs" + docs_dir.mkdir() + (docs_dir / "regular.md").write_text("# Regular") + (docs_dir / "LICENSE.md").write_text("# License") + (docs_dir / "exempt.md").write_text( + "---\ntitle: Test\nexempt: true\nexempt_reason: Test reason\n---\n# Exempt" + ) + files = get_all_md_files() + assert len(files) == 1 + assert "regular.md" in str(files[0]) + + +class TestMissingDocOwnerDetection: + """Test detection of missing doc_owner fields.""" + + def test_missing_doc_owner_detection(self, 
check_doc_frontmatter_module: CheckDocFrontmatterModule) -> None: + """Test detection of files missing doc_owner.""" + rg_missing_doc_owner = check_doc_frontmatter_module.rg_missing_doc_owner + with tempfile.TemporaryDirectory() as temp_dir: + docs_dir = Path(temp_dir) / "docs" + docs_dir.mkdir() + (docs_dir / "with_owner.md").write_text("---\ntitle: Test\ndoc_owner: test\n---\n# Content") + (docs_dir / "without_owner.md").write_text("---\ntitle: Test\n---\n# Content") + (docs_dir / "no_frontmatter.md").write_text("# No frontmatter") + files = [ + docs_dir / "with_owner.md", + docs_dir / "without_owner.md", + docs_dir / "no_frontmatter.md", + ] + missing_owner = rg_missing_doc_owner(files) + assert len(missing_owner) == 2 + assert docs_dir / "without_owner.md" in missing_owner + assert docs_dir / "no_frontmatter.md" in missing_owner + assert docs_dir / "with_owner.md" not in missing_owner + + def test_all_files_have_owner(self, check_doc_frontmatter_module: CheckDocFrontmatterModule) -> None: + """Test when all files have doc_owner.""" + rg_missing_doc_owner = check_doc_frontmatter_module.rg_missing_doc_owner + with tempfile.TemporaryDirectory() as temp_dir: + docs_dir = Path(temp_dir) / "docs" + docs_dir.mkdir() + (docs_dir / "file1.md").write_text("---\ntitle: Test\ndoc_owner: test\n---\n# Content") + (docs_dir / "file2.md").write_text("---\ntitle: Test\ndoc_owner: test\n---\n# Content") + files = [docs_dir / "file1.md", docs_dir / "file2.md"] + missing_owner = rg_missing_doc_owner(files) + assert len(missing_owner) == 0 + + +class TestValidationMainFunction: + """Test the main validation function.""" + + def test_validation_with_valid_files( + self, monkeypatch: pytest.MonkeyPatch, check_doc_frontmatter_module: CheckDocFrontmatterModule + ) -> None: + """Test validation with all valid files.""" + validation_main = check_doc_frontmatter_module.main + with tempfile.TemporaryDirectory() as temp_dir: + root = Path(temp_dir) + 
monkeypatch.setenv("DOC_FRONTMATTER_ROOT", str(root)) + docs_dir = root / "docs" + docs_dir.mkdir() + (docs_dir / "valid1.md").write_text( + """--- +title: "Valid Document" +doc_owner: src/test/module +tracks: + - src/test/** +last_reviewed: 2026-03-20 +exempt: false +exempt_reason: "" +--- + +# Valid content""" + ) + (root / "src" / "test" / "module").mkdir(parents=True) + write_enforced(root, "docs/valid1.md") + result = validation_main([]) + assert result == 0 + + def test_validation_with_invalid_files( + self, monkeypatch: pytest.MonkeyPatch, check_doc_frontmatter_module: CheckDocFrontmatterModule + ) -> None: + """Test validation with invalid files.""" + validation_main = check_doc_frontmatter_module.main + with tempfile.TemporaryDirectory() as temp_dir: + root = Path(temp_dir) + monkeypatch.setenv("DOC_FRONTMATTER_ROOT", str(root)) + docs_dir = root / "docs" + docs_dir.mkdir() + (docs_dir / "invalid.md").write_text( + """--- +title: "Invalid Document" +--- + +# Missing doc_owner""" + ) + write_enforced(root, "docs/invalid.md") + result = validation_main([]) + assert result == 1 + + +class TestOwnerResolutionValidation: + """Test owner resolution validation.""" + + def test_valid_owner_resolution( + self, monkeypatch: pytest.MonkeyPatch, check_doc_frontmatter_module: CheckDocFrontmatterModule + ) -> None: + """Test validation with valid owner resolution.""" + validation_main = check_doc_frontmatter_module.main + with tempfile.TemporaryDirectory() as temp_dir: + root = Path(temp_dir) + monkeypatch.setenv("DOC_FRONTMATTER_ROOT", str(root)) + docs_dir = root / "docs" + docs_dir.mkdir() + (docs_dir / "valid.md").write_text( + """--- +title: "Valid Document" +doc_owner: src/test/module +tracks: + - src/test/** +last_reviewed: 2026-03-20 +exempt: false +exempt_reason: "" +--- + +# Content""" + ) + (root / "src" / "test" / "module").mkdir(parents=True) + write_enforced(root, "docs/valid.md") + result = validation_main([]) + assert result == 0 + + def 
test_invalid_owner_resolution( + self, monkeypatch: pytest.MonkeyPatch, check_doc_frontmatter_module: CheckDocFrontmatterModule + ) -> None: + """Test validation with invalid owner resolution.""" + validation_main = check_doc_frontmatter_module.main + with tempfile.TemporaryDirectory() as temp_dir: + root = Path(temp_dir) + monkeypatch.setenv("DOC_FRONTMATTER_ROOT", str(root)) + docs_dir = root / "docs" + docs_dir.mkdir() + (docs_dir / "invalid.md").write_text( + """--- +title: "Invalid Document" +doc_owner: nonexistent/owner +tracks: + - src/test/** +last_reviewed: 2026-03-20 +exempt: false +exempt_reason: "" +--- + +# Content""" + ) + write_enforced(root, "docs/invalid.md") + result = validation_main([]) + assert result == 1 + + +class TestFixHintGeneration: + """Test fix hint generation functionality.""" + + def test_fix_hint_for_missing_frontmatter( + self, + monkeypatch: pytest.MonkeyPatch, + capsys: pytest.CaptureFixture[str], + check_doc_frontmatter_module: CheckDocFrontmatterModule, + ) -> None: + """Test fix hint generation for missing frontmatter.""" + validation_main = check_doc_frontmatter_module.main + with tempfile.TemporaryDirectory() as temp_dir: + root = Path(temp_dir) + monkeypatch.setenv("DOC_FRONTMATTER_ROOT", str(root)) + docs_dir = root / "docs" + docs_dir.mkdir() + (docs_dir / "no_frontmatter.md").write_text("# Document without frontmatter") + write_enforced(root, "docs/no_frontmatter.md") + result = validation_main(["--fix-hint"]) + assert result == 1 + captured = capsys.readouterr() + assert "MISSING doc_owner" in captured.err + assert "Suggested frontmatter" in captured.err or "---" in captured.err + + def test_fix_hint_for_invalid_owner( + self, + monkeypatch: pytest.MonkeyPatch, + capsys: pytest.CaptureFixture[str], + check_doc_frontmatter_module: CheckDocFrontmatterModule, + ) -> None: + """Test fix hint generation for invalid owner.""" + validation_main = check_doc_frontmatter_module.main + with tempfile.TemporaryDirectory() as 
temp_dir: + root = Path(temp_dir) + monkeypatch.setenv("DOC_FRONTMATTER_ROOT", str(root)) + docs_dir = root / "docs" + docs_dir.mkdir() + (docs_dir / "invalid.md").write_text( + """--- +title: "Invalid Document" +doc_owner: invalid/owner +tracks: + - src/** +last_reviewed: 2026-03-20 +exempt: false +exempt_reason: "" +--- + +# Content""" + ) + write_enforced(root, "docs/invalid.md") + result = validation_main(["--fix-hint"]) + assert result == 1 + captured = capsys.readouterr() + assert "INVALID" in captured.err or "does not resolve" in captured.err + + +if __name__ == "__main__": + pytest.main([__file__, "-v"]) diff --git a/tests/unit/scripts/test_pre_commit_code_review.py b/tests/unit/scripts/test_pre_commit_code_review.py index 03ec8c38..a3e1400d 100644 --- a/tests/unit/scripts/test_pre_commit_code_review.py +++ b/tests/unit/scripts/test_pre_commit_code_review.py @@ -5,6 +5,7 @@ from __future__ import annotations import importlib.util +import json import subprocess import sys from pathlib import Path @@ -34,14 +35,16 @@ def test_filter_review_files_keeps_only_python_sources() -> None: ] -def test_build_review_command_uses_score_only_mode() -> None: - """Pre-commit gate should rely on score-only exit-code semantics.""" +def test_build_review_command_writes_json_report() -> None: + """Pre-commit gate should write ReviewReport JSON for IDE/Copilot and use exit verdict.""" module = _load_script_module() command = module.build_review_command(["src/app.py", "tests/test_app.py"]) assert command[:5] == [sys.executable, "-m", "specfact_cli.cli", "code", "review"] - assert "--score-only" in command + assert "--json" in command + assert "--out" in command + assert module.REVIEW_JSON_OUT in command assert command[-2:] == ["src/app.py", "tests/test_app.py"] @@ -55,23 +58,160 @@ def test_main_skips_when_no_relevant_files(capsys: pytest.CaptureFixture[str]) - assert "No staged Python files" in capsys.readouterr().out -def test_main_propagates_review_gate_exit_code(monkeypatch: 
pytest.MonkeyPatch) -> None: +def test_main_propagates_review_gate_exit_code( + monkeypatch: pytest.MonkeyPatch, tmp_path: Path, capsys: pytest.CaptureFixture[str] +) -> None: """Blocking review verdicts must block the commit by returning non-zero.""" module = _load_script_module() + repo_root = tmp_path + payload = { + "overall_verdict": "FAIL", + "findings": [ + {"severity": "error", "rule": "e1"}, + {"severity": "warning", "rule": "w1"}, + ], + } + + def _fake_root() -> Path: + return repo_root + + def _fake_ensure() -> tuple[bool, str | None]: + return True, None + + def _fake_run(cmd: list[str], **kwargs: object) -> subprocess.CompletedProcess[str]: + assert "--json" in cmd + assert module.REVIEW_JSON_OUT in cmd + assert kwargs.get("cwd") == str(repo_root) + assert kwargs.get("timeout") == 300 + _write_sample_review_report(repo_root, payload) + return subprocess.CompletedProcess(cmd, 1, stdout=".specfact/code-review.json\n", stderr="") + + monkeypatch.setattr(module, "_repo_root", _fake_root) + monkeypatch.setattr(module, "ensure_runtime_available", _fake_ensure) + monkeypatch.setattr(module.subprocess, "run", _fake_run) + + exit_code = module.main(["src/app.py"]) + + assert exit_code == 1 + captured = capsys.readouterr() + assert captured.out == "" + err = captured.err + assert "Code review summary: 2 finding(s)" in err + assert "errors=1" in err + assert "warnings=1" in err + assert "overall_verdict='FAIL'" in err + assert "Code review report file:" in err + assert "absolute path:" in err + assert "Copy-paste for Copilot or Cursor:" in err + assert "Read `.specfact/code-review.json`" in err + assert "@workspace Open `.specfact/code-review.json`" in err + + +def _write_sample_review_report(repo_root: Path, payload: dict[str, object]) -> None: + spec_dir = repo_root / ".specfact" + spec_dir.mkdir(parents=True, exist_ok=True) + (spec_dir / "code-review.json").write_text(json.dumps(payload), encoding="utf-8") + + +def 
test_count_findings_by_severity_buckets_unknown() -> None: + """Severities map to error/warning/advisory; others go to other.""" + module = _load_script_module() + counts = module._count_findings_by_severity( + [ + {"severity": "error"}, + {"severity": "WARN"}, + {"severity": "advisory"}, + {"severity": "info"}, + {"severity": "custom"}, + "not-a-dict", + ] + ) + assert counts == {"error": 1, "warning": 1, "advisory": 1, "info": 1, "other": 2} + + +def test_main_missing_report_still_returns_exit_code_and_warns( + monkeypatch: pytest.MonkeyPatch, tmp_path: Path, capsys: pytest.CaptureFixture[str] +) -> None: + """If JSON is not on disk, stderr explains; exit code still comes from the review subprocess.""" + module = _load_script_module() + + def _fake_root() -> Path: + return tmp_path + + def _fake_ensure() -> tuple[bool, str | None]: + return True, None + + def _fake_run(cmd: list[str], **kwargs: object) -> subprocess.CompletedProcess[str]: + return subprocess.CompletedProcess(cmd, 2, stdout="", stderr="") + + monkeypatch.setattr(module, "_repo_root", _fake_root) + monkeypatch.setattr(module, "ensure_runtime_available", _fake_ensure) + monkeypatch.setattr(module.subprocess, "run", _fake_run) + + exit_code = module.main(["src/app.py"]) + + assert exit_code == 2 + err = capsys.readouterr().err + assert "no report file" in err + assert ".specfact/code-review.json" in err + + +def test_main_timeout_fails_hook(monkeypatch: pytest.MonkeyPatch, capsys: pytest.CaptureFixture[str]) -> None: + """Subprocess timeout must fail the hook with a clear message.""" + module = _load_script_module() + repo_root = Path(__file__).resolve().parents[3] + + def _fake_root() -> Path: + return repo_root def _fake_ensure() -> tuple[bool, str | None]: return True, None - def _fake_run(cmd: list[str], **_: object) -> subprocess.CompletedProcess[str]: - assert "--score-only" in cmd - return subprocess.CompletedProcess(cmd, 1, stdout="-7\n", stderr="") + def _fake_run(cmd: list[str], **kwargs: 
object) -> subprocess.CompletedProcess[str]: + assert kwargs.get("cwd") == str(repo_root) + assert kwargs.get("timeout") == 300 + raise subprocess.TimeoutExpired(cmd, 300) + monkeypatch.setattr(module, "_repo_root", _fake_root) monkeypatch.setattr(module, "ensure_runtime_available", _fake_ensure) monkeypatch.setattr(module.subprocess, "run", _fake_run) exit_code = module.main(["src/app.py"]) assert exit_code == 1 + err = capsys.readouterr().err + assert "timed out after 300s" in err + assert "src/app.py" in err + + +def test_print_summary_rejects_non_object_json( + monkeypatch: pytest.MonkeyPatch, tmp_path: Path, capsys: pytest.CaptureFixture[str] +) -> None: + """Top-level JSON must be an object so findings can be summarized safely.""" + module = _load_script_module() + repo_root = tmp_path + + def _fake_root() -> Path: + return repo_root + + def _fake_ensure() -> tuple[bool, str | None]: + return True, None + + def _fake_run(cmd: list[str], **kwargs: object) -> subprocess.CompletedProcess[str]: + spec_dir = repo_root / ".specfact" + spec_dir.mkdir(parents=True, exist_ok=True) + (spec_dir / "code-review.json").write_text(json.dumps(["not", "an", "object"]), encoding="utf-8") + return subprocess.CompletedProcess(cmd, 0, stdout="", stderr="") + + monkeypatch.setattr(module, "_repo_root", _fake_root) + monkeypatch.setattr(module, "ensure_runtime_available", _fake_ensure) + monkeypatch.setattr(module.subprocess, "run", _fake_run) + + exit_code = module.main(["src/app.py"]) + + assert exit_code == 0 + err = capsys.readouterr().err + assert "expected a JSON object" in err def test_main_prints_actionable_setup_guidance_when_runtime_missing( diff --git a/tests/unit/specfact_cli/registry/test_signing_artifacts.py b/tests/unit/specfact_cli/registry/test_signing_artifacts.py index 47d520f6..45feb6a4 100644 --- a/tests/unit/specfact_cli/registry/test_signing_artifacts.py +++ b/tests/unit/specfact_cli/registry/test_signing_artifacts.py @@ -6,8 +6,10 @@ import re from pathlib 
import Path +from typing import Any, cast import pytest +import yaml REPO_ROOT = Path(__file__).resolve().parents[4] @@ -19,6 +21,23 @@ PUBLISH_PYPI_SCRIPT = REPO_ROOT / ".github" / "workflows" / "scripts" / "check-and-publish-pypi.sh" +def _load_pr_orchestrator_jobs() -> dict[str, dict[str, Any]]: + """Return the parsed jobs mapping for the PR orchestrator workflow.""" + if not PR_ORCHESTRATOR_WORKFLOW.exists(): + pytest.skip("pr-orchestrator workflow not present") + data = yaml.safe_load(PR_ORCHESTRATOR_WORKFLOW.read_text(encoding="utf-8")) + assert isinstance(data, dict), "Expected mapping at pr-orchestrator workflow root" + workflow_root = cast(dict[str, Any], data) + jobs = workflow_root.get("jobs") + assert isinstance(jobs, dict), "Expected jobs mapping in pr-orchestrator workflow" + typed_jobs: dict[str, dict[str, Any]] = {} + for name, job in jobs.items(): + assert isinstance(name, str), "Expected string job names in pr-orchestrator workflow" + assert isinstance(job, dict), f"Expected mapping definition for job {name}" + typed_jobs[name] = job + return typed_jobs + + def test_sign_module_script_exists(): """Signing script scripts/sign-module.sh SHALL exist.""" assert SIGN_SCRIPT.exists(), "scripts/sign-module.sh must exist for signing automation" @@ -518,6 +537,49 @@ def test_pr_orchestrator_pins_virtualenv_below_21_for_hatch_jobs(): assert "virtualenv<21" in command, f"Missing virtualenv<21 pin in command: {command}" +@pytest.mark.parametrize( + ("job_name", "required_needs"), + ( + ("compat-py311", {"changes", "verify-module-signatures"}), + ("contract-first-ci", {"changes", "verify-module-signatures"}), + ("type-checking", {"changes", "verify-module-signatures"}), + ("linting", {"changes", "verify-module-signatures"}), + ("cli-validation", {"changes", "verify-module-signatures"}), + ), +) +def test_pr_orchestrator_independent_jobs_do_not_wait_for_tests( + job_name: str, + required_needs: set[str], +) -> None: + """Independent validation jobs SHALL start 
after the shared signature gate, not after tests.""" + jobs = _load_pr_orchestrator_jobs() + job = jobs.get(job_name) + assert job is not None, f"Missing {job_name} job" + needs = job.get("needs") + assert isinstance(needs, list), f"Expected list needs for {job_name}" + assert set(needs) == required_needs + assert "tests" not in needs + + +def test_pr_orchestrator_quality_gates_still_depends_on_tests_for_coverage() -> None: + """Coverage-based advisory gate SHALL retain the tests dependency.""" + jobs = _load_pr_orchestrator_jobs() + job = jobs.get("quality-gates") + assert job is not None, "Missing quality-gates job" + needs = job.get("needs") + assert isinstance(needs, list), "Expected list needs for quality-gates" + assert set(needs) == {"changes", "tests"} + + +def test_pr_orchestrator_cache_paths_do_not_restore_hatch_virtualenvs() -> None: + """PR orchestrator SHALL cache package downloads, not Hatch virtualenv directories.""" + if not PR_ORCHESTRATOR_WORKFLOW.exists(): + pytest.skip("pr-orchestrator workflow not present") + content = PR_ORCHESTRATOR_WORKFLOW.read_text(encoding="utf-8") + assert "~/.cache/uv" in content + assert "~/.local/share/hatch" not in content + + def test_publish_script_pins_virtualenv_below_21_for_hatch_build(): """PyPI publish script SHALL pin virtualenv<21 when installing hatch.""" if not PUBLISH_PYPI_SCRIPT.exists():