diff --git a/.claude/rules/python/hooks.md b/.claude/rules/python/hooks.md index ee37470..15170e5 100644 --- a/.claude/rules/python/hooks.md +++ b/.claude/rules/python/hooks.md @@ -79,6 +79,18 @@ the same scope; running both would warn and then block on the same condition. Both source/test hooks only check top-level package modules (`src/<pkg>/*.py`, excluding `__init__.py`). Nested packages are skipped. +### How to swap to the warn-only reminder + +The default strict hook (`pre-write-src-require-test.sh`) blocks any write to +`src/<pkg>/<module>.py` when the matching test file is missing. If you prefer a non-blocking +reminder, swap the registration in `.claude/settings.json`: + +1. Locate the `PreToolUse` entry whose `command` is + `bash .claude/hooks/pre-write-src-require-test.sh`. +2. Replace `pre-write-src-require-test.sh` with `pre-write-src-test-reminder.sh` in that + entry. +3. Register only one at a time. Registering both produces duplicate output on every write. + ## Refactor test guard (PostToolUse) | Hook | Trigger | What it does | diff --git a/.github/PULL_REQUEST_TEMPLATE.md b/.github/PULL_REQUEST_TEMPLATE.md index 1cf5be4..17559e1 100644 --- a/.github/PULL_REQUEST_TEMPLATE.md +++ b/.github/PULL_REQUEST_TEMPLATE.md @@ -2,6 +2,10 @@ Thank you for opening a pull request! Please provide clear and complete information to help reviewers understand, review, and merge your changes efficiently. + +Tip: run `just pr-draft` from the repo root to print a Conventional-Commits title +(e.g. chore/foo-bar → chore: foo bar) and a body with *Changes introduced* filled +from `git log`. Use `gh pr edit` to apply, or copy into the GitHub UI. 
--> ## Summary diff --git a/.gitignore b/.gitignore index c53b0a9..b406bc4 100644 --- a/.gitignore +++ b/.gitignore @@ -1,6 +1,9 @@ -# Local ignores +# -------------------------------------------------------------------------- +# Local developer ignores (not enforced in CI) +# -------------------------------------------------------------------------- runner.sh full_runner.sh + # ========================================================================== # Python Core # ========================================================================== @@ -75,23 +78,11 @@ instance/ # Scrapy stuff: .scrapy -# Sphinx documentation +# Sphinx / built HTML under docs/ docs/_build/ -docs/* - -# Allow the freshness dashboard report to be committed. -!docs/ -!docs/repo_file_status_report.md -!docs/git-history-maintenance.md -!docs/root-template-sync-policy.md -!docs/root-template-sync-map.yaml -# Copier template ships MkDocs sources under template/docs/ (must not match the rule above). -!template/docs/ -!template/docs/** - -# Repo freshness dashboard (generated + committed) -!docs/repo_file_status_report.md +# Local scratch / moved working copies (not committed) +temp_docs/ # PyBuilder target/ @@ -242,8 +233,5 @@ data/ .claude/todos/ .claude/.refactor-edit-count -# Stray root file (accidental) -/1 - # Cursor Related files .cursor/ diff --git a/.vscode/settings.json b/.vscode/settings.json index f43b5b0..7a859ca 100644 --- a/.vscode/settings.json +++ b/.vscode/settings.json @@ -90,5 +90,14 @@ } }, "python.languageServer": "None", - "python.analysis.typeCheckingMode": "off" + "python.analysis.typeCheckingMode": "off", + // ============================================================ + // macOS: silence uv "Ignoring `SSL_CERT_DIR`" when the variable + // points at a directory with no PEM certs (e.g. /etc/ssl/certs). + // Empty value lets uv use its default trust store. If you rely on + // a custom bundle, prefer SSL_CERT_FILE to a single PEM file instead. 
+ // ============================================================ + "terminal.integrated.env.osx": { + "SSL_CERT_DIR": "" + } } diff --git a/CLAUDE.md b/CLAUDE.md index f1b079b..e43049e 100644 --- a/CLAUDE.md +++ b/CLAUDE.md @@ -23,13 +23,17 @@ destination folder. ├── tests/ # pytest tests that render the template and assert output │ ├── test_template.py # Main integration suite — copier copy + assertions │ ├── test_root_template_sync.py # Tests for check_root_template_sync.py -│ └── test_repo_file_freshness.py # Unit tests for repo_file_freshness.py script -├── scripts/ # Automation scripts for CI or local tasks +│ ├── test_repo_file_freshness.py # Unit tests for repo_file_freshness.py +│ ├── test_pr_commit_policy.py # PR body + conventional commit rules +│ ├── test_bump_version.py # Version bump + pyproject I/O +│ ├── test_sync_skip_if_exists.py # copier.yml _skip_if_exists helpers +│ └── test_check_root_template_sync.py # CLI smoke (see test_root_template_sync for scenarios) +├── scripts/ # Automation scripts for CI or local tasks (see scripts/CLAUDE.md) │ ├── repo_file_freshness.py # Git-based freshness dashboard (→ docs/ + assets/) │ ├── bump_version.py # PEP 440 version bumper (patch/minor/major) │ ├── check_root_template_sync.py # Root ↔ template parity (workflows, settings, recipes) -│ ├── sync_skip_if_exists.py # Sync copier.yml _skip_if_exists with template paths -│ └── update_files.sh # Batch file update helper +│ ├── pr_commit_policy.py # PR title/body + commit message policy (CI) +│ └── sync_skip_if_exists.py # Sync copier.yml _skip_if_exists with template paths ├── .claude/ # Claude Code hooks, commands, and rules for THIS meta-repo │ ├── settings.json # Hook registrations and permission allow/deny lists │ ├── hooks/ # Shell hook scripts (see hooks/README.md) @@ -58,24 +62,38 @@ Prerequisites: Python 3.11+, `uv`, `just`, `git`. 
| Task | Command | |---|---| +| List recipes (default) | `just` or `just default` | | Run all tests | `just test` | | Run tests in parallel | `just test-parallel` | +| Run slow tests only | `just slow` | +| Verbose tests | `just test-verbose` | +| Full debug test output | `just test-debug` | +| Re-run last failed tests | `just test-lf` | +| Stop on first test failure | `just test-first-fail` | +| CI-style tests + coverage XML | `just test-ci` | | Coverage report | `just coverage` | | Lint | `just lint` | | Format | `just fmt` | +| Format check (read-only) | `just fmt-check` | | Auto-fix lint issues | `just fix` | | Type check | `just type` | | Docstring check | `just docs-check` | +| MkDocs recipes (generated projects only) | `just docs-help` | | Pre-merge review | `just review` | | Full CI locally | `just ci` | | Read-only CI check (no auto-fix) | `just ci-check` | | Static checks only (fix+fmt+lint+type+docs) | `just static_check` | +| Run pre-commit on all files | `just precommit` | +| Register git hooks | `just precommit-install` | +| Interactive conventional commit (Commitizen) | `just cz-commit` | | Sync deps after lockfile change | `just sync` | | Upgrade all deps | `just update` | | Dependency security audit | `just audit` | | Install all deps + pre-commit | `just install` | | Diagnose environment | `just doctor` | | Generate freshness dashboard | `just freshness` | +| Root ↔ template sync validation | `just sync-check` | +| Suggested PR title + body (PR policy) | `just pr-draft` | | Clean build artifacts | `just clean` | | Build distribution | `just build` | | Publish package | `just publish` | diff --git a/docs/file_locking_plan.md b/docs/file_locking_plan.md new file mode 100644 index 0000000..a543de3 --- /dev/null +++ b/docs/file_locking_plan.md @@ -0,0 +1,949 @@ +# File Locking Implementation Plan (Claude Code only) + +> **Audience:** A junior model (Haiku) executing the steps below. 
+> **Source repo:** `/Users/kzqr495/Documents/workspace/python_starter_template` +> **Goal:** Prevent AI agents from modifying stable files without explicit user intent. +> **Approach:** Defense in depth using only Claude Code's native mechanisms — no external +tools, no git pre-receive hooks, no CODEOWNERS automation. + +## How to use this document + +1. Read the entire document before touching files. +2. Execute the phases in order. Each phase is independently committable. +3. After every phase, run the validation block. If validation fails, STOP and report. +4. Conventional Commit format for every commit. +5. Do not skip Phase 0 (manifest) — every later phase depends on it. + +## Conceptual model — what we are building + +Three layers of protection backed by **one manifest**: + +``` +.claude/locked-files.txt ← single source of truth (one path/glob per line) + │ + ├── consumed by hook → .claude/hooks/pre-write-locked-files.sh (Write|Edit|MultiEdit) + ├── consumed by hook → .claude/hooks/pre-bash-locked-files.sh (Bash redirects) + ├── synced into → .claude/settings.json permissions.deny (hard block) + └── documented in → CLAUDE.md "Locked files" section (model-side guidance) +``` + +Bypass protocol: setting the environment variable `CLAUDE_UNLOCK=<path>` before +the session disables the hooks for that path only. The `permissions.deny` layer is +intentionally NOT bypassable by env var — those paths require editing `settings.json`. + +Tiers: + +- **Tier 1 (HARD LOCK):** in manifest with prefix `!` → blocked by both hook and `permissions.deny`. +- **Tier 2 (SOFT LOCK):** in manifest without prefix → hook warns + may block on weakening + patterns; not in `permissions.deny`. +- **Tier 3 (FREE):** not in manifest at all. + +--- + +## Phase 0 — Working branch and manifest + +### 0.1 Branch + +```bash +git checkout -b chore/file-locking-claude-code +``` + +### 0.2 Create the manifest + +Write `.claude/locked-files.txt` with the following exact contents. 
Lines starting with +`#` are comments. Lines starting with `!` are Tier 1 (hard lock). All other non-blank +lines are Tier 2 (soft lock). Glob syntax follows shell globbing (`**` recursive). + +```text +# ============================================================================ +# .claude/locked-files.txt +# +# Single source of truth for file-locking. Consumed by: +# - .claude/hooks/pre-write-locked-files.sh +# - .claude/hooks/pre-bash-locked-files.sh +# - .claude/settings.json permissions.deny (synced via `just lock-sync`) +# +# Format: +# # comment +# !path TIER 1: hard lock (hook block + permissions.deny) +# path TIER 2: soft lock (hook warn / weakening detection only) +# +# Globs use shell syntax. ** matches any number of directories. +# Bypass: set CLAUDE_UNLOCK= before the session (Tier 2 hook only). +# ============================================================================ + +# ---------- Tier 1: legal & lockfiles ---------- +!LICENSE +!template/LICENSE.jinja +!uv.lock +!.secrets.baseline + +# ---------- Tier 1: repo integrity ---------- +!.gitignore +!.gitmessage +!.pre-commit-config.yaml + +# ---------- Tier 1: CI configuration ---------- +!.github/workflows/** +!template/.github/workflows/** +!.github/dependabot.yml +!.github/CODEOWNERS + +# ---------- Tier 1: meta-lock (locking the locks) ---------- +!.claude/settings.json +!.claude/locked-files.txt +!.claude/hooks/pre-write-locked-files.sh +!.claude/hooks/pre-bash-locked-files.sh +!.claude/hooks/pre-protect-uv-lock.sh +!.claude/hooks/pre-config-protection.sh +!template/.claude/settings.json + +# ---------- Tier 2: project configuration (weakening already blocked elsewhere) ---------- +pyproject.toml +copier.yml +justfile +template/justfile.jinja + +# ---------- Tier 2: standards docs (rules of the road) ---------- +.claude/rules/**/*.md +template/.claude/rules/**/*.md +.claude/commands/**/*.md +template/.claude/commands/**/*.md.jinja +.claude/CLAUDE.md + +# ---------- Tier 2: generated-project 
surface ---------- +template/CLAUDE.md.jinja +template/README.md.jinja +template/pyproject.toml.jinja +template/{{_copier_conf.answers_file}}.jinja +``` + +Validation: + +```bash +test -f .claude/locked-files.txt && echo OK +grep -c '^!' .claude/locked-files.txt # expect ~20 hard locks +grep -cv '^#\|^!\|^$' .claude/locked-files.txt # expect ~10 soft locks +``` + +--- + +## Phase 1 — The Tier-1 + Tier-2 hook (Write/Edit/MultiEdit) + +### 1.1 Create `.claude/hooks/pre-write-locked-files.sh` + +This hook reads the manifest, matches `tool_input.file_path` against each entry, and +either blocks or warns based on the tier prefix. + +Write this exact content: + +```bash +#!/usr/bin/env bash +# pre-write-locked-files.sh +# PreToolUse hook for Write|Edit|MultiEdit. Blocks edits to locked paths. +# +# Manifest: .claude/locked-files.txt +# !path → block (Tier 1) +# path → warn (Tier 2) +# +# Bypass: CLAUDE_UNLOCK= allows that single entry through. +# Tier 1 entries can also be bypassed via the env var, but should normally +# require editing .claude/settings.json (which is itself Tier 1). + +set -uo pipefail + +INPUT=$(cat) + +FILE_PATH=$(python3 - <<'PYEOF' <<<"$INPUT" +import json, sys +try: + data = json.loads(sys.stdin.read()) + print(data.get("tool_input", {}).get("file_path", "")) +except Exception: + print("") +PYEOF +) || { echo "$INPUT"; exit 0; } + +# No path → nothing to check → pass through. +if [[ -z "$FILE_PATH" ]]; then + echo "$INPUT" + exit 0 +fi + +MANIFEST=".claude/locked-files.txt" +if [[ ! -f "$MANIFEST" ]]; then + echo "$INPUT" + exit 0 +fi + +# Normalise to a path relative to repo root. +REPO_ROOT="$(git rev-parse --show-toplevel 2>/dev/null || pwd)" +REL_PATH="${FILE_PATH#$REPO_ROOT/}" + +match_glob() { + # $1 = glob pattern, $2 = path → 0 if match, 1 if not + local pattern="$1" path="$2" + case "$path" in + $pattern) return 0 ;; + *) return 1 ;; + esac +} + +UNLOCK="${CLAUDE_UNLOCK:-}" + +while IFS= read -r line; do + # Skip comments and blanks. 
+ [[ -z "$line" || "$line" =~ ^# ]] && continue + + tier="soft" + pattern="$line" + if [[ "$line" == !* ]]; then + tier="hard" + pattern="${line#!}" + fi + + if match_glob "$pattern" "$REL_PATH"; then + # Bypass check. + if [[ -n "$UNLOCK" ]] && match_glob "$UNLOCK" "$REL_PATH"; then + echo "┌─ Locked-file bypass: $REL_PATH" >&2 + echo "│ Matched manifest entry: $line" >&2 + echo "│ Allowed by CLAUDE_UNLOCK=$UNLOCK" >&2 + echo "└─ Proceeding (bypass active)" >&2 + echo "$INPUT" + exit 0 + fi + + if [[ "$tier" == "hard" ]]; then + echo "┌─ Locked-file BLOCK: $REL_PATH" >&2 + echo "│ Matched Tier 1 entry: $line" >&2 + echo "│ This file is hard-locked. To edit it:" >&2 + echo "│ 1. Confirm the change is intentional with the user." >&2 + echo "│ 2. Re-run with CLAUDE_UNLOCK='$pattern' set in the environment." >&2 + echo "│ 3. Or remove/edit the entry in .claude/locked-files.txt" >&2 + echo "│ (which is itself locked — requires CLAUDE_UNLOCK)." >&2 + echo "└─ ✗ Edit blocked" >&2 + exit 2 + else + # Tier 2 — warn but allow. + echo "┌─ Locked-file WARN: $REL_PATH" >&2 + echo "│ Matched Tier 2 entry: $line" >&2 + echo "│ This file is semi-stable. Confirm the change is intentional." >&2 + echo "│ Set CLAUDE_UNLOCK='$pattern' to silence this warning." >&2 + echo "└─ ⚠ Proceeding with warning" >&2 + echo "$INPUT" + exit 0 + fi + fi +done < "$MANIFEST" + +# No match. +echo "$INPUT" +exit 0 +``` + +Make it executable: + +```bash +chmod +x .claude/hooks/pre-write-locked-files.sh +``` + +### 1.2 Quick smoke test + +Simulate a Tier-1 block: + +```bash +echo '{"tool_input": {"file_path": "'"$(pwd)"'/LICENSE"}}' \ + | bash .claude/hooks/pre-write-locked-files.sh +echo "exit=$?" +# Expect: stderr contains "Locked-file BLOCK: LICENSE", exit=2 +``` + +Simulate a Tier-2 warn: + +```bash +echo '{"tool_input": {"file_path": "'"$(pwd)"'/justfile"}}' \ + | bash .claude/hooks/pre-write-locked-files.sh +echo "exit=$?" 
+# Expect: stderr contains "Locked-file WARN: justfile", exit=0 +``` + +Simulate a free path: + +```bash +echo '{"tool_input": {"file_path": "'"$(pwd)"'/docs/notes.md"}}' \ + | bash .claude/hooks/pre-write-locked-files.sh +echo "exit=$?" +# Expect: no stderr, exit=0 +``` + +Simulate the bypass: + +```bash +CLAUDE_UNLOCK='LICENSE' bash -c ' + echo "{\"tool_input\": {\"file_path\": \"'"$(pwd)"'/LICENSE\"}}" \ + | bash .claude/hooks/pre-write-locked-files.sh +' +# Expect: stderr "bypass active", exit=0 +``` + +If any of the four cases above behaves differently, fix the hook before continuing. Most +likely cause: shell glob matching is locale-sensitive — make sure `shopt -s extglob` is +not required. The script uses POSIX `case` matching on purpose. + +--- + +## Phase 2 — The Bash redirect hook + +### 2.1 Why a second hook + +The Write/Edit/MultiEdit hook does NOT fire when the model writes a file via Bash: + +```bash +cat > LICENSE < FILE / cat >> FILE +# - tee FILE / tee -a FILE +# - sed -i ... FILE +# - perl -i ... FILE +# - python -c '... open(FILE, "w") ...' (best-effort regex) +# - cp/mv SOURCE FILE +# - > FILE / >> FILE (any redirect) +# +# Manifest and bypass behaviour identical to pre-write-locked-files.sh. + +set -uo pipefail + +INPUT=$(cat) + +CMD=$(python3 - <<'PYEOF' <<<"$INPUT" +import json, sys +try: + data = json.loads(sys.stdin.read()) + print(data.get("tool_input", {}).get("command", "")) +except Exception: + print("") +PYEOF +) || { echo "$INPUT"; exit 0; } + +if [[ -z "$CMD" ]]; then + echo "$INPUT" + exit 0 +fi + +MANIFEST=".claude/locked-files.txt" +[[ -f "$MANIFEST" ]] || { echo "$INPUT"; exit 0; } + +UNLOCK="${CLAUDE_UNLOCK:-}" + +# Extract candidate target paths from the command string. +# Heuristics — broad matches; we err on the side of catching too much, then check manifest. 
+targets=$(printf '%s\n' "$CMD" | python3 - <<'PYEOF' +import re, sys + +cmd = sys.stdin.read() +patterns = [ + r'>\s*([^\s|;&<>]+)', # redirect: > FILE or >> FILE + r'\btee\s+(?:-a\s+)?([^\s|;&]+)', # tee FILE / tee -a FILE + r'\bsed\s+(?:[^|;&]*\s)?-i[^\s]*\s+(?:[^\s|;&]+\s+)*([^\s|;&]+)', + r'\bperl\s+(?:[^|;&]*\s)?-i[^\s]*\s+(?:[^\s|;&]+\s+)*([^\s|;&]+)', + r'\b(?:cp|mv)\s+(?:-[^\s]+\s+)*[^\s|;&]+\s+([^\s|;&]+)', +] +hits = set() +for p in patterns: + for m in re.finditer(p, cmd): + target = m.group(1).strip("\"'") + if target and not target.startswith('-'): + hits.add(target) +print('\n'.join(hits)) +PYEOF +) + +[[ -z "$targets" ]] && { echo "$INPUT"; exit 0; } + +REPO_ROOT="$(git rev-parse --show-toplevel 2>/dev/null || pwd)" + +match_glob() { + local pattern="$1" path="$2" + case "$path" in + $pattern) return 0 ;; + *) return 1 ;; + esac +} + +while IFS= read -r raw_target; do + [[ -z "$raw_target" ]] && continue + # Strip leading repo root if absolute. + target="${raw_target#$REPO_ROOT/}" + target="${target#./}" + + while IFS= read -r line; do + [[ -z "$line" || "$line" =~ ^# ]] && continue + tier="soft"; pattern="$line" + if [[ "$line" == !* ]]; then tier="hard"; pattern="${line#!}"; fi + + if match_glob "$pattern" "$target"; then + if [[ -n "$UNLOCK" ]] && match_glob "$UNLOCK" "$target"; then + continue 2 # bypass — try next target + fi + if [[ "$tier" == "hard" ]]; then + echo "┌─ Bash locked-file BLOCK" >&2 + echo "│ Command attempts to write: $target" >&2 + echo "│ Matched Tier 1 entry: $line" >&2 + echo "│ Set CLAUDE_UNLOCK='$pattern' to override." 
>&2 + echo "└─ ✗ Command blocked" >&2 + exit 2 + else + echo "┌─ Bash locked-file WARN" >&2 + echo "│ Command writes to Tier 2 path: $target" >&2 + echo "│ Matched: $line" >&2 + echo "└─ ⚠ Proceeding with warning" >&2 + fi + fi + done < "$MANIFEST" +done <<<"$targets" + +echo "$INPUT" +exit 0 +``` + +```bash +chmod +x .claude/hooks/pre-bash-locked-files.sh +``` + +### 2.3 Smoke test + +```bash +# Tier 1 redirect → block +echo '{"tool_input": {"command": "echo x > LICENSE"}}' \ + | bash .claude/hooks/pre-bash-locked-files.sh +echo "exit=$?" # expect 2 + +# Innocent command → pass +echo '{"tool_input": {"command": "ls -la"}}' \ + | bash .claude/hooks/pre-bash-locked-files.sh +echo "exit=$?" # expect 0 + +# sed -i on workflow → block +echo '{"tool_input": {"command": "sed -i s/foo/bar/ .github/workflows/lint.yml"}}' \ + | bash .claude/hooks/pre-bash-locked-files.sh +echo "exit=$?" # expect 2 +``` + +If a smoke test fails (especially on macOS where BSD sed differs), inspect the regex +output and adjust. Do NOT loosen the patterns to make tests pass — instead add the +specific failing case to the regex set. + +--- + +## Phase 3 — Register both hooks in `settings.json` + +`.claude/settings.json` is itself Tier 1 — the very first edit will trigger your hook +before it is even registered. Use the bypass: + +```bash +export CLAUDE_UNLOCK='.claude/settings.json' +``` + +(After this phase commits, unset it: `unset CLAUDE_UNLOCK`.) + +### 3.1 Add hook entries + +Open `.claude/settings.json`. Find the `"PreToolUse"` array. 
Insert these entries at the +TOP of the array (before any existing PreToolUse hook), so locked-file checks run first +and can short-circuit: + +```json +{ + "matcher": "Write|Edit|MultiEdit", + "hooks": [ + { + "type": "command", + "command": "bash .claude/hooks/pre-write-locked-files.sh" + } + ], + "description": "Block edits to files listed in .claude/locked-files.txt (Tier 1) and warn on Tier 2" +}, +{ + "matcher": "Bash", + "hooks": [ + { + "type": "command", + "command": "bash .claude/hooks/pre-bash-locked-files.sh" + } + ], + "description": "Block shell commands writing to locked paths (redirects, tee, sed -i, etc.)" +}, +``` + +### 3.2 Add `permissions.deny` entries + +Append to the existing `"permissions"."deny"` array. Generate the list by reading the +manifest and emitting `Edit(<path>)`, `Write(<path>)`, `MultiEdit(<path>)` for every +Tier-1 entry: + +```bash +grep '^!' .claude/locked-files.txt | sed 's/^!//' | while read -r p; do + printf ' "Edit(%s)",\n "Write(%s)",\n "MultiEdit(%s)",\n' "$p" "$p" "$p" +done +``` + +Paste the output into the `deny` array. Result for `LICENSE` should look like: + +```json +"deny": [ + "Bash(git push:*)", + "Bash(uv publish:*)", + "Bash(git commit --no-verify:*)", + "Bash(git push --force:*)", + "Edit(LICENSE)", + "Write(LICENSE)", + "MultiEdit(LICENSE)", + "Edit(template/LICENSE.jinja)", + "...etc..." +] +``` + +JSON validity: run `python3 -m json.tool .claude/settings.json > /dev/null` — must exit 0. + +### 3.3 Mirror to `template/.claude/settings.json` + +Generated projects also need protection. Repeat 3.1 and 3.2 for +`template/.claude/settings.json`, but use a TRIMMED manifest list — generated projects +do not have `template/`, `copier.yml`, or meta-only paths. 
Use only: + +- `LICENSE` +- `pyproject.toml` (Tier 2) +- `uv.lock` +- `.secrets.baseline` +- `.gitignore` +- `.pre-commit-config.yaml` +- `.github/workflows/**` +- `.github/dependabot.yml` +- `.claude/settings.json` +- `.claude/locked-files.txt` (the file itself will be created in template too — see Phase 5) +- `.claude/hooks/pre-write-locked-files.sh` +- `.claude/hooks/pre-bash-locked-files.sh` + +### 3.4 Validation + +```bash +python3 -m json.tool .claude/settings.json > /dev/null && echo "settings.json OK" +python3 -m json.tool template/.claude/settings.json > /dev/null && echo "template OK" +unset CLAUDE_UNLOCK +``` + +Commit (with bypass active for the settings file you just wrote): + +```bash +CLAUDE_UNLOCK='.claude/settings.json' git add .claude/settings.json template/.claude/settings.json +git add .claude/locked-files.txt .claude/hooks/pre-write-locked-files.sh .claude/hooks/pre-bash-locked-files.sh +git commit -m "feat(claude): add file-locking via manifest, hooks, and permissions.deny" +``` + +--- + +## Phase 4 — `just lock-sync` recipe + +Hand-syncing `permissions.deny` against the manifest will drift. Add a justfile recipe. + +### 4.1 Edit `justfile` + +`justfile` is Tier 2 — you will get a warning, not a block. Append at the end: + +```makefile +# ------------------------------------------------------------------------- +# File-locking +# ------------------------------------------------------------------------- + +# Sync .claude/settings.json permissions.deny with .claude/locked-files.txt (Tier 1 only). +# Print-only by default; use `just lock-sync APPLY=1` to write. +lock-sync APPLY="0": + @uv run python scripts/sync_lock_manifest.py {{ if APPLY == "1" { "--apply" } else { "" } }} + +# List all locked files (Tier 1 + Tier 2) from the manifest. 
+lock-list: + @grep -v '^#\|^$$' .claude/locked-files.txt | sed 's/^!/[T1] /; t; s/^/[T2] /' +``` + +### 4.2 Create `scripts/sync_lock_manifest.py` + +This script reads the manifest, computes the desired `permissions.deny` Tier-1 entries, +diffs against the actual JSON, prints the diff, and optionally writes. + +Skeleton (the executor must complete docstrings/types per project style): + +```python +"""Sync .claude/settings.json permissions.deny against .claude/locked-files.txt. + +Reads the manifest, computes the desired set of Edit(), Write(), +MultiEdit() entries for every Tier 1 (`!`-prefixed) line, compares against +the current settings.json deny list, and prints the diff. With --apply, writes +the merged result back to settings.json (preserving non-locking deny entries +such as Bash(git push:*)). +""" + +from __future__ import annotations + +import argparse +import json +import sys +from pathlib import Path + +REPO_ROOT = Path(__file__).resolve().parent.parent +MANIFEST = REPO_ROOT / ".claude" / "locked-files.txt" +SETTINGS = REPO_ROOT / ".claude" / "settings.json" + + +def read_tier1_paths(manifest: Path) -> list[str]: + """Return the list of Tier-1 (hard-lock) paths from the manifest.""" + out: list[str] = [] + for raw in manifest.read_text(encoding="utf-8").splitlines(): + line = raw.strip() + if not line or line.startswith("#"): + continue + if line.startswith("!"): + out.append(line[1:]) + return out + + +def desired_deny_entries(paths: list[str]) -> list[str]: + """Build the deny strings for each Tier-1 path.""" + entries: list[str] = [] + for p in paths: + entries.append(f"Edit({p})") + entries.append(f"Write({p})") + entries.append(f"MultiEdit({p})") + return entries + + +def main() -> int: + """Entry point — diff or apply the lock manifest sync.""" + parser = argparse.ArgumentParser(description=__doc__) + parser.add_argument("--apply", action="store_true", help="Write changes to settings.json") + args = parser.parse_args() + + settings = 
json.loads(SETTINGS.read_text(encoding="utf-8")) + deny = settings.setdefault("permissions", {}).setdefault("deny", []) + + desired = desired_deny_entries(read_tier1_paths(MANIFEST)) + desired_set = set(desired) + existing_lock_entries = { + e for e in deny if e.startswith(("Edit(", "Write(", "MultiEdit(")) + } + non_lock = [e for e in deny if not e.startswith(("Edit(", "Write(", "MultiEdit("))] + + to_add = sorted(desired_set - existing_lock_entries) + to_remove = sorted(existing_lock_entries - desired_set) + + print(f"Manifest Tier-1 paths: {len(desired) // 3}") + print(f"Entries to add: {len(to_add)}") + print(f"Entries to remove: {len(to_remove)}") + for e in to_add: + print(f" + {e}") + for e in to_remove: + print(f" - {e}") + + if not args.apply: + if to_add or to_remove: + print("\nRun with --apply (or `just lock-sync APPLY=1`) to write.") + return 1 + print("settings.json already in sync with manifest.") + return 0 + + new_deny = sorted(set(non_lock) | desired_set, key=lambda s: (not s.startswith("Bash"), s)) + settings["permissions"]["deny"] = new_deny + SETTINGS.write_text(json.dumps(settings, indent=2) + "\n", encoding="utf-8") + print("Wrote .claude/settings.json") + return 0 + + +if __name__ == "__main__": + sys.exit(main()) +``` + +### 4.3 Add a corresponding test + +`tests/test_sync_lock_manifest.py` (per the strict-TDD convention — write the test first +in real practice; here you are retrofitting). Cover: + +- Manifest with no Tier-1 entries → no deny additions. +- Manifest with one Tier-1 entry → three deny additions (Edit/Write/MultiEdit). +- Existing non-lock deny entries (e.g. `Bash(git push:*)`) are preserved. +- `--apply` writes valid JSON; without `--apply` does not modify the file. 
+ +### 4.4 Validation + +```bash +just lock-list # prints all locked entries with [T1]/[T2] prefix +just lock-sync # diff only — should print "already in sync" after Phase 3 +just test # all tests pass +``` + +Commit: + +```bash +git add justfile scripts/sync_lock_manifest.py tests/test_sync_lock_manifest.py +git commit -m "feat(lock): add just lock-sync and lock-list recipes" +``` + +--- + +## Phase 5 — Mirror to template/ + +Generated projects need their own copies of the hooks and a starter manifest. Mirror: + +```bash +cp .claude/hooks/pre-write-locked-files.sh template/.claude/hooks/ +cp .claude/hooks/pre-bash-locked-files.sh template/.claude/hooks/ +chmod +x template/.claude/hooks/pre-write-locked-files.sh +chmod +x template/.claude/hooks/pre-bash-locked-files.sh +``` + +Create `template/.claude/locked-files.txt` with the trimmed list from Phase 3.3: + +```text +# Locked-file manifest for generated projects. +# See template/.claude/CLAUDE.md (or the Cookiecutter README) for usage. + +!LICENSE +!uv.lock +!.secrets.baseline +!.gitignore +!.pre-commit-config.yaml +!.github/workflows/** +!.github/dependabot.yml +!.claude/settings.json +!.claude/locked-files.txt +!.claude/hooks/pre-write-locked-files.sh +!.claude/hooks/pre-bash-locked-files.sh + +pyproject.toml +justfile +.claude/rules/**/*.md +.claude/commands/**/*.md +``` + +The hook entries you added to `template/.claude/settings.json` in Phase 3.3 already +reference these scripts, so this completes the wiring. 
+ +### 5.1 Update template tests + +Edit `tests/test_template.py` to assert that a fresh `copier copy` produces: + +- `.claude/locked-files.txt` (file exists, contains `!LICENSE`) +- `.claude/hooks/pre-write-locked-files.sh` (executable bit set) +- `.claude/hooks/pre-bash-locked-files.sh` (executable bit set) +- `.claude/settings.json` `permissions.deny` includes `Edit(LICENSE)` + +### 5.2 Validation + +```bash +just sync-check # root↔template parity must still pass +just test # template tests must pass +``` + +Commit: + +```bash +git add template/.claude/ tests/test_template.py +git commit -m "feat(template): mirror file-locking into generated projects" +``` + +--- + +## Phase 6 — CLAUDE.md documentation + +Hooks and `permissions.deny` are enforcement; documentation is so the model never tries. +Add a "Locked files" section to root `CLAUDE.md` and `template/CLAUDE.md.jinja`. + +### 6.1 Root `CLAUDE.md` + +CLAUDE.md is NOT in the Tier-2 list (intentional — it evolves). Add this section after +"Files you should never modify directly": + +```markdown +## File locking (Claude Code) + +This repository uses a manifest-driven file-locking system to prevent agents from +modifying stable files without explicit user intent. + +- **Manifest:** `.claude/locked-files.txt` — single source of truth. +- **Tier 1 (hard lock, prefix `!`):** blocked by both a PreToolUse hook AND + `permissions.deny`. Editing requires the user to set `CLAUDE_UNLOCK=` in the + environment AND (for `permissions.deny` overrides) edit `settings.json` directly. +- **Tier 2 (soft lock, no prefix):** hook prints a warning but allows the edit. Used for + files that evolve but should not be reformatted casually. +- **Tier 3 (everything else):** no restriction. + +Agents must consult `.claude/locked-files.txt` BEFORE attempting to edit anything in +`.github/workflows/`, `LICENSE`, `pyproject.toml`, `copier.yml`, `justfile`, or any other +infrastructure file. 
If a change is genuinely needed, ask the user to confirm and to set +`CLAUDE_UNLOCK`. + +Sync `permissions.deny` against the manifest with: + + just lock-sync # diff only + just lock-sync APPLY=1 # write +``` + +### 6.2 `template/CLAUDE.md.jinja` + +Mirror the same section, but reference the trimmed manifest. Do not include the Copier- +specific paths (`copier.yml`, `template/`). + +### 6.3 Update `.claude/CLAUDE.md` orientation + +Add a one-paragraph mention of the locked-files manifest in `.claude/CLAUDE.md`'s +"Directory layout" or "settings.json" section pointing at `.claude/locked-files.txt`. + +### 6.4 Validation + +```bash +grep -q "locked-files.txt" CLAUDE.md && echo OK +grep -q "locked-files.txt" template/CLAUDE.md.jinja && echo OK +grep -q "locked-files.txt" .claude/CLAUDE.md && echo OK +just docs-check +``` + +Commit: + +```bash +git add CLAUDE.md template/CLAUDE.md.jinja .claude/CLAUDE.md +git commit -m "docs: document file-locking system in CLAUDE.md (root + template)" +``` + +--- + +## Phase 7 — End-to-end verification + +### 7.1 Live test of the full stack + +With NO `CLAUDE_UNLOCK` set, attempt these in your Claude Code session and confirm each +fails as expected: + +| Action | Expected outcome | +|---|---| +| Edit `LICENSE` | `permissions.deny` blocks before the hook runs | +| Edit `.github/workflows/tests.yml` | `permissions.deny` blocks | +| Edit `pyproject.toml` (Tier 2) | hook prints WARN; edit proceeds | +| `cat > LICENSE` via Bash | `pre-bash-locked-files.sh` blocks | +| Edit `docs/notes.md` | no restriction | + +With `CLAUDE_UNLOCK='LICENSE'` set: + +| Action | Expected outcome | +|---|---| +| Edit `LICENSE` via Edit tool | `permissions.deny` STILL blocks (env var does not override deny) | +| `cat > LICENSE` via Bash | hook bypasses; command runs | + +This second behaviour is intentional. 
`permissions.deny` is the strongest layer because
+it requires an interactive UI confirmation; `CLAUDE_UNLOCK` is the relief valve for
+hook-only enforcement. To edit a Tier-1 file via the Edit tool, the user must edit
+`settings.json` to remove the deny entry — which itself requires `CLAUDE_UNLOCK` set on
+`.claude/settings.json`. This is the meta-lock by design.
+
+### 7.2 Full CI
+
+```bash
+just ci
+```
+
+Must pass.
+
+### 7.3 PR
+
+```bash
+git push -u origin chore/file-locking-claude-code
+```
+
+PR body template:
+
+```
+## Summary
+
+Implements manifest-driven file locking for Claude Code agents.
+
+## What changed
+
+- New manifest: `.claude/locked-files.txt` (Tier 1 / Tier 2 entries)
+- New hooks: `pre-write-locked-files.sh`, `pre-bash-locked-files.sh`
+- `permissions.deny` synced from manifest (Tier 1 only)
+- New justfile recipes: `just lock-sync`, `just lock-list`
+- New script: `scripts/sync_lock_manifest.py` (+ tests)
+- Mirrored into `template/.claude/` for generated projects
+- Documented in CLAUDE.md (root + template)
+
+## How agents bypass when a change is needed
+
+- Tier 2 (soft): set `CLAUDE_UNLOCK=<path>` for the session.
+- Tier 1 (hard): user removes the entry from `.claude/locked-files.txt` AND/OR
+  `.claude/settings.json` deny list. Editing those files itself requires `CLAUDE_UNLOCK`.
+
+## Validation
+
+- `just ci` passes
+- 5 hook smoke tests in Phase 1.2 / 2.3 all behave as expected
+- Live test matrix in Phase 7.1 matches expected outcomes
+```
+
+---
+
+## Anti-goals — what NOT to do
+
+- **Do NOT** add Tier-1 entries for files the project actively edits (e.g. `README.md`,
+  `CLAUDE.md`, anything under `docs/`, any `tests/` or `scripts/` file). This will quickly
+  make the repo unworkable.
+- **Do NOT** rely on `CLAUDE.md` guidance alone. It is the weakest layer; always pair
+  with the hook and (for true Tier 1) `permissions.deny`.
+- **Do NOT** create per-file hooks. One generic hook reading the manifest is the point.
+
+- **Do NOT** skip the meta-lock entries (`.claude/settings.json`, the manifest itself,
+  the lock hooks). Without these, an agent can simply edit the manifest to remove its
+  own restrictions.
+- **Do NOT** add file-locking to git pre-commit hooks. This document is scoped to Claude
+  Code only — git-level enforcement is a separate problem.
+- **Do NOT** widen the Bash-redirect regex to catch every conceivable shell trick.
+  Determined adversarial bypass is out of scope; the goal is to prevent accidental
+  modification, not to be a security boundary.
+- **Do NOT** modify `uv.lock`, `.copier-answers.yml`, or any `.jinja` template body
+  while implementing this plan beyond what is explicitly listed.
+
+---
+
+## Quick reference — final expected state
+
+| Artefact | Location |
+|---|---|
+| Manifest | `.claude/locked-files.txt` |
+| Write/Edit hook | `.claude/hooks/pre-write-locked-files.sh` |
+| Bash hook | `.claude/hooks/pre-bash-locked-files.sh` |
+| Hook registrations | `.claude/settings.json` PreToolUse |
+| Hard-lock denies | `.claude/settings.json` permissions.deny |
+| Sync script | `scripts/sync_lock_manifest.py` |
+| Sync recipe | `just lock-sync` (+ `just lock-list`) |
+| Sync tests | `tests/test_sync_lock_manifest.py` |
+| Template manifest | `template/.claude/locked-files.txt` |
+| Template hooks | `template/.claude/hooks/pre-{write,bash}-locked-files.sh` |
+| Documentation | `CLAUDE.md`, `template/CLAUDE.md.jinja`, `.claude/CLAUDE.md` |
+
+Bypass mechanisms:
+
+| Layer | Bypass |
+|---|---|
+| `permissions.deny` (Tier 1) | User edits `.claude/settings.json` (which itself requires bypass) |
+| Hooks (Tier 1 + Tier 2) | `CLAUDE_UNLOCK=<path>` env var |
+| `CLAUDE.md` guidance | None — soft layer, model-side only |
+
+If any row in this section is not satisfied at the end of execution, the plan is not
+complete.
diff --git a/docs/github-repository-settings.md b/docs/github-repository-settings.md new file mode 100644 index 0000000..3207346 --- /dev/null +++ b/docs/github-repository-settings.md @@ -0,0 +1,183 @@ +# GitHub repository settings (maintainer checklist) + +**Use this file as the only checklist** for what to configure on **github.com** for this repository (Settings UI, branch protection / rulesets, and merge options). Other docs link here instead of repeating steps. + +GitHub does not read this file automatically—apply everything below in the web UI (or your org’s infrastructure-as-code). + +--- + +## 1. Protect the default branch (`main`) + +**Settings → Rules → Rulesets** (recommended), or **Settings → Branches → Branch protection rules** (classic). + +Create a rule targeting your default branch (usually `main`). + +--- + +## 2. Require pull requests + +Enable **Require a pull request before merging** so: + +- Direct pushes to `main` are blocked +- Every change is proposed as a PR + +--- + +## 3. Require reviews (recommended for teams) + +Under the same rule: + +- Require at least **one approving review** before merge +- Optionally **dismiss stale reviews** when new commits are pushed + +> **Solo maintainers:** In the usual GitHub flow you **cannot approve your own** pull request. Leaving required approvals enabled can **block merges** on a one-person repo. Omit this requirement and rely on **required status checks** (section 4), **PR policy** when you use PRs, and local habits such as `just review`. See **section 12**. + +--- + +## 4. Require status checks (CI) + +When GitHub Actions run on pull requests: + +1. Open any PR (or create a test PR) and open the **Checks** tab. +2. Note the **exact** check names (for example `Workflow name / job name`). +3. In branch protection / rulesets, add those checks as **required** so failing CI blocks merge. 
+ +Typical workflow **display names** include **CI** or **CI Tests**, **Lint**, **Security** (if +present), and **PR policy**—they differ per repository. Always confirm the exact strings in a +pull request’s **Checks** tab before adding them as required. + +Add **`PR policy / pr-policy`** (workflow **PR policy**, job `pr-policy`) if you want PR +title, body (against `.github/PULL_REQUEST_TEMPLATE.md`), and commit-subject rules enforced on +GitHub. The PR template file alone does not block merges; this workflow does. + +**Tip:** You can also require **Dependency review**, **labeler**, or other workflows; names must match what GitHub shows on the PR. + +--- + +## 5. Pull request titles and conventional commits + +When you **squash merge**, GitHub often uses the **PR title** as the default squash commit subject. + +- Use **Conventional Commits** for PR titles (for example `feat: add export`) so the resulting commit on `main` stays consistent with local **commit-msg** hooks and project policy. +- Locally, `just precommit-install` registers commit-msg hooks and Git’s **commit template** (`.gitmessage`); align PR titles with the same convention when you expect squash merges. + +--- + +## 6. Block unsafe updates + +Enable: + +- **Block force pushes** +- **Block branch deletion** + +Optionally restrict who may push (if your org allows bypass lists). + +--- + +## 7. Squash-only merges + +**Settings → General → Pull Requests**: + +- Enable **Allow squash merging** +- Disable **Allow merge commits** (unless your policy explicitly needs them) +- Disable **Allow rebase merging** (optional; squash-only keeps history uniform) + +Each merged PR becomes a single commit on `main`. + +--- + +## 8. Include administrators + +Turn on enforcement so **admins are subject to the same rules** (no silent bypass of protection). + +--- + +## 9. 
Optional: linear history + +If available for your account, **require linear history** pairs well with squash merges and avoids merge commits on the default branch. + +--- + +## 10. New repositories + +- Branch protection applies once the protected branch exists; the **first** push that creates `main` may succeed before rules are saved. +- Prefer: add an initial commit (e.g. README), then apply protection and merge settings, then collaborate via PRs. + +--- + +## 11. Repository secrets and variables + +Workflows that need API tokens or credentials use **Settings → Secrets and variables → Actions** +(organization secrets if applicable). Exact names and setup steps depend on the workflow—for +example Codecov is documented next to the docs CI in this template’s **`docs/ci.md`** (when +MkDocs is enabled). Never commit secrets or paste them into Copier answers. + +--- + +## 12. Solo / personal maintainer + +Personal and private repositories can still use **rulesets**, **required status checks**, and the +workflows in `.github/workflows/`—the main mismatch with this checklist is **required approvals** +(section 3) when you are the only human. + +### What adds value versus ceremony (solo) + +Many items above are **team gates**: they make sense when someone else merges or reviews. Alone, +you still *can* turn them on, but the **payoff** is uneven: + +| Kind | Examples | Solo takeaway | +| ---- | -------- | ------------- | +| **High value** | CI on `push` / PR, **block force push** and **branch deletion**, local **pre-commit** / **`just ci-check`** | Catches breakage and accidents **without** a second person. This is where most real enforcement lives. | +| **Useful if you like the workflow** | **Require PR**, **required status checks** on PRs, **PR policy** as a required check | Mostly **self-discipline**: you are still approving your own work. Benefits: run CI before merging to `main`, avoid direct pushes by mistake, keep PR titles/bodies consistent—not independent review. 
| +| **Little extra value** | **Required approvals**, **include administrators** when you are the only admin | No second human to block bad merges; “no bypass” matters when others share the repo. Safe to skip or treat as documentation for a future team. | + +If GitHub settings feel like pointless clicks, lean on **CI + local hooks** and merge however you +prefer (PR or direct), as long as you accept the trade-offs. + +### GitHub (when you use pull requests) + +Configure a ruleset on `main` roughly as follows: + +- **Require a pull request before merging** if you want the server to block direct pushes. +- **Require status checks** to pass—copy exact names from a real PR’s **Checks** tab (section 4). +- Add **`PR policy / pr-policy`** as a required check if you want PR title, body, and commit + subjects enforced before merge. +- **Do not** require approving reviews if you cannot supply a second approver. +- Keep **block force pushes**, **block branch deletion**, and **include administrators** (sections 6 + and 8). + +### Local enforcement (with or without GitHub protection) + +These work entirely on your machine: + +- **`no-commit-to-branch`** in `.pre-commit-config.yaml` blocks commits on `main` / `master` so + you develop on a feature branch (pair with GitHub’s “require PR” if you want the same rule + remotely). +- **`commit-msg`** hooks (**Conventional Commits**) run on every commit after `just precommit-install`. +- **`pre-push`** runs **`just ci-check`** (read-only full gate: sync, lint, types, tests, pre-commit, + audit) before `git push` succeeds—slower than commit time, but catches CI failures early. + To bypass occasionally (not for routine use): `SKIP=just-ci-check git push` or `git push --no-verify`. 
+ +--- + +## Summary + +| Goal | Where to configure | +| ---- | ------------------ | +| No direct pushes to `main` | Branch protection / rulesets | +| PRs required | Branch protection / rulesets | +| Reviews + required CI | Branch protection / rulesets (skip required reviews if solo — section 12) | +| PR title / body / commits policy | Required check **PR policy / pr-policy** | +| One commit per PR | **Settings → General → Pull requests**: squash on, merge commits off | +| Admins follow rules | “Include administrators” / equivalent | +| PR title matches squash commit subject | Use Conventional Commits on PR titles (section 5) | +| Solo: parity without required approvals | Section 12 (rulesets + local hooks) | +| API tokens for workflows | **Settings → Secrets and variables → Actions** (section 11) | + +--- + +## Workflow names on your repository + +Required check names depend on which Actions workflows exist in **your** repository. **Always** +copy the exact strings from a real pull request’s **Checks** tab before adding them to branch +protection. diff --git a/justfile b/justfile index c987073..ec21651 100644 --- a/justfile +++ b/justfile @@ -234,3 +234,7 @@ freshness: # Validate root/template sync map and parity checks sync-check: @uv run python scripts/check_root_template_sync.py + +# Print a conventional PR title + PR body (template + git log) for pr-policy compliance +pr-draft: + @uv run python scripts/pr_commit_policy.py draft diff --git a/scripts/CLAUDE.md b/scripts/CLAUDE.md index a1fcd9d..8c889e9 100644 --- a/scripts/CLAUDE.md +++ b/scripts/CLAUDE.md @@ -78,9 +78,15 @@ via `logging_manager.write_machine_stdout_line` (T20 still enforces no `print()` Validates pull request titles and bodies (against `.github/PULL_REQUEST_TEMPLATE.md`) and conventional commit subjects over a `git rev-list` range. 
-**Invocation:** `python3 scripts/pr_commit_policy.py pr` with `PR_TITLE` and `PR_BODY` set, or
-`python3 scripts/pr_commit_policy.py commits` with `PR_BASE_SHA` and `PR_HEAD_SHA` (or
-`--base` / `--head`).
+**Invocation:**
+
+- CI / check: `python3 scripts/pr_commit_policy.py pr` with `PR_TITLE` and `PR_BODY` set, or
+  `python3 scripts/pr_commit_policy.py commits` with `PR_BASE_SHA` and `PR_HEAD_SHA` (or
+  `--base` / `--head`).
+- Local automation: `just pr-draft` → `pr_commit_policy.py draft` prints a Conventional
+  Commits title (from `type/slug-branch` or the latest valid commit subject) and a PR body
+  from `.github/PULL_REQUEST_TEMPLATE.md` with *Changes introduced* bullets from
+  `git log <base>..HEAD` (default base: `origin/main` or `main`).
 
 **Used by:** `.github/workflows/pr-policy.yml` (and the generated-project copy from
 `template/`).
@@ -105,28 +111,6 @@ Actions pins, shared recipes, and other policy maps).
 
 ---
 
-### `generate_template_vs_obsidian_comparison.py`
-
-Writes [`docs/template_vs_obsidian_playwright_pipeline.md`](../docs/template_vs_obsidian_playwright_pipeline.md):
-per-template-file existence check against a reference generated repo plus a **Content match**
-column (UTF-8 text vs a fresh `copier copy` with that repo’s answers, newline-normalised).
-
-**Invocation:** `uv run python scripts/generate_template_vs_obsidian_comparison.py`
-
-**Requires:** `copier` on PATH; edit `OBS_ROOT` in the script to point at the reference project.
-
----
-
-### `generate_obsidian_porting_report.py`
-
-Writes [`docs/obsidian_playwright_porting_report.md`](../docs/obsidian_playwright_porting_report.md):
-per-file notes on what was ported from a reference generated project (paths and Copier
-answers are edited at the top of the script) versus what stays app-specific.
- -**Invocation:** `uv run python scripts/generate_obsidian_porting_report.py` - ---- - ### `sync_skip_if_exists.py` Synchronises the `_skip_if_exists` list in `copier.yml` with the actual template file paths @@ -142,15 +126,6 @@ and their commit frequency. --- -### `update_files.sh` - -Batch file update helper used during template development to propagate changes across -multiple related files. - -**Invocation:** `bash scripts/update_files.sh` - ---- - ## Adding a new script 1. Place the script in this directory. diff --git a/scripts/generate_obsidian_porting_report.py b/scripts/generate_obsidian_porting_report.py deleted file mode 100644 index b836785..0000000 --- a/scripts/generate_obsidian_porting_report.py +++ /dev/null @@ -1,220 +0,0 @@ -#!/usr/bin/env python3 -"""Regenerate docs/obsidian_playwright_porting_report.md for a generated-project path. - -Compares every file under ``template/`` to a reference checkout (default: Obsidian pipeline -repo). Edit ``ANSWERS`` and ``OBS_ROOT`` if you point at another generated tree. 
- -Usage: - uv run python scripts/generate_obsidian_porting_report.py -""" - -from __future__ import annotations - -import sys -from pathlib import Path - -from jinja2 import Environment - -REPO_ROOT = Path(__file__).resolve().parents[1] -TEMPLATE_ROOT = REPO_ROOT / "template" -OUT_PATH = REPO_ROOT / "docs" / "obsidian_playwright_porting_report.md" - -OBS_ROOT = Path("/Users/kzqr495/Documents/workspace/obsidian_playwright_pipeline") - -ANSWERS: dict[str, object] = { - "package_name": "obsidian_playwright_pipeline", - "include_cli": False, - "include_docs": True, - "include_git_cliff": True, - "include_numpy": False, - "include_pandas_support": False, - "include_pypi_publish": False, - "include_release_workflow": True, - "include_security_scanning": True, - "_copier_conf": {"answers_file": ".copier-answers.yml"}, -} - -PORTED: dict[str, str] = { - "tests/conftest.py.jinja": ( - "Added `# pyright: ignore[reportMissingImports]` on `pytest` import " - "(matches strict typing in generated tests)." - ), - "tests/test_imports.py.jinja": ( - "Module `pytestmark = pytest.mark.unit`; stronger `__version__` checks; " - "`pytest.skip` when metadata unavailable; assert major.minor digits." - ), - "tests/{{ package_name }}/test_core.py.jinja": ( - "Module `pytestmark = pytest.mark.unit` for layered test selection." - ), - "tests/{{ package_name }}/test_support.py.jinja": ( - "Module `pytestmark = pytest.mark.unit` for layered test selection." - ), -} - -SPECIAL_NOT_PORTED: dict[str, str] = { - "pyproject.toml.jinja": ( - "Obsidian adds `playwright`, path-specific Ruff `per-file-ignores`, and custom pytest " - "marker text. Not merged into the generic template without new Copier flags." - ), - "env.example.jinja": ( - "Obsidian documents browser/vault/pipeline variables. Template keeps generic logging " - "and placeholder sections; see **Obsidian-only assets**." 
- ), - "CLAUDE.md.jinja": ( - "Obsidian's rendered `CLAUDE.md` includes pipeline and `run_pipeline.sh` docs; domain " - "content stays in the app repo." - ), - "README.md.jinja": ( - "Obsidian README describes the clipper product; template README remains neutral scaffold." - ), -} - -OBSIDIAN_ONLY_TABLE = """ -| Path pattern | Why not moved into `template/` | -|--------------|--------------------------------| -| `src/.../clip_to_obsidian.py`, `tracker.py`, `constants.py` | Application-specific Obsidian/Playwright pipeline. | -| `dependencies` / `playwright` in `pyproject.toml` | Product dependency; needs an optional Copier flag to generalise. | -| Ruff `per-file-ignores` for clip/tracker paths | Hard-coded module paths; not portable. | -| `tests/unit/**`, `tests/integration/**`, `tests/e2e/**` | Alternate layout vs template's `tests//`; large churn. | -| `.claude/skills/pytest-skill-updated/**` | Parallel skill with duplicate `name: pytest`; merge/rename only. | -| `docs/PIPELINE_SETUP.md`, `docs/pipeline-execution.md` | Domain runbooks. | -| `run_pipeline.sh`, `input/`, `output/` | Local execution and data. 
| -""" - - -def render_path_segment(segment: str) -> str: - """Render a single path segment with Copier-style ``{{ }}`` variables.""" - env = Environment(variable_start_string="{{", variable_end_string="}}") - return env.from_string(segment).render(**ANSWERS) - - -def template_file_to_dest_rel(path: Path) -> str | None: - """Map a template source path to its rendered relative path, or None if skipped.""" - rel = path.relative_to(TEMPLATE_ROOT) - parts: list[str] = [] - for part in rel.parts: - if part.endswith(".jinja"): - stem = part[: -len(".jinja")] - rendered = render_path_segment(stem) - if not rendered.strip(): - return None - parts.append(rendered) - else: - parts.append(render_path_segment(part)) - return str(Path(*parts)) - - -def not_ported_note(trel: str, dest: str | None, exists: bool) -> str: - """Explain why Obsidian-specific content was not copied into the template row.""" - if trel in SPECIAL_NOT_PORTED: - return SPECIAL_NOT_PORTED[trel] - if dest is None: - return "N/A — template emits no file for these answers (`include_cli: false`)." - if trel in PORTED: - return ( - "See **Ported** column; any remaining Obsidian-only content here was not applicable " - "or already equivalent." - ) - if not exists: - if trel.startswith("tests/") and trel.endswith(".jinja"): - return ( - "Obsidian removed this default path; tests live under `tests/unit/`, " - "`tests/integration/`, or `tests/e2e/` instead. Nothing to merge from the old path." - ) - return ( - "Obsidian has no file at this rendered path (never copied, deleted, or renamed). " - "Nothing to merge from the project for this row." - ) - return ( - "Counterpart exists; content matches template intent after render. No extra generic " - "delta to port (app-specific edits stay downstream)." 
- ) - - -def main() -> int: - """Write the porting report markdown and return a process exit code.""" - if not TEMPLATE_ROOT.is_dir(): - print("template/ not found", file=sys.stderr) - return 1 - if not OBS_ROOT.is_dir(): - print(f"Obsidian root not found: {OBS_ROOT}", file=sys.stderr) - return 1 - - rows: list[tuple[str, str, str, str, str]] = [] - skipped: list[str] = [] - - for p in sorted(TEMPLATE_ROOT.rglob("*")): - if p.is_dir() or p.name == ".DS_Store": - continue - trel = str(p.relative_to(TEMPLATE_ROOT)) - dest = template_file_to_dest_rel(p) - if dest is None: - skipped.append(trel) - continue - exists = (OBS_ROOT / dest).is_file() - ported = PORTED.get(trel, "—") - np_note = not_ported_note(trel, dest, exists) - ex = "yes" if exists else "**no**" - rows.append((trel, dest, ex, ported, np_note)) - - lines: list[str] = [ - "# Obsidian Playwright Pipeline → template porting report", - "", - "This report records what was **integrated** from the reference generated project into " - "[`template/`](../template/) and, **for every file under `template/`**, what was " - "**not** moved and why.", - "", - f"**Reference tree:** `{OBS_ROOT}`", - "", - "## Integrated (template updates sourced from that project)", - "", - ] - for path, desc in PORTED.items(): - lines.append(f"- **`template/{path}`** — {desc}") - lines.extend( - [ - "", - "## Obsidian-only assets (not suitable for generic template)", - "", - OBSIDIAN_ONLY_TABLE.strip(), - "", - "## Skipped template sources (no rendered file for these answers)", - "", - ] - ) - lines.extend(f"- `{s}`" for s in skipped) - lines.extend( - [ - "", - "## Per-template-file matrix", - "", - "| Template path | Rendered path | In Obsidian? 
| Ported from Obsidian | Not ported / notes |", - "|---------------|---------------|--------------|----------------------|--------------------|", - ] - ) - for trel, dest, ex, ported, np_note in rows: - dest_s = f"`{dest}`" - ported_esc = ported.replace("|", "\\|") - np_esc = np_note.replace("|", "\\|") - lines.append( - f"| `{trel}` | {dest_s} | {ex} | {ported_esc} | {np_esc} |", - ) - lines.extend( - [ - "", - "## Regenerate", - "", - "```bash", - "uv run python scripts/generate_obsidian_porting_report.py", - "```", - "", - ] - ) - - OUT_PATH.write_text("\n".join(lines) + "\n", encoding="utf-8") - print(f"Wrote {OUT_PATH} ({len(lines)} lines)") - return 0 - - -if __name__ == "__main__": - raise SystemExit(main()) diff --git a/scripts/generate_template_vs_obsidian_comparison.py b/scripts/generate_template_vs_obsidian_comparison.py deleted file mode 100644 index 957d52d..0000000 --- a/scripts/generate_template_vs_obsidian_comparison.py +++ /dev/null @@ -1,248 +0,0 @@ -#!/usr/bin/env python3 -"""Regenerate ``docs/template_vs_obsidian_playwright_pipeline.md``. - -Maps each ``template/`` file to its rendered path (Obsidian Copier answers), checks whether -that file exists in the reference project, and whether **text content matches** a fresh -``copier copy`` render (newline-normalised UTF-8). - -Edit ``OBS_ROOT`` if you compare against another generated tree. 
- -Usage: - uv run python scripts/generate_template_vs_obsidian_comparison.py -""" - -from __future__ import annotations - -import subprocess -import sys -import tempfile -from pathlib import Path - -import yaml -from jinja2 import Environment - -REPO_ROOT = Path(__file__).resolve().parents[1] -TEMPLATE_ROOT = REPO_ROOT / "template" -OUT_PATH = REPO_ROOT / "docs" / "template_vs_obsidian_playwright_pipeline.md" - -OBS_ROOT = Path("/Users/kzqr495/Documents/workspace/obsidian_playwright_pipeline") - - -def load_copier_data_file(answers_path: Path) -> dict[str, object]: - """Load consumer answers; drop keys starting with ``_`` for ``copier copy --data-file``.""" - raw = yaml.safe_load(answers_path.read_text(encoding="utf-8")) - if not isinstance(raw, dict): - msg = "Answers file must be a mapping at top level" - raise TypeError(msg) - return {k: v for k, v in raw.items() if not str(k).startswith("_")} - - -def render_path_segment(segment: str, answers: dict[str, object]) -> str: - """Render a path segment that may contain ``{{ package_name }}`` etc.""" - env = Environment(variable_start_string="{{", variable_end_string="}}") - return env.from_string(segment).render(**answers) - - -def template_file_to_dest_rel(path: Path, answers: dict[str, object]) -> str | None: - """Map a template source path to its rendered relative path, or None if skipped.""" - rel = path.relative_to(TEMPLATE_ROOT) - parts: list[str] = [] - for part in rel.parts: - if part.endswith(".jinja"): - stem = part[: -len(".jinja")] - rendered = render_path_segment(stem, answers) - if not rendered.strip(): - return None - parts.append(rendered) - else: - parts.append(render_path_segment(part, answers)) - return str(Path(*parts)) - - -def run_fresh_copier_render(data: dict[str, object], dest: Path) -> None: - """Run ``copier copy`` into ``dest`` using the given answer mapping.""" - dest.mkdir(parents=True, exist_ok=True) - with tempfile.NamedTemporaryFile( - mode="w", - suffix=".yml", - encoding="utf-8", - 
delete=False, - ) as tmp: - yaml.safe_dump(data, tmp, default_flow_style=False, sort_keys=True) - data_path = Path(tmp.name) - try: - cmd = [ - "copier", - "copy", - "--vcs-ref", - "HEAD", - str(REPO_ROOT), - str(dest), - "--trust", - "--defaults", - "--skip-tasks", - "--data-file", - str(data_path), - "-q", - ] - proc = subprocess.run(cmd, check=False, capture_output=True, text=True, cwd=REPO_ROOT) - if proc.returncode != 0: - print(proc.stderr or proc.stdout, file=sys.stderr) - msg = f"copier copy failed with exit {proc.returncode}" - raise RuntimeError(msg) - finally: - data_path.unlink(missing_ok=True) - - -def read_normalized_text(path: Path) -> str: - """Read UTF-8 text with CRLF/CR normalised to LF.""" - text = path.read_text(encoding="utf-8") - return text.replace("\r\n", "\n").replace("\r", "\n") - - -def content_matches_fresh_render( - rendered_path: str, - obsidian_file: Path, - fresh_root: Path, -) -> str: - """Return ``yes`` / ``no`` / ``N/A`` for the content-match column.""" - fresh_file = fresh_root / rendered_path - if not obsidian_file.is_file(): - return "N/A" - if not fresh_file.is_file(): - return "N/A" - try: - a = read_normalized_text(fresh_file) - b = read_normalized_text(obsidian_file) - except OSError: - return "N/A" - return "yes" if a == b else "no" - - -def build_markdown( - data: dict[str, object], - rows: list[tuple[str, str, str, str, str, str]], - skipped: list[str], -) -> str: - """Assemble the full markdown document.""" - present = sum(1 for r in rows if r[3] == "yes") - absent = sum(1 for r in rows if r[3] == "**no**") - identical = sum(1 for r in rows if r[4] == "yes") - differ = sum(1 for r in rows if r[4] == "no") - na_match = sum(1 for r in rows if r[4] == "N/A") - - answer_lines = [f"| `{key}` | `{data[key]}` |" for key in sorted(data.keys())] - missing_lines = [ - f"- `{trel}` → `{dest}`" for _k, trel, dest, ex, _m, _n in rows if ex == "**no**" - ] - table_lines = [ - f"| {kind} | `{trel}` | `{dest}` | {ex} | **{match}** | 
{note} |" - for kind, trel, dest, ex, match, note in rows - ] - skip_lines = [f"- `{s}` (`include_cli: false` → no `cli.py`)." for s in skipped] - - lines: list[str] = [ - "# Template vs `obsidian_playwright_pipeline` file comparison", - "", - "This document lists every file under [`template/`](../template/), maps it to the path " - "Copier would emit using the reference project's [`.copier-answers.yml`](file://" - f"{OBS_ROOT}/.copier-answers.yml), checks whether that path exists in `" - f"{OBS_ROOT}` today, and whether **file text matches** a **fresh** render from this " - "repository (`copier copy --vcs-ref HEAD --skip-tasks` with the same answers).", - "", - "Content comparison normalises newlines (CRLF → LF). Binary files are not supported; " - "treat unexpected decode failures as **N/A**.", - "", - "## Copier answers (path rendering)", - "", - "| Key | Value |", - "|-----|-------|", - *answer_lines, - "| `_copier_conf.answers_file` | `.copier-answers.yml` |", - "", - "## Summary", - "", - f"- **Template files mapped:** {len(rows)} (plus {len(skipped)} skipped for these answers)", - f"- **Expected path exists in reference project:** {present}", - f"- **Expected path missing:** {absent}", - f"- **Content matches fresh render:** {identical}", - f"- **Content differs from fresh render:** {differ}", - f"- **Content match N/A** (missing path or render): {na_match}", - "", - "If **Content match** is **no**, the reference project has drifted from the current " - "template (local edits, template updates since `_commit`, or refactored paths).", - "", - "### Missing expected paths (template → would emit → not found)", - "", - *missing_lines, - "", - "## Skipped template files (no output for these answers)", - "", - *skip_lines, - "", - "## Full table (all `template/` files)", - "", - "| Kind | Template path | Expected path | Exists | Content match | Notes |", - "|------|---------------|---------------|--------|---------------|-------|", - *table_lines, - "", - "## 
Regenerate", - "", - "```bash", - "uv run python scripts/generate_template_vs_obsidian_comparison.py", - "```", - "", - ] - return "\n".join(lines) + "\n" - - -def main() -> int: - """Write the comparison markdown.""" - if not TEMPLATE_ROOT.is_dir(): - print("template/ not found", file=sys.stderr) - return 1 - if not OBS_ROOT.is_dir(): - print(f"Reference project not found: {OBS_ROOT}", file=sys.stderr) - return 1 - answers_path = OBS_ROOT / ".copier-answers.yml" - if not answers_path.is_file(): - print(f"Missing {answers_path}", file=sys.stderr) - return 1 - - data = load_copier_data_file(answers_path) - path_ctx: dict[str, object] = {**data, "_copier_conf": {"answers_file": ".copier-answers.yml"}} - - with tempfile.TemporaryDirectory() as tmp: - fresh_root = Path(tmp) / "rendered" - run_fresh_copier_render(data, fresh_root) - - rows: list[tuple[str, str, str, str, str, str]] = [] - skipped: list[str] = [] - for p in sorted(TEMPLATE_ROOT.rglob("*")): - if p.is_dir() or p.name == ".DS_Store": - continue - trel = str(p.relative_to(TEMPLATE_ROOT)) - dest = template_file_to_dest_rel(p, path_ctx) - if dest is None: - skipped.append(trel) - continue - kind = "Jinja" if trel.endswith(".jinja") else "static" - obs_file = OBS_ROOT / dest - exists = obs_file.is_file() - ex = "yes" if exists else "**no**" - match = content_matches_fresh_render(dest, obs_file, fresh_root) - note = ( - "—" - if kind == "static" - else "Rendered from `.jinja`; match uses fresh `copier copy`." 
- ) - rows.append((kind, trel, dest, ex, match, note)) - - text = build_markdown(data, rows, skipped) - OUT_PATH.write_text(text, encoding="utf-8") - print(f"Wrote {OUT_PATH}") - return 0 - - -if __name__ == "__main__": - raise SystemExit(main()) diff --git a/scripts/pr_commit_policy.py b/scripts/pr_commit_policy.py index 0affebf..d4ff015 100644 --- a/scripts/pr_commit_policy.py +++ b/scripts/pr_commit_policy.py @@ -8,6 +8,7 @@ import re import subprocess import sys +from pathlib import Path from typing import Final # Conventional commit types (aligned with Commitizen ``cz_conventional_commits`` / commit-msg hook). @@ -69,6 +70,85 @@ def validate_pr_title(title: str) -> str | None: return validate_conventional_subject_line(title.split("\n", maxsplit=1)[0]) +def suggest_title_from_branch(branch: str) -> str | None: + """Derive a Conventional-Commits title from ``type/slug-slug`` branch names. + + Args: + branch: Short branch name (no ``refs/heads/``) or full ref. + + Returns: + A title such as ``chore: sync skip list``, or ``None`` if the branch does not + start with a known conventional type prefix. 
+ """ + b = branch.strip() + if b.startswith("refs/heads/"): + b = b[11:] + m = re.match(rf"^({_TYPES})/(.+)$", b) + if not m: + return None + typ, tail = m.group(1), m.group(2) + subject = tail.replace("/", " ") + subject = re.sub(r"[-_]+", " ", subject) + subject = re.sub(r"\s+", " ", subject).strip() + if not subject: + return None + line = f"{typ}: {subject}" + if len(line) > _MAX_SUBJECT_LEN: + line = line[:_MAX_SUBJECT_LEN].rstrip() + return line + + +def suggest_title_from_git(repo_cwd: Path) -> str | None: + """Return the latest commit subject if it already satisfies conventional rules.""" + result = subprocess.run( + ["git", "-C", str(repo_cwd), "log", "-1", "--format=%s"], + capture_output=True, + text=True, + check=False, + ) + if result.returncode != 0: + return None + subject = result.stdout.strip() + if validate_conventional_subject_line(subject) is None: + return subject + return None + + +def changes_introduced_markdown(repo_cwd: Path, base_ref: str, head_ref: str) -> str: + """Build bullet lines from ``git log`` for the PR template *Changes introduced* section.""" + proc = subprocess.run( + [ + "git", + "-C", + str(repo_cwd), + "log", + "--reverse", + "--format=- %s", + f"{base_ref}..{head_ref}", + ], + capture_output=True, + text=True, + check=False, + ) + if proc.returncode != 0: + return "- (could not list commits; describe changes here)" + lines = [ln.rstrip() for ln in proc.stdout.splitlines() if ln.strip()] + if not lines: + return "- (no commits ahead of base; describe changes here)" + return "\n".join(lines) + + +def draft_pr_body(repo_root: Path, base_ref: str, head_ref: str) -> str: + """Return PR body text from the template with placeholders replaced.""" + template_path = repo_root / ".github" / "PULL_REQUEST_TEMPLATE.md" + text = template_path.read_text(encoding="utf-8") + bullets = changes_introduced_markdown(repo_root, base_ref, head_ref) + block = "- Change 1\n- Change 2\n- Change 3 (if applicable)" + if block not in text: + return 
text + return text.replace(block, bullets, 1) + + def validate_pr_body(body: str | None) -> str | None: """Return an error message if the PR body does not follow the template; else None.""" if body is None or not body.strip(): @@ -118,6 +198,81 @@ def validate_commit_range(base: str, head: str) -> str | None: return None +def _git_toplevel() -> Path: + result = subprocess.run( + ["git", "rev-parse", "--show-toplevel"], + capture_output=True, + text=True, + check=False, + ) + if result.returncode != 0: + raise RuntimeError("not a git repository (git rev-parse --show-toplevel failed)") + return Path(result.stdout.strip()) + + +def _resolve_base_ref(repo: Path, explicit: str | None) -> str: + if explicit: + return explicit + for ref in ("origin/main", "main"): + chk = subprocess.run( + ["git", "-C", str(repo), "rev-parse", "--verify", ref], + capture_output=True, + text=True, + check=False, + ) + if chk.returncode == 0: + return ref + raise RuntimeError( + "could not resolve base ref (try: git fetch origin main && " + "pr_commit_policy.py draft --base origin/main)", + ) + + +def _current_branch(repo: Path) -> str: + result = subprocess.run( + ["git", "-C", str(repo), "rev-parse", "--abbrev-ref", "HEAD"], + capture_output=True, + text=True, + check=False, + ) + if result.returncode != 0: + raise RuntimeError("git rev-parse --abbrev-ref HEAD failed") + return result.stdout.strip() + + +def _cmd_draft( + repo: Path, + base_ref: str | None, + head_ref: str | None, + title_only: bool, + body_only: bool, +) -> int: + """Print a policy-compliant PR title and body for copy-paste or ``gh pr edit``.""" + base = _resolve_base_ref(repo, base_ref) + head = head_ref or "HEAD" + branch = _current_branch(repo) + title = suggest_title_from_branch(branch) or suggest_title_from_git(repo) + if not title: + title = "chore: describe this pull request" + body = draft_pr_body(repo, base, head) + if title_only and body_only: + print(title) + print("\n---\n") + print(body) + return 0 + if not 
title_only and not body_only: + print("Suggested PR title (GitHub UI or: gh pr edit --title '...'):\n") + print(title) + print("\n---\n\nSuggested PR body (paste in GitHub or: gh pr edit --body-file ...):\n") + print(body) + return 0 + if title_only: + print(title) + return 0 + print(body) + return 0 + + def _cmd_pr() -> int: title = os.environ.get("PR_TITLE", "") body = os.environ.get("PR_BODY") @@ -147,6 +302,37 @@ def main() -> int: sub.add_parser("pr", help="validate PR_TITLE and PR_BODY environment variables") + p_draft = sub.add_parser( + "draft", + help="print a conventional PR title and filled template body (local automation)", + ) + p_draft.add_argument( + "--repo", + type=Path, + default=None, + help="repository root (default: git top-level of cwd)", + ) + p_draft.add_argument( + "--base", + default=None, + help="base ref for git log (default: origin/main or main)", + ) + p_draft.add_argument( + "--head", + default=None, + help="head ref (default: HEAD)", + ) + p_draft.add_argument( + "--title-only", + action="store_true", + help="print only the suggested title line", + ) + p_draft.add_argument( + "--body-only", + action="store_true", + help="print only the suggested body", + ) + p_commits = sub.add_parser("commits", help="validate git commits in a range") p_commits.add_argument( "--base", @@ -162,6 +348,26 @@ def main() -> int: args = parser.parse_args() if args.command == "pr": return _cmd_pr() + if args.command == "draft": + try: + repo = args.repo.resolve() if args.repo else _git_toplevel() + except RuntimeError as exc: + print(f"pr_commit_policy: draft: {exc}", file=sys.stderr) + return 1 + try: + return _cmd_draft( + repo, + args.base, + args.head, + title_only=args.title_only, + body_only=args.body_only, + ) + except RuntimeError as exc: + print(f"pr_commit_policy: draft: {exc}", file=sys.stderr) + return 1 + except OSError as exc: + print(f"pr_commit_policy: draft: {exc}", file=sys.stderr) + return 1 if args.command == "commits": if not args.base 
or not args.head: print( diff --git a/scripts/update_files.sh b/scripts/update_files.sh deleted file mode 100644 index e85c8d8..0000000 --- a/scripts/update_files.sh +++ /dev/null @@ -1,12 +0,0 @@ -#!/bin/sh -# Sync VS Code settings (these should be identical between root and template) -cp .vscode/settings.json template/.vscode/settings.json.jinja -cp .vscode/launch.json template/.vscode/launch.json.jinja -cp .vscode/extensions.json template/.vscode/extensions.json.jinja - -# WARNING: .gitignore and .pre-commit-config.yaml have template-specific -# additions. Do NOT blindly copy. Instead, manually diff and merge: -# diff .gitignore template/.gitignore.jinja -# diff .pre-commit-config.yaml template/.pre-commit-config.yaml.jinja -echo "NOTE: .gitignore and .pre-commit-config.yaml must be manually diffed." -echo " template versions have extra entries (forbidden-rej-files hook, etc.)" diff --git a/template/.claude/rules/python/hooks.md b/template/.claude/rules/python/hooks.md index 88758c5..f92d2af 100644 --- a/template/.claude/rules/python/hooks.md +++ b/template/.claude/rules/python/hooks.md @@ -77,6 +77,18 @@ the same scope; running both would warn and then block on the same condition. Both source/test hooks only check top-level package modules (`src//.py`, excluding `__init__.py`). Nested packages are skipped. +### How to swap to the warn-only reminder + +The default strict hook (`pre-write-src-require-test.sh`) blocks any write to +`src//.py` when the matching test file is missing. If you prefer a non-blocking +reminder, swap the registration in `.claude/settings.json`: + +1. Locate the `PreToolUse` entry whose `command` is + `bash .claude/hooks/pre-write-src-require-test.sh`. +2. Replace `pre-write-src-require-test.sh` with `pre-write-src-test-reminder.sh` in that + entry. +3. Register only one at a time. Registering both produces duplicate output on every write. 
+ ## Refactor test guard (PostToolUse) | Hook | Trigger | What it does | diff --git a/tests/CLAUDE.md b/tests/CLAUDE.md index c08682b..8c35f1a 100644 --- a/tests/CLAUDE.md +++ b/tests/CLAUDE.md @@ -11,7 +11,10 @@ optional features are gated properly. | `test_template.py` | Main integration suite: renders the template and asserts output | | `test_pr_commit_policy.py` | Unit tests for `scripts/pr_commit_policy.py` (PR body + commit subject rules) | | `test_root_template_sync.py` | Tests for `scripts/check_root_template_sync.py` | +| `test_check_root_template_sync.py` | CLI `--help` smoke test for the sync checker | | `test_repo_file_freshness.py` | Unit tests for `scripts/repo_file_freshness.py` | +| `test_bump_version.py` | Unit tests for `scripts/bump_version.py` | +| `test_sync_skip_if_exists.py` | Unit tests for `scripts/sync_skip_if_exists.py` | ## How `test_template.py` works diff --git a/tests/test_bump_version.py b/tests/test_bump_version.py new file mode 100644 index 0000000..cc86ac9 --- /dev/null +++ b/tests/test_bump_version.py @@ -0,0 +1,83 @@ +"""Tests for ``scripts/bump_version`` (PEP 440 bumps and ``pyproject.toml`` I/O).""" + +from __future__ import annotations + +import importlib.util +import subprocess +import sys +from pathlib import Path +from typing import Literal, cast + +import pytest + +_REPO_ROOT = Path(__file__).resolve().parent.parent +_SCRIPT = _REPO_ROOT / "scripts" / "bump_version.py" +_SPEC = importlib.util.spec_from_file_location("bump_version", _SCRIPT) +assert _SPEC and _SPEC.loader +_mod = importlib.util.module_from_spec(_SPEC) +sys.modules["bump_version"] = _mod +_SPEC.loader.exec_module(_mod) +bv = _mod + + +def test_version_parse_accepts_simple_triplet() -> None: + """Parse a standard ``X.Y.Z`` string into components.""" + v = bv.Version.parse(" 2.10.3 ") + assert v.major == 2 and v.minor == 10 and v.patch == 3 + + +def test_version_parse_rejects_invalid() -> None: + """Non-numeric or wrong segment counts raise ``ValueError``.""" 
+ with pytest.raises(ValueError): + bv.Version.parse("1.2") + with pytest.raises(ValueError): + bv.Version.parse("a.b.c") + + +@pytest.mark.parametrize( + ("kind", "expected"), + [ + ("patch", (1, 2, 4)), + ("minor", (1, 3, 0)), + ("major", (2, 0, 0)), + ], +) +def test_version_bumped(kind: str, expected: tuple[int, int, int]) -> None: + """``bumped`` applies patch, minor, or major semantics.""" + base = bv.Version(1, 2, 3) + out = base.bumped(cast(Literal["patch", "minor", "major"], kind)) + assert (out.major, out.minor, out.patch) == expected + + +def test_read_and_write_project_version_roundtrip(tmp_path: Path) -> None: + """Read version from a minimal ``pyproject.toml`` and write a new one back.""" + path = tmp_path / "pyproject.toml" + path.write_text( + '[project]\nname = "x"\nversion = "0.1.0"\n\n[tool.x]\nkey = "v"\n', + encoding="utf-8", + ) + assert bv._read_project_version(path) == bv.Version(0, 1, 0) + bv._write_project_version(path, bv.Version(0, 1, 1)) + assert bv._read_project_version(path) == bv.Version(0, 1, 1) + + +def test_cli_new_version_prints_and_updates(tmp_path: Path) -> None: + """``--new-version`` updates the file and prints the version on stdout.""" + path = tmp_path / "pyproject.toml" + path.write_text('[project]\nname = "x"\nversion = "1.0.0"\n', encoding="utf-8") + proc = subprocess.run( + [ + sys.executable, + str(_SCRIPT), + "--pyproject", + str(path), + "--new-version", + "1.0.1", + ], + capture_output=True, + text=True, + check=False, + ) + assert proc.returncode == 0 + assert proc.stdout.strip() == "1.0.1" + assert 'version = "1.0.1"' in path.read_text(encoding="utf-8") diff --git a/tests/test_check_root_template_sync.py b/tests/test_check_root_template_sync.py new file mode 100644 index 0000000..f289dba --- /dev/null +++ b/tests/test_check_root_template_sync.py @@ -0,0 +1,25 @@ +"""CLI smoke tests for ``scripts/check_root_template_sync``. + +Detailed policy scenarios are in ``test_root_template_sync.py`` (integration-style name). 
+""" + +from __future__ import annotations + +import subprocess +import sys +from pathlib import Path + +_REPO_ROOT = Path(__file__).resolve().parent.parent +_SCRIPT = _REPO_ROOT / "scripts" / "check_root_template_sync.py" + + +def test_check_root_template_sync_help_exits_zero() -> None: + """The script exposes argparse ``--help`` and exits successfully.""" + proc = subprocess.run( + [sys.executable, str(_SCRIPT), "--help"], + capture_output=True, + text=True, + check=False, + ) + assert proc.returncode == 0 + assert "Check root/template sync" in proc.stdout or "sync" in proc.stdout.lower() diff --git a/tests/test_pr_commit_policy.py b/tests/test_pr_commit_policy.py index ad2e06b..674f11d 100644 --- a/tests/test_pr_commit_policy.py +++ b/tests/test_pr_commit_policy.py @@ -275,3 +275,27 @@ def test_validate_commit_range_rejects_bad_subject(tmp_path: Path) -> None: os.chdir(cwd) assert err is not None assert "bad subject" in err + + +@pytest.mark.parametrize( + ("branch", "expected"), + [ + ("chore/repo-cleanup-standards-remediation", "chore: repo cleanup standards remediation"), + ("feat/add-widget", "feat: add widget"), + ("fix/api/handle-null", "fix: api handle null"), + ], +) +def test_suggest_title_from_branch(branch: str, expected: str) -> None: + """Branch names ``type/slug`` map to ``type: slug`` with hyphens as spaces.""" + assert pcp.suggest_title_from_branch(branch) == expected + + +def test_suggest_title_from_branch_unknown_prefix() -> None: + """Branches without a known type prefix yield None.""" + assert pcp.suggest_title_from_branch("main") is None + assert pcp.suggest_title_from_branch("dependabot/pip/foo-1.0") is None + + +def test_suggest_title_from_branch_strips_ref() -> None: + """``refs/heads/`` prefix is ignored.""" + assert pcp.suggest_title_from_branch("refs/heads/docs/update-readme") == "docs: update readme" diff --git a/tests/test_sync_skip_if_exists.py b/tests/test_sync_skip_if_exists.py new file mode 100644 index 0000000..f43ed01 --- 
/dev/null +++ b/tests/test_sync_skip_if_exists.py @@ -0,0 +1,66 @@ +"""Tests for ``scripts/sync_skip_if_exists`` (``_skip_if_exists`` parsing and updates).""" + +from __future__ import annotations + +import importlib.util +import sys +from pathlib import Path + +_REPO_ROOT = Path(__file__).resolve().parent.parent +_SCRIPT = _REPO_ROOT / "scripts" / "sync_skip_if_exists.py" +_SPEC = importlib.util.spec_from_file_location("sync_skip_if_exists", _SCRIPT) +assert _SPEC and _SPEC.loader +_mod = importlib.util.module_from_spec(_SPEC) +sys.modules["sync_skip_if_exists"] = _mod +_SPEC.loader.exec_module(_mod) +ssi = _mod + + +def test_read_skip_block_extracts_entries() -> None: + """Parse YAML lines under ``_skip_if_exists`` into ordered entries.""" + text = """_skip_if_exists: + - foo + - bar + +_other: true +""" + parsed = ssi.read_skip_block(text) + assert parsed is not None + entries, start, end = parsed + assert entries == ["foo", "bar"] + assert start == 0 + assert end == 3 + + +def test_read_skip_block_missing_returns_none() -> None: + """When the key is absent, return ``None``.""" + assert ssi.read_skip_block("name: x\n") is None + + +def test_replace_skip_block_rewrites_list() -> None: + """Replace the list while preserving surrounding content.""" + text = """top: true +_skip_if_exists: + - old + +tail: end +""" + out = ssi.replace_skip_block(text, ["a", "b"]) + assert "_skip_if_exists:\n" in out + assert " - a\n" in out and " - b\n" in out + assert "old" not in out + assert "top: true" in out and "tail: end" in out + + +def test_compute_desired_entries_includes_base(tmp_path: Path) -> None: + """``BASE_SKIP_ENTRIES`` are always part of the desired set.""" + # Minimal repo layout: only copier.yml is not required for compute_desired_entries + root = tmp_path + desired = set(ssi.compute_desired_entries(root)) + for item in ssi.BASE_SKIP_ENTRIES: + assert item in desired + + +def test_repo_root_points_at_workspace_parent() -> None: + """``repo_root`` resolves to the 
directory containing ``scripts/``.""" + assert ssi.repo_root() == _REPO_ROOT