diff --git a/.github/workflows/pr-orchestrator.yml b/.github/workflows/pr-orchestrator.yml index d75d4130..b865862b 100644 --- a/.github/workflows/pr-orchestrator.yml +++ b/.github/workflows/pr-orchestrator.yml @@ -210,7 +210,7 @@ jobs: run: | python -m pip install --upgrade pip pip install "hatch" "virtualenv<21" coverage "coverage[toml]" pytest pytest-cov pytest-mock pytest-asyncio pytest-xdist pytest-timeout - pip install -e . + pip install -e ".[dev]" - name: Cache hatch environments if: needs.changes.outputs.skip_tests_dev_to_main != 'true' @@ -495,7 +495,7 @@ jobs: - name: Install type-check dependencies run: | python -m pip install --upgrade pip - pip install -e . basedpyright + pip install -e ".[dev]" - name: Run type checking run: | echo "πŸ” Running basedpyright type checking..." @@ -533,7 +533,7 @@ jobs: - name: Install lint dependencies run: | python -m pip install --upgrade pip - pip install -e . ruff basedpyright pylint + pip install -e ".[dev]" - name: Run linting run: | diff --git a/.github/workflows/specfact.yml b/.github/workflows/specfact.yml index f89e0d19..f883e94b 100644 --- a/.github/workflows/specfact.yml +++ b/.github/workflows/specfact.yml @@ -62,8 +62,8 @@ jobs: - name: Install SpecFact CLI run: | - echo "πŸ“¦ Installing SpecFact CLI..." - pip install -e . + echo "πŸ“¦ Installing SpecFact CLI (contracts extra for repro / CrossHair)..." + pip install -e ".[contracts]" - name: Enforce Core-Module Isolation run: | diff --git a/CHANGELOG.md b/CHANGELOG.md index 244f307d..62c7e0e5 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -10,6 +10,31 @@ All notable changes to this project will be documented in this file. --- +## [0.45.1] - 2026-04-03 + +### Changed + +- **Dependency install profiles**: the default wheel is slimmerβ€”CrossHair, Hypothesis, Ruff, Radon, + and unused pins (`python-dotenv`, `cffi`) are no longer in core `dependencies`. 
Use + `pip install specfact-cli[contracts]` for CrossHair + Hypothesis, or `pip install specfact-cli[dev]` + for contributors. `packaging` is pinned explicitly for module installer / PEP 440 use. +- **Smart-test baseline fallback**: incremental smart-test runs now establish a full-suite baseline when + no `last_full_run` cache exists (avoids a no-op incremental pass and misleading zero coverage). +- **Pre-commit single-invocation overwrite handling**: staged Python files are passed to the code-review + helper in one batch so `.specfact/code-review.json` is not overwritten by multiple `xargs` processes. + +### Fixed + +- Missing bundle UX: when workflow bundles are not installed, the CLI now reports the + **marketplace module** (e.g. `nold-ai/specfact-codebase` for the `code` group) instead of + `Command 'code' is not installed`, which was easy to confuse with the VS Code `code` CLI. + +- Generated GitHub workflow (`resources/templates/github-action.yml.j2`): GitHub Actions `if` + conditions now use `${{ … }}` so annotations, PR comment, and fail steps evaluate correctly + on GitHub (avoids mixed `always() &&` / raw expression parsing issues). + +--- + ## [0.44.0] - 2026-03-31 ### Added diff --git a/README.md b/README.md index 8b4653e9..10c593a9 100644 --- a/README.md +++ b/README.md @@ -71,26 +71,40 @@ With SpecFact, you get: ## How do I get started? -### Start Here (5 minutes) +### Start Here (about 2 minutes): scored code review β€” no `pip install` -### Install +**Point SpecFact at your code.** From a **git repository** (any branch), run two commands: ```bash -# Zero-install (recommended) -uvx specfact-cli@latest - -# Or install globally -pip install -U specfact-cli +uvx specfact-cli init --profile solo-developer +uvx specfact-cli code review run --path . --scope full ``` -### Bootstrap +You should see a **Verdict** (PASS/FAIL), a **Score**, and categorized **findings** β€” the fastest way to see SpecFact on real code before you dive into backlog, specs, or CI. 
+ +- **Command 1** installs the `solo-developer` bundles (including `specfact-codebase` and `specfact-code-review`) into your user module store so `code review` and related commands are available on the next invocation. +- **Command 2** runs the clean-code review on the repo at `.`. Use **`--scope full`** on the first run so review does not depend on having local git changes. + +**Already installed the CLI?** Use the same flow with `specfact` instead of `uvx specfact-cli`: ```bash -# Recommended first run specfact init --profile solo-developer +specfact code review run --path . --scope full ``` -### Get First Value +**Read the canonical walkthrough:** **[Documentation β€” Quickstart](https://docs.specfact.io/getting-started/quickstart/)** Β· **[Installation](https://docs.specfact.io/getting-started/installation/)** (uvx-first, then persistent install). + +### Install (persistent CLI for daily use) + +```bash +pip install -U specfact-cli +``` + +You can still use **`uvx specfact-cli@latest ...`** anytime without installing; it always fetches the latest published CLI. + +### After the wow path: deeper workflows + +When you want analysis, snapshots, or sidecar validation on top of the review layer: ```bash # Analyze a codebase you care about @@ -104,10 +118,7 @@ specfact code validate sidecar init my-project /path/to/repo specfact code validate sidecar run my-project /path/to/repo ``` -That path gives you a concrete first win: SpecFact understands your project context and gives you a -validated starting point instead of jumping straight into blind change work. - -### AI IDE Setup +### AI IDE setup ```bash specfact init ide @@ -125,7 +136,7 @@ your IDE. If module prompt payloads are not installed yet, the CLI uses packaged Use SpecFact as the validation layer around fast-moving implementation work. Start with: -- `specfact init --profile solo-developer` +- `uvx specfact-cli init --profile solo-developer` then `uvx specfact-cli code review run --path . 
--scope full` (see **Start Here** above) - `specfact code validate sidecar init /path/to/repo` - `specfact code validate sidecar run /path/to/repo` @@ -303,10 +314,11 @@ Use `https://modules.specfact.io/` for the in-depth backlog, project, spec, gove ## How It Works (High Level) -1. **Bootstrap**: install the CLI and initialize the official bundles you need. -2. **Analyze or sync**: import code, connect backlog systems, or sync external artifacts into project bundles. -3. **Validate**: run spec, governance, and sidecar validation flows before implementation or release. -4. **Iterate safely**: use module-provided workflows while the core runtime keeps command mounting, trust, and lifecycle consistent. +1. **Bootstrap**: use **uvx** or **pip**, then `init --profile` to install the bundles you need (for example `solo-developer` for a scored **code review** first). +2. **Review or analyze**: run **`code review run`** on a repo, or import code and snapshot state for deeper workflows. +3. **Sync**: connect backlog systems or sync external artifacts into project bundles when you are ready. +4. **Validate**: run spec, governance, and sidecar validation flows before implementation or release. +5. **Iterate safely**: use module-provided workflows while the core runtime keeps command mounting, trust, and lifecycle consistent. ## Where SpecFact Fits diff --git a/docs/core-cli/modes.md b/docs/core-cli/modes.md index 77da5257..963433ef 100644 --- a/docs/core-cli/modes.md +++ b/docs/core-cli/modes.md @@ -179,7 +179,7 @@ The `import from-code` command now uses mode-aware routing. You should see mode ```bash # Test with CI/CD mode (bundle name as positional argument) -hatch run specfact --mode cicd import from-code test-project --repo . --confidence 0.5 --shadow-only +hatch run specfact --mode cicd code import from-code test-project --repo . 
--confidence 0.5 --shadow-only # Expected output: # Mode: CI/CD (direct execution) @@ -189,7 +189,7 @@ hatch run specfact --mode cicd import from-code test-project --repo . --confiden ```bash # Test with CoPilot mode (bundle name as positional argument) -hatch run specfact --mode copilot import from-code test-project --repo . --confidence 0.5 --shadow-only +hatch run specfact --mode copilot code import from-code test-project --repo . --confidence 0.5 --shadow-only # Expected output: # Mode: CoPilot (agent routing) @@ -249,7 +249,7 @@ hatch run specfact code import my-project --repo . --confidence 0.7 ```bash # Developer wants CI/CD mode even though CoPilot is available (bundle name as positional argument) -hatch run specfact --mode cicd import from-code my-project --repo . --confidence 0.7 +hatch run specfact --mode cicd code import from-code my-project --repo . --confidence 0.7 # Expected: Mode: CI/CD (direct execution) - flag overrides auto-detection ``` diff --git a/docs/examples/integration-showcases/integration-showcases-quick-reference.md b/docs/examples/integration-showcases/integration-showcases-quick-reference.md index 79871720..103ad013 100644 --- a/docs/examples/integration-showcases/integration-showcases-quick-reference.md +++ b/docs/examples/integration-showcases/integration-showcases-quick-reference.md @@ -77,7 +77,7 @@ cd /tmp/specfact-integration-tests/example1_vscode # The AI will prompt for a plan name - suggest: "Payment Processing" # Alternative: CLI-only mode (bundle name as positional argument) -specfact --no-banner import from-code payment-processing --repo . --output-format yaml +specfact --no-banner code import from-code payment-processing --repo . 
--output-format yaml # Step 2: Run enforcement specfact --no-banner enforce stage --preset balanced @@ -95,7 +95,7 @@ specfact --no-banner enforce stage --preset balanced cd /tmp/specfact-integration-tests/example2_cursor # Step 1: Import code (bundle name as positional argument) -specfact --no-banner import from-code data-pipeline --repo . --output-format yaml +specfact --no-banner code import from-code data-pipeline --repo . --output-format yaml # Step 2: Test original (should pass) specfact --no-banner enforce stage --preset balanced @@ -117,7 +117,7 @@ specfact --no-banner plan compare src/pipeline.py src/pipeline_broken.py --fail- cd /tmp/specfact-integration-tests/example3_github_actions # Step 1: Import code (bundle name as positional argument) -specfact --no-banner import from-code user-api --repo . --output-format yaml +specfact --no-banner code import from-code user-api --repo . --output-format yaml # Step 2: Run enforcement specfact --no-banner enforce stage --preset balanced @@ -135,7 +135,7 @@ specfact --no-banner enforce stage --preset balanced cd /tmp/specfact-integration-tests/example4_precommit # Step 1: Initial commit (bundle name as positional argument) -specfact --no-banner import from-code order-processor --repo . --output-format yaml +specfact --no-banner code import from-code order-processor --repo . --output-format yaml git add . git commit -m "Initial code" @@ -220,7 +220,7 @@ for dir in example1_vscode example2_cursor example3_github_actions example4_prec echo "Testing $dir..." cd /tmp/specfact-integration-tests/$dir bundle_name=$(echo "$dir" | sed 's/example[0-9]_//') - specfact --no-banner import from-code "$bundle_name" --repo . --output-format yaml 2>&1 + specfact --no-banner code import from-code "$bundle_name" --repo . 
--output-format yaml 2>&1 specfact --no-banner enforce stage --preset balanced 2>&1 echo "---" done diff --git a/docs/examples/integration-showcases/integration-showcases-testing-guide.md b/docs/examples/integration-showcases/integration-showcases-testing-guide.md index 5b5e58b2..2f5cef77 100644 --- a/docs/examples/integration-showcases/integration-showcases-testing-guide.md +++ b/docs/examples/integration-showcases/integration-showcases-testing-guide.md @@ -223,7 +223,7 @@ def process_payment(request): ### Option B: CLI-only (For Integration Testing) ```bash -uvx specfact-cli@latest --no-banner import from-code --repo . --output-format yaml +uvx specfact-cli@latest --no-banner code import from-code --repo . --output-format yaml ``` **Note**: CLI-only mode uses AST-based analysis and may show "0 features" for minimal test cases. This is expected and the plan bundle is still created for manual contract addition. @@ -234,17 +234,17 @@ uvx specfact-cli@latest --no-banner import from-code --repo . --output-format ya - **Repeated runs**: Use `--no-banner` **before** the command to suppress banner output - **Important**: `--no-banner` is a global parameter and must come **before** the subcommand, not after - βœ… Correct: `specfact --no-banner enforce stage --preset balanced` - - βœ… Correct: `uvx specfact-cli@latest --no-banner import from-code --repo . --output-format yaml` + - βœ… Correct: `uvx specfact-cli@latest --no-banner code import from-code --repo . --output-format yaml` - ❌ Wrong: `specfact govern enforce stage --preset balanced --no-banner` - - ❌ Wrong: `uvx specfact-cli@latest import from-code --repo . --output-format yaml --no-banner` + - ❌ Wrong: `uvx specfact-cli@latest code import from-code --repo . --output-format yaml --no-banner` -**Note**: The `import from-code` command analyzes the entire repository/directory, not individual files. It will automatically detect and analyze all Python files in the current directory. 
+**Note**: The `code import from-code` command analyzes the entire repository/directory, not individual files. It will automatically detect and analyze all Python files in the current directory. **Important**: These examples are designed for **interactive AI assistant usage** (slash commands in Cursor, VS Code, etc.), not CLI-only execution. **CLI vs Interactive Mode**: -- **CLI-only** (`uvx specfact-cli@latest import from-code` or `specfact code import`): Uses AST-based analyzer (CI/CD mode) +- **CLI-only** (`uvx specfact-cli@latest code import from-code` or `specfact code import`): Uses AST-based analyzer (CI/CD mode) - May show "0 features" for minimal test cases - Limited to AST pattern matching - Works but may not detect all features in simple examples @@ -368,7 +368,7 @@ specfact --no-banner plan review django-example \ - βœ… Stories are present in the plan bundle - βœ… Acceptance criteria are complete and testable -**Note**: Contracts are **automatically extracted** during `import from-code` by the AST analyzer, but only if function signatures have type hints. For the async bug detection example, detecting "blocking I/O in async context" requires additional analysis (Semgrep async patterns, not just AST contracts). +**Note**: Contracts are **automatically extracted** during `code import from-code` by the AST analyzer, but only if function signatures have type hints. For the async bug detection example, detecting "blocking I/O in async context" requires additional analysis (Semgrep async patterns, not just AST contracts). #### Step 3.4: Set Up Enforcement Configuration @@ -511,7 +511,7 @@ Fix the blocking deviations or adjust enforcement config **What We've Accomplished**: -1. βœ… Created plan bundle from code (`import from-code`) +1. βœ… Created plan bundle from code (`code import from-code`) 2. βœ… Enriched plan with semantic understanding (added feature and stories) 3. βœ… Reviewed plan and added missing stories via CLI 4. 
βœ… Configured enforcement (balanced preset) @@ -637,7 +637,7 @@ Stories added (4 total): **Alternative**: CLI-only mode: ```bash -uvx specfact-cli@latest --no-banner import from-code --repo . --output-format yaml +uvx specfact-cli@latest --no-banner code import from-code --repo . --output-format yaml ``` **Note**: Interactive mode creates valid plan bundles with features. CLI-only may show 0 features for minimal test cases. Use `--no-banner` before the command to suppress banner output: `specfact --no-banner `. @@ -877,7 +877,7 @@ mv src/pipeline.py src/pipeline_original.py mv src/pipeline_broken.py src/pipeline.py # 3. Import broken code to create new plan -specfact --no-banner import from-code pipeline-broken --repo . --output-format yaml +specfact --no-banner code import from-code pipeline-broken --repo . --output-format yaml # 4. Compare new plan (from broken code) against enriched plan specfact --no-banner plan compare \ @@ -902,7 +902,7 @@ mv src/pipeline_original.py src/pipeline.py **What We've Accomplished**: -1. βœ… Created plan bundle from code (`import from-code`) +1. βœ… Created plan bundle from code (`code import from-code`) 2. βœ… Enriched plan with semantic understanding (added FEATURE-DATAPROCESSOR and 4 stories) 3. βœ… Reviewed plan and improved quality (added target users, value hypothesis, feature acceptance criteria, enhanced story acceptance criteria with Given/When/Then format) 4. βœ… Configured enforcement (balanced preset with HIGH β†’ BLOCK, MEDIUM β†’ WARN, LOW β†’ LOG) @@ -973,7 +973,7 @@ def get_user_stats(user_id: str) -> dict: **Alternative**: CLI-only mode: ```bash -specfact --no-banner import from-code --repo . --output-format yaml +specfact --no-banner code import from-code --repo . --output-format yaml ``` **Note**: Interactive mode creates valid plan bundles with features. CLI-only may show 0 features for minimal test cases. Use `--no-banner` before the command to suppress banner output: `specfact --no-banner `. 
@@ -1064,7 +1064,7 @@ This automatically generates `[tool.crosshair]` configuration in `pyproject.toml **What We've Accomplished**: -1. βœ… Created plan bundle from code (`import from-code`) +1. βœ… Created plan bundle from code (`code import from-code`) 2. βœ… Enriched plan with semantic understanding (if using interactive mode) 3. βœ… Configured enforcement (balanced preset) 4. βœ… Ran validation suite (`specfact code repro`) @@ -1143,7 +1143,7 @@ result = process_order(order_id="123") **Alternative**: CLI-only mode: ```bash -specfact --no-banner import from-code --repo . --output-format yaml +specfact --no-banner code import from-code --repo . --output-format yaml ``` **Important**: After creating the initial plan, we need to make it the default plan so `plan compare --code-vs-plan` can find it. Use `plan select` to set it as the active plan: @@ -1225,7 +1225,7 @@ Create `.git/hooks/pre-commit`: #!/bin/sh # First, import current code to create a new plan for comparison # Use default name "auto-derived" so plan compare --code-vs-plan can find it -specfact --no-banner import from-code --repo . --output-format yaml > /dev/null 2>&1 +specfact --no-banner code import from-code --repo . --output-format yaml > /dev/null 2>&1 # Then compare: uses active plan (set via plan select) as manual, latest code-derived plan as auto specfact --no-banner plan compare --code-vs-plan @@ -1243,7 +1243,7 @@ specfact --no-banner plan compare --code-vs-plan **Note**: The `--code-vs-plan` flag automatically uses: - **Manual plan**: The active plan (set via `plan select`) or `main.bundle.yaml` as fallback -- **Auto plan**: The latest `auto-derived` project bundle (from `import from-code auto-derived` or default bundle name) +- **Auto plan**: The latest `auto-derived` project bundle (from `code import from-code auto-derived` or default bundle name) Make it executable: @@ -1320,7 +1320,7 @@ Fix the blocking deviations or adjust enforcement config **What We've Accomplished**: -1. 
βœ… Created initial plan bundle from original code (`import from-code`) +1. βœ… Created initial plan bundle from original code (`code import from-code`) 2. βœ… Committed the original plan (baseline) 3. βœ… Modified code to introduce breaking change (added required `user_id` parameter) 4. βœ… Configured enforcement (balanced preset with HIGH β†’ BLOCK) @@ -1578,7 +1578,7 @@ rm -rf specfact-integration-tests **What's Validated**: -- βœ… Plan bundle creation (`import from-code`) +- βœ… Plan bundle creation (`code import from-code`) - βœ… Plan enrichment (LLM adds features and stories) - βœ… Plan review (identifies missing items) - βœ… Story addition via CLI (`plan add-story`) @@ -1605,7 +1605,7 @@ rm -rf specfact-integration-tests **What's Validated**: -- βœ… Plan bundle creation (`import from-code`) +- βœ… Plan bundle creation (`code import from-code`) - βœ… Plan enrichment (LLM adds FEATURE-DATAPROCESSOR and 4 stories) - βœ… Plan review (auto-enrichment adds target users, value hypothesis, feature acceptance criteria, enhanced story acceptance criteria) - βœ… Enforcement configuration (`enforce stage` with BALANCED preset) @@ -1627,7 +1627,7 @@ rm -rf specfact-integration-tests **What's Validated**: -- βœ… Plan bundle creation (`import from-code`) +- βœ… Plan bundle creation (`code import from-code`) - βœ… Plan selection (`plan select` sets active plan) - βœ… Enforcement configuration (`enforce stage` with BALANCED preset) - βœ… Pre-commit hook setup (imports code, then compares) @@ -1636,7 +1636,7 @@ rm -rf specfact-integration-tests **Test Results**: -- Plan creation: βœ… `import from-code ` creates project bundle at `.specfact/projects//` (modular structure) +- Plan creation: βœ… `code import from-code ` creates project bundle at `.specfact/projects//` (modular structure) - Plan selection: βœ… `plan select` sets active plan correctly - Plan comparison: βœ… `plan compare --code-vs-plan` finds: - Manual plan: Active plan (set via `plan select`) @@ -1647,9 +1647,9 
@@ rm -rf specfact-integration-tests **Key Findings**: -- βœ… `import from-code` should use bundle name "auto-derived" so `plan compare --code-vs-plan` can find it +- βœ… `code import from-code` should use bundle name "auto-derived" so `plan compare --code-vs-plan` can find it - βœ… `plan select` is the recommended way to set the baseline plan (cleaner than copying to `main.bundle.yaml`) -- βœ… Pre-commit hook workflow: `import from-code` β†’ `plan compare --code-vs-plan` works correctly +- βœ… Pre-commit hook workflow: `code import from-code` β†’ `plan compare --code-vs-plan` works correctly - βœ… Enforcement configuration is respected (HIGH β†’ BLOCK based on preset) **Conclusion**: Example 4 is **fully validated**. The pre-commit hook integration works end-to-end. The hook successfully imports current code, compares it against the active plan, and blocks commits when HIGH severity deviations are detected. The workflow demonstrates how SpecFact prevents breaking changes from being committed locally, before they reach CI/CD. @@ -1687,7 +1687,7 @@ rm -rf specfact-integration-tests Example 5 follows a similar workflow and should be validated using the same approach: 1. Create test files -2. Create plan bundle (`import from-code`) +2. Create plan bundle (`code import from-code`) 3. Enrich plan (if needed) 4. Review plan and add missing items 5. Configure enforcement diff --git a/docs/examples/integration-showcases/integration-showcases.md b/docs/examples/integration-showcases/integration-showcases.md index 84c3fb9e..cd90b06e 100644 --- a/docs/examples/integration-showcases/integration-showcases.md +++ b/docs/examples/integration-showcases/integration-showcases.md @@ -338,7 +338,7 @@ result = process_order(order_id="123") # ⚠️ Missing user_id #!/bin/sh # Import current code to create a new plan for comparison # Use bundle name "auto-derived" so plan compare --code-vs-plan can find it -specfact --no-banner import from-code auto-derived --repo . 
--output-format yaml > /dev/null 2>&1 +specfact --no-banner code import from-code auto-derived --repo . --output-format yaml > /dev/null 2>&1 # Compare: uses active plan (set via plan select) as manual, latest auto-derived plan as auto specfact --no-banner plan compare --code-vs-plan diff --git a/docs/examples/quick-examples.md b/docs/examples/quick-examples.md index 632816b3..c3ec6c09 100644 --- a/docs/examples/quick-examples.md +++ b/docs/examples/quick-examples.md @@ -66,7 +66,7 @@ specfact code import my-project --repo . --confidence 0.7 specfact code import my-project --repo . --shadow-only # CoPilot mode (enhanced prompts) -specfact --mode copilot import from-code my-project --repo . --confidence 0.7 +specfact --mode copilot code import from-code my-project --repo . --confidence 0.7 # Re-validate existing features (force re-analysis) specfact code import my-project --repo . --revalidate-features @@ -200,10 +200,10 @@ specfact init ide --ide cursor --force specfact code import my-project --repo . # Force CI/CD mode -specfact --mode cicd import from-code my-project --repo . +specfact --mode cicd code import from-code my-project --repo . # Force CoPilot mode -specfact --mode copilot import from-code my-project --repo . +specfact --mode copilot code import from-code my-project --repo . # Set via environment variable export SPECFACT_MODE=copilot diff --git a/docs/getting-started/installation.md b/docs/getting-started/installation.md index a664e2f8..ada8d40a 100644 --- a/docs/getting-started/installation.md +++ b/docs/getting-started/installation.md @@ -9,27 +9,25 @@ expertise_level: [beginner] # Getting Started with SpecFact CLI -This guide will help you get started with SpecFact CLI in under 60 seconds. +This guide will help you get started with SpecFact CLI in under 60 seconds β€” first with **no install** (uvx), then with a **persistent** install (pip) when you want IDE workflows and a stable `specfact` command. 
-> **Primary Use Case**: SpecFact CLI is designed for **brownfield code modernization** - reverse-engineering existing codebases into documented specs with runtime contract enforcement. See [First Steps](quickstart.md) for brownfield workflows. +> **Primary use case**: brownfield code modernization β€” reverse-engineering existing codebases into documented specs with runtime contract enforcement. See the [5-Minute Quickstart](quickstart.md) for a full walkthrough. -## Installation +## Try it now β€” no install required -### Option 1: uvx (CLI-only Mode) - -No installation required - run directly: +Run SpecFact from PyPI without installing into your environment: ```bash -uvx specfact-cli@latest --help +cd /path/to/your/git/repo +uvx specfact-cli init --profile solo-developer +uvx specfact-cli code review run --path . --scope full ``` -**Best for**: Quick testing, CI/CD, one-off commands +You should see a **Verdict**, a **Score**, and a list of findings. That is the fastest way to validate SpecFact on real code. [Read the full quickstart β†’](quickstart.md) -**Limitations**: CLI-only mode uses deterministic local analysis and may show limited results for very small test cases. If you want IDE slash-command workflows with your own AI copilot, use the installed CLI setup in Option 2. +## Install for persistent use -### Option 2: pip (Installed CLI + IDE Prompt Mode) - -**Required for**: local `specfact` command availability, IDE integration, and slash-command workflows +Use pip when you want a local `specfact` command, IDE integration, and slash-command workflows. 
```bash # System-wide @@ -68,23 +66,21 @@ specfact init --install all Then set up IDE integration: ```bash -# Initialize IDE integration (one-time per project) specfact init ide - -# Or specify IDE explicitly specfact init ide --ide cursor specfact init ide --ide vscode - -# Install required packages for contract enhancement specfact init ide --install-deps - -# Initialize for specific IDE and install dependencies specfact init ide --ide cursor --install-deps ``` **Important**: SpecFact CLI does **not** ship with built-in AI. `specfact init ide` installs prompt templates for supported IDEs so your chosen AI copilot can call SpecFact commands in a guided workflow. -### Option 3: Container +[More options ↓](#more-options) + +## More options +{: #more-options} + +### Container ```bash # Docker @@ -94,7 +90,7 @@ docker run --rm -v $(pwd):/workspace ghcr.io/nold-ai/specfact-cli:latest --help podman run --rm -v $(pwd):/workspace ghcr.io/nold-ai/specfact-cli:latest --help ``` -### Option 4: GitHub Action +### GitHub Action Create `.github/workflows/specfact.yml`: @@ -212,7 +208,7 @@ Profile outcomes: | Profile | Installed bundles | Available groups | |---|---|---| -| `solo-developer` | `specfact-codebase` | `code` | +| `solo-developer` | `specfact-codebase`, `specfact-code-review` | `code` | | `backlog-team` | `specfact-project`, `specfact-backlog`, `specfact-codebase` | `project`, `backlog`, `code` | | `api-first-team` | `specfact-spec`, `specfact-codebase` (+`specfact-project` dependency) | `project`, `code`, `spec` | | `enterprise-full-stack` | all five bundles | `project`, `backlog`, `code`, `spec`, `govern` | @@ -357,7 +353,7 @@ specfact code import my-project \ --report analysis.md # Analyze with CoPilot mode (enhanced prompts - CLI only, not for IDE) -specfact --mode copilot import from-code my-project \ +specfact --mode copilot code import from-code my-project \ --repo ./my-project \ --confidence 0.7 \ --report analysis.md diff --git 
a/docs/getting-started/quickstart.md b/docs/getting-started/quickstart.md index 1d35e081..e75741fb 100644 --- a/docs/getting-started/quickstart.md +++ b/docs/getting-started/quickstart.md @@ -4,71 +4,72 @@ title: 5-Minute Quickstart permalink: /getting-started/quickstart/ redirect_from: - /getting-started/first-steps/ -description: Get SpecFact CLI running in under 5 minutes - install, bootstrap, and analyze your first codebase. -keywords: [quickstart, first-run, bootstrap, analysis] +description: Get SpecFact CLI running in under 5 minutes β€” uvx first, then optional pip install for IDE workflows and deeper analysis. +keywords: [quickstart, first-run, bootstrap, analysis, uvx] audience: [solo, team] expertise_level: [beginner] doc_owner: specfact-cli tracks: - src/specfact_cli/** - openspec/** -last_reviewed: 2026-03-29 +last_reviewed: 2026-04-02 exempt: false exempt_reason: "" --- # 5-Minute Quickstart -Get from zero to your first SpecFact analysis in under 5 minutes. +Get from zero to a **scored code review** in a few commands. This path is aimed at developers who want one command and one clear result before reading about modules, profiles, or architecture. ## Prerequisites - Python 3.11+ (`python3 --version`) - A Git repository to analyze (or create a test project) -## Step 1: Install +## Step 1: Bootstrap with uvx (no pip install) + +From your repo root: ```bash -pip install specfact-cli +uvx specfact-cli init --profile solo-developer ``` -Or try without installing: `uvx specfact-cli@latest --help` +This installs the workflow bundles for the solo-developer profile (including the code-review module). See [specfact init](/core-cli/init/) for other profiles. -## Step 2: Bootstrap +## Step 2: Run a scored code review ```bash -# Navigate to your project -cd /path/to/your/project - -# Initialize with a profile -specfact init --profile solo-developer +uvx specfact-cli code review run --path . --scope full ``` -This installs the default set of workflow bundles. 
See [specfact init](/core-cli/init/) for other profiles. +You should see a **Verdict**, **Score**, and findings. That is the fastest β€œaha” path on a real codebase. + +## Step 3: Install SpecFact locally (optional) -## Step 3: Set Up IDE (Optional) +When you want a stable `specfact` command and IDE integration, install with pip: ```bash -specfact init ide --ide cursor --install-deps +pip install specfact-cli +cd /path/to/your/project +specfact init --profile solo-developer ``` -This creates `.specfact/` directory structure and IDE-specific prompt templates. - -## Step 4: Analyze Your Codebase +## Step 4: Set Up IDE (Optional) ```bash -specfact code import my-project --repo . +specfact init ide --ide cursor --install-deps ``` -SpecFact analyzes your code and extracts features, user stories, and dependency graphs into a project bundle at `.specfact/projects/my-project/`. +This creates `.specfact/` directory structure and IDE-specific prompt templates. -## Step 5: Check Project Health +## Step 5: Analyze Your Codebase and Check Health ```bash +specfact code import my-project --repo . specfact project health-check ``` -Review what SpecFact discovered about your codebase. +`code import` analyzes your code and extracts features, user stories, and dependency graphs into a project bundle at `.specfact/projects/my-project/`. `project health-check` summarizes what SpecFact discovered. ## Step 6: Validate diff --git a/docs/guides/copilot-mode.md b/docs/guides/copilot-mode.md index d2a40def..ffc3c769 100644 --- a/docs/guides/copilot-mode.md +++ b/docs/guides/copilot-mode.md @@ -30,7 +30,7 @@ Mode is auto-detected based on environment, or you can explicitly set it with `- ```bash # Explicitly enable CoPilot mode -specfact --mode copilot import from-code legacy-api --repo . --confidence 0.7 +specfact --mode copilot code import from-code legacy-api --repo . 
--confidence 0.7 # Mode is auto-detected based on environment (IDE integration, CoPilot API availability) specfact code import legacy-api --repo . --confidence 0.7 # Auto-detects CoPilot if available @@ -98,10 +98,10 @@ This context is used to generate enhanced prompts that instruct the AI IDE to: ```bash # CI/CD mode (fast, deterministic, Python-only) -specfact --mode cicd import from-code --repo . --confidence 0.7 +specfact --mode cicd code import from-code --repo . --confidence 0.7 # CoPilot mode (AI-first, semantic understanding, multi-language) -specfact --mode copilot import from-code --repo . --confidence 0.7 +specfact --mode copilot code import from-code --repo . --confidence 0.7 # Output (CoPilot mode): # Mode: CoPilot (AI-first analysis) diff --git a/docs/guides/troubleshooting.md b/docs/guides/troubleshooting.md index b25d45de..3998b421 100644 --- a/docs/guides/troubleshooting.md +++ b/docs/guides/troubleshooting.md @@ -135,7 +135,7 @@ specfact project health-check 4. **Use CoPilot mode** (recommended for brownfield - better semantic understanding): ```bash - specfact --mode copilot import from-code legacy-api --repo . --confidence 0.7 + specfact --mode copilot code import from-code legacy-api --repo . --confidence 0.7 ``` 5. **For legacy codebases**, start with minimal confidence and review extracted features: @@ -445,7 +445,7 @@ specfact project health-check 1. **Use explicit mode**: ```bash - specfact --mode copilot import from-code my-project --repo . + specfact --mode copilot code import from-code my-project --repo . ``` 2. **Check environment variables**: @@ -477,7 +477,7 @@ specfact project health-check 1. **Use CI/CD mode** (faster): ```bash - specfact --mode cicd import from-code my-project --repo . + specfact --mode cicd code import from-code my-project --repo . ``` 2. 
**Increase confidence threshold** (fewer features): diff --git a/docs/guides/use-cases.md b/docs/guides/use-cases.md index 193b3289..13f32e2d 100644 --- a/docs/guides/use-cases.md +++ b/docs/guides/use-cases.md @@ -46,7 +46,7 @@ specfact code import \ --report analysis-core.md # CoPilot mode (enhanced prompts, interactive) -specfact --mode copilot import from-code \ +specfact --mode copilot code import from-code \ --repo . \ --confidence 0.7 \ --report analysis.md diff --git a/docs/index.md b/docs/index.md index 9a46c7e7..d8d81fb9 100644 --- a/docs/index.md +++ b/docs/index.md @@ -1,117 +1,101 @@ --- layout: default title: SpecFact CLI Documentation -description: SpecFact is the validation and alignment layer for software delivery. Start here for the core CLI story, first steps, and the handoff into module-deep workflows. +description: Point SpecFact at your code, get a scored review and a fix list in minutes β€” then go deeper into backlog, specs, and CI when you need to. permalink: / -keywords: [specfact, core-cli, runtime, module-system, architecture] +keywords: [specfact, core-cli, quickstart, code review, onboarding] audience: [solo, team, enterprise] expertise_level: [beginner, intermediate, advanced] doc_owner: specfact-cli tracks: - src/specfact_cli/** - openspec/** -last_reviewed: 2026-03-29 +last_reviewed: 2026-04-02 exempt: false exempt_reason: "" --- # SpecFact CLI Documentation -SpecFact is the validation and alignment layer for software delivery. +**Point SpecFact at your code. Get a score and a list of what to fix.** No week-long setup β€” start with two commands, then add IDE prompts, backlog workflows, or CI gates when you want more. -This site is the canonical starting point for the core CLI story: what SpecFact is, why it exists, -what value you get from it, how to get started, and when to move into deeper bundle-owned workflows. +```bash +uvx specfact-cli init --profile solo-developer +uvx specfact-cli code review run --path . 
--scope full +``` -SpecFact does **not** include built-in AI. It pairs deterministic CLI commands with your chosen IDE -and copilot so fast-moving work has a stronger validation and alignment layer around it. +You should see a **Verdict** (PASS/FAIL), a **Score**, and a list of findings (for example dozens of categorized items on a real repo). That is the fastest way to see SpecFact on an existing project. [Read the full quickstart β†’](/getting-started/quickstart/) + +SpecFact does **not** include built-in AI. It pairs deterministic CLI commands with your chosen IDE and copilot so fast-moving work has a stronger validation and alignment layer around it. + +**SpecFact is the validation and alignment layer for software delivery.** --- ## What is SpecFact? -SpecFact helps you keep backlog intent, specifications, implementation, and validation from drifting -apart. +SpecFact helps you keep backlog intent, specifications, implementation, and validation from drifting apart. It supports spec-first handoffs with **OpenSpec** and spec-kit-style workflows so brownfield and AI-assisted teams can keep backlog language, specs, and code aligned. It is especially useful when: + - AI-assisted or β€œvibe-coded” work needs more rigor -- brownfield systems need trustworthy reverse-engineered understanding +- brownfield and legacy code need trustworthy reverse-engineered understanding of existing systems - teams want to avoid the β€œI wanted X but got Y” delivery failure - organizations need a path toward stronger shared policy enforcement ## Why does it exist? -Software delivery drifts in stages. Expectations change as they move from backlog language to -specification, from specification to implementation, and from implementation to review. SpecFact -exists to reduce that drift by giving you deterministic tooling for analysis, validation, and -alignment. 
+SpecFact exists because backlog/spec/code drift is expensive: teams ship the wrong thing, AI-assisted changes skip validation, and policy enforcement breaks down across IDEs and CI. SpecFact gives you a default starting point before you jump into module-deep workflows on the modules site. ## Why should I use it? -Use SpecFact when you want faster delivery without losing validation, stronger brownfield -understanding before making changes, and less drift between backlog intent, specifications, and the -code that actually lands. +Use SpecFact when you want faster delivery without losing validation, stronger brownfield understanding before making changes, and less drift between backlog intent, specifications, and the code that actually lands. ## What do I get? With SpecFact, you get: + - deterministic local tooling instead of opaque cloud dependence - a validation layer around AI-assisted delivery - codebase analysis and sidecar validation for brownfield work - stronger backlog/spec/code alignment -- a clean handoff from core runtime docs into module-deep workflows on `modules.specfact.io` +- a clean handoff from this site into module-deep workflows on [modules.specfact.io](https://modules.specfact.io/) ## How to get started -1. **[Installation](/getting-started/installation/)** - Install SpecFact CLI -2. **[5-Minute Quickstart](/getting-started/quickstart/)** - Get first value quickly -3. **[specfact init](/core-cli/init/)** - Bootstrap the core runtime and your local setup -4. **[Bootstrap Checklist](/module-system/bootstrap-checklist/)** - Verify bundle readiness +1. **[Installation](/getting-started/installation/)** β€” uvx (no install) or pip (persistent CLI) +2. **[5-Minute Quickstart](/getting-started/quickstart/)** β€” First commands on a repo +3. **[specfact init](/core-cli/init/)** β€” Profiles, bundles, and IDE setup +4. 
**[Bootstrap Checklist](/module-system/bootstrap-checklist/)** β€” Verify bundle readiness -If you are new to SpecFact, start here before jumping into module-deep workflows. - -## Choose Your Path +## Choose your path
-

Greenfield & AI-assisted delivery

-

Use SpecFact as the validation layer around fast-moving implementation work.

+

See what's wrong with your code right now

+

Run a scored code review on an existing repo with uvx, then iterate.

-

Brownfield and reverse engineering

-

Use SpecFact to understand an existing system and then hand insight into spec-first workflows.

+

Set up IDE slash-command workflows

+

Install the CLI, bootstrap bundles, then export prompts for Cursor, VS Code, and other IDEs.

-
-
-

Backlog to code alignment

-

Use SpecFact when the main problem is drift between expectations, specs, and implementation.

-
-

Team and policy enforcement

-

Use core runtime, governance, and shared workflow conventions to scale rigor across teams.

+

Add a pre-commit or CI gate

+

Wire SpecFact into local hooks or GitHub Actions for repeatable checks.

@@ -120,17 +104,17 @@ If you are new to SpecFact, start here before jumping into module-deep workflows The `specfact-cli` package provides the stable platform surface: -- **[specfact init](/core-cli/init/)** - Bootstrap and IDE setup -- **[specfact module](/core-cli/module/)** - Module lifecycle management -- **[specfact upgrade](/core-cli/upgrade/)** - CLI updates -- Runtime contracts, module discovery, registry bootstrapping, publisher trust, and shared orchestration +- **[specfact init](/core-cli/init/)** β€” Bootstrap bundles and optional IDE setup +- **[specfact module](/core-cli/module/)** β€” Install, enable, and upgrade workflow modules +- **[specfact upgrade](/core-cli/upgrade/)** β€” CLI self-update -Installed modules mount workflows under `project`, `backlog`, `code`, `spec`, and `govern`. +Installed modules add command groups such as `project`, `backlog`, `code`, `spec`, and `govern`. Deeper bundle docs live on [modules.specfact.io](https://modules.specfact.io/). ## Modules Documentation -`docs.specfact.io` is the default starting point. Move to the modules site when you need deeper -bundle-specific workflows, adapters, and authoring guidance. +`docs.specfact.io` is the default starting point and the **canonical starting point for the core CLI story** +for first-time readers on this site. Move to the modules site when you need **module-deep workflows**, +bundle-specific adapters, and authoring guidance. 
- **[Modules Docs Home](https://modules.specfact.io/)** - Backlog, project, spec, govern - **[Module Development](https://modules.specfact.io/authoring/module-development/)** - Build your own modules diff --git a/openspec/CHANGE_ORDER.md b/openspec/CHANGE_ORDER.md index 97f38955..c7e71951 100644 --- a/openspec/CHANGE_ORDER.md +++ b/openspec/CHANGE_ORDER.md @@ -126,6 +126,7 @@ The 2026-03-22 clean-code plan adds one new cross-repo change pair and re-sequen | docs | 08 | docs-12-docs-validation-ci | [#440](https://github.com/nold-ai/specfact-cli/issues/440) | docs-05-core-site-ia-restructure; docs-07-core-handoff-conversion; modules-repo/docs-06 through docs-10 | | docs | 09 | docs-13-core-nav-search-theme-roles | [#458](https://github.com/nold-ai/specfact-cli/issues/458) | docs-05-core-site-ia-restructure; docs-07-core-handoff-conversion; docs-12-docs-validation-ci; modules-repo/docs-13-nav-search-theme-roles (design parity only, no content ownership coupling) | | docs | 10 | docs-14-first-contact-story-and-onboarding (in progress) | [#466](https://github.com/nold-ai/specfact-cli/issues/466) | docs-05-core-site-ia-restructure βœ…; docs-07-core-handoff-conversion βœ…; docs-12-docs-validation-ci βœ…; docs-13-core-nav-search-theme-roles βœ…; Parent Feature: [#356](https://github.com/nold-ai/specfact-cli/issues/356) | +| docs | 11 | docs-new-user-onboarding | [#476](https://github.com/nold-ai/specfact-cli/issues/476) | Parent Feature: [#356](https://github.com/nold-ai/specfact-cli/issues/356); related [#466](https://github.com/nold-ai/specfact-cli/issues/466); vibe-coder uvx hero + CLI wow-path fixes | ### Docs refactoring plan addendum (2026-03-23) diff --git a/openspec/changes/docs-new-user-onboarding/.openspec.yaml b/openspec/changes/docs-new-user-onboarding/.openspec.yaml new file mode 100644 index 00000000..0f528039 --- /dev/null +++ b/openspec/changes/docs-new-user-onboarding/.openspec.yaml @@ -0,0 +1,2 @@ +schema: spec-driven +created: 2026-04-01 diff --git 
a/openspec/changes/docs-new-user-onboarding/TDD_EVIDENCE.md b/openspec/changes/docs-new-user-onboarding/TDD_EVIDENCE.md new file mode 100644 index 00000000..6eb9678f --- /dev/null +++ b/openspec/changes/docs-new-user-onboarding/TDD_EVIDENCE.md @@ -0,0 +1,54 @@ +# TDD evidence β€” docs-new-user-onboarding + +## 2026-04-02 (README + wow entrypoint contract) + +### Commands run (passing) + +- `hatch run pytest tests/unit/docs/test_wow_entrypoint_contract.py tests/e2e/test_wow_entrypoint.py tests/unit/docs/test_first_contact_story.py -v --no-cov` +- `hatch run format` + +### Summary + +- **README.md**: Rewrote **How do I get started** so the uvx two-command wow path (`init` + `code review run --scope full`) is first; persistent install and deeper workflows follow; **How It Works** updated to lead with review. +- **Tests**: `tests/unit/docs/test_wow_entrypoint_contract.py` locks README ↔ `docs/index.md` canonical command strings and section order; `tests/e2e/test_wow_entrypoint.py` runs `init --profile solo-developer` in a **temp git repo** and asserts registry readiness for the documented second step (mock bundles). + +## 2026-04-02 (implementation session) + +### Commands run (passing) + +- `hatch test tests/unit/specfact_cli/registry/test_profile_presets.py tests/unit/specfact_cli/modules/test_multi_module_install_uninstall.py tests/unit/specfact_cli/modules/test_module_upgrade_improvements.py tests/unit/specfact_cli/test_module_not_found_error.py tests/unit/specfact_cli/registry/test_dependency_resolver_pip_free.py tests/unit/specfact_cli/registry/test_versioned_bundle_deps.py -q` +- `hatch run format` +- `hatch run type-check` (0 errors; existing baseline warnings) + +### Summary + +- Profile `solo-developer` includes `specfact-code-review`; init installs marketplace bundle via `install_bundles_for_init`. +- `dependency_resolver` skips pip validation when pip is unavailable (uvx). 
+- `module_installer`: versioned `bundle_dependencies` dict entries; actionable `core_compatibility` error.
+- `module` CLI: multi-install, multi-uninstall, upgrade with latest-skip, major-bump gate, `--yes`.
+- Root CLI: module-not-found message includes `uvx specfact-cli init --profile solo-developer`.
+- Init: prints `Installed: …` after profile/`--install` bundle install.
+- Docs: `docs/index.md`, `docs/getting-started/installation.md`, `docs/getting-started/quickstart.md` updated for vibe-coder entry path.
+
+### Deferred / follow-up
+
+- **`specfact code review run --path .` without `--scope full`**: UX lives primarily in the **specfact-code-review** module (`nold-ai/specfact-cli-modules`); not changed in this repo.
+- **`openspec sync --change …`**: local OpenSpec CLI has no `sync` subcommand in this environment; run the project’s documented sync workflow when available before archive.
+- **7d full dependency-resolution wiring**: `_extract_bundle_dependencies` + message improvements landed; interactive dep resolution / `--dry-run` / graph (7d.11–7d.16) remain for a follow-up change if not bundled here.
+
+## 2026-04-02 (rebase + gate continuation)
+
+### Commands run (passing)
+
+- `git rebase origin/dev` (resolved `tasks.md` conflict; kept 7c.7 + 11.0)
+- `hatch run yaml-lint`
+- `hatch run contract-test`
+- `hatch run pytest tests/unit -n 0 -q --no-cov` (full unit suite)
+
+### Fixes for dev merge
+
+- `docs/index.md`: restored first-contact story strings (`Why does it exist?`, tagline, canonical core CLI story, OpenSpec mention) for `test_first_contact_story` / `test_core_docs_site_contract` / `test_release_docs_parity`.
+- `test_first_run_selection.py`: expectations for `solo-developer` + `install all` include `specfact-code-review` / six canonical bundles.
+- `test_lean_help_output.py`: accept uvx init hint alongside the `<profile>` placeholder. 
+- `test_commands.py` / `test_module_installer.py`: align with `nold-ai/specfact-backlog` install id and new `core_compatibility` error text. +- `test_multi_module_install_uninstall.py`: autouse fixture re-bootstraps `CommandRegistry` + `rebuild_root_app_from_registry()` after category-group tests mutate global CLI state. diff --git a/openspec/changes/docs-new-user-onboarding/design.md b/openspec/changes/docs-new-user-onboarding/design.md new file mode 100644 index 00000000..b0ef7b0a --- /dev/null +++ b/openspec/changes/docs-new-user-onboarding/design.md @@ -0,0 +1,125 @@ +## Context + +docs.specfact.io is a Jekyll static site (GitHub Pages). Source: `docs/`, front-matter required +on every page. Homepage is `docs/index.md`. Sidebar navigation in `docs/_layouts/default.html`. + +Two distinct user cohorts arrive at the docs: +- **Vibe coders**: non-Python-expert, heard "validate your vibe code", want results in seconds, + will not read installation guides. Their mental model: run one command, see something useful. +- **Experienced developers**: understand pip, virtual envs, module systems. Current docs already + work for this group. They are NOT the audience being lost. + +Testing confirms the real vibe-coder "wow" sequence: +```bash +uvx specfact-cli init --profile solo-developer # ~5 seconds, user-level module install +uvx specfact-cli code review run --path . --scope full # ~4 seconds, scored review output +``` +Total time to first result: ~10 seconds, zero pip install, zero virtual environment. + +This sequence works because `uvx specfact-cli init` installs modules at user level, and +subsequent `uvx specfact-cli` invocations detect and use them. + +**Friction points identified through direct testing:** +1. Running `uvx specfact-cli code review run --path .` without `--scope full` gives a confusing + git-diff error ("Unable to determine changed tracked files"). Vibe coders will stop here. +2. 
Running `uvx specfact-cli code review run` without init gives "Command 'code' is not installed"
+   β€” acceptable message but the fix ("specfact init --profile <profile>") uses jargon.
+3. The `code review run` command is not mentioned anywhere on the homepage.
+4. The uvx path on the installation page is labelled "CLI-only Mode" and immediately warns about
+   "limited results" β€” this actively discourages the primary vibe-coder path.
+
+This is a docs change plus a minor CLI UX improvement (error message and `--scope` default).
+
+## Goals / Non-Goals
+
+**Goals:**
+- Homepage leads with the 2-command vibe-coder sequence and names `code review run` explicitly
+- uvx is the hero install method; pip is secondary for users who want a persistent installation
+- "Command 'code' is not installed" error tells the user the exact init command to run
+- `code review run --path .` works without requiring the user to know about `--scope full`
+  (either by defaulting to full scope when git diff is unavailable, or with a helpful inline hint)
+- 3 action-oriented path cards; no persona or product-dimension labels
+- Progressive disclosure: vibe coder hits wow in 10 seconds, then finds depth if curious
+- All current advanced content remains β€” just reordered
+
+**Non-Goals:**
+- Redesigning the Jekyll theme or sidebar
+- Adding new CLI commands beyond a minor error-message improvement
+- Rewriting modules.specfact.io
+- Changing any URL permalink
+- Explaining what "contracts", "icontract", or "beartype" mean on entry-level pages
+
+## Decisions
+
+**Decision 1: uvx as hero path, not "Option 1 with limitations"**
+
+The current framing "CLI-only Mode (uvx) β€” Limitations: may show limited results" actively
+discourages the exact path vibe coders need. The `code review run` command produces full output
+via uvx (all tools: ruff, radon, semgrep, basedpyright, pylint, contracts). The "limitations"
+note referred to early-stage behaviour that no longer applies. 
+ +Replacement: uvx is the first thing a new visitor sees on the installation page, labelled +"Try it now β€” no install required". pip is presented below as "Install for persistent use". + +**Decision 2: 2-command hero block on homepage, not a 4-command "quickstart"** + +The previous proposal embedded a 4-command block (pip install β†’ init β†’ code import β†’ repro). +For a vibe coder, `code import` and `repro` are unknown commands. The new block is 2 commands: +init β†’ code review run. The output (score + findings) is described inline so the user knows +what they'll see before they run it. + +**Decision 3: `--scope full` guidance β€” docs fix, CLI default as stretch goal** + +The confusing git-diff error when running `code review run --path .` needs to be fixed. +Primary approach: document the correct invocation clearly and consistently as +`code review run --path . --scope full`. Stretch goal: if the CLI can default to full scope +when not in a git repo or when no diff is available, that is a small quality-of-life win worth +a separate one-line fix in `review_run_command` β€” but it is not blocking this change. + +**Decision 4: Module-not-found error message improvement is in scope** + +The error "Command 'code' is not installed. Install workflow bundles with specfact init +--profile ..." is technically correct but uses jargon. Change: add the literal command +`uvx specfact-cli init --profile solo-developer` as the suggested fix when running via uvx, +alongside the generic message. This is a minor string change in the registry/bootstrap error path. + +**Decision 5: Path cards β€” 3 outcome cards ordered by frequency of first intent** + +1. "See what's wrong with your code right now" (code review run β€” the vibe-coder path) +2. "Set up IDE slash-command workflows" (init ide β€” the power-user path) +3. 
"Add a pre-commit or CI gate" (repro / GitHub Action β€” the team path) + +The old Greenfield/Brownfield/Backlog/Team taxonomy maps to internal product dimensions, not to +user intents at first contact. Users can be routed to those dimensions from within the cards. + +## Risks / Trade-offs + +[Risk]: Reordering uvx above pip on the installation page may confuse Python developers who +expect pip as the standard install path. +β†’ Mitigation: pip section remains immediately after uvx with a clear "For persistent installation" +label. No content is removed. + +[Risk]: Improving the module-not-found error message touches production CLI code. +β†’ Mitigation: it is a string change in the error output path, no logic change. If it misses this +PR it can ship as a tiny follow-up fix; the docs improvement is independent. + +[Risk]: "See what's wrong with your code right now" as the primary card might create +expectations that the tool is AI-powered. +β†’ Mitigation: the card body clarifies "deterministic analysis: naming, complexity, contracts, +types β€” no AI, no cloud" so the expectation is set correctly. + +## Migration Plan + +1. Update `docs/index.md`: hero + 2-command uvx block + 3 cards + reordered sections +2. Update `docs/getting-started/installation.md`: uvx hero β†’ pip secondary β†’ other options +3. Update `docs/getting-started/quickstart.md`: reframe opening, lead with uvx path +4. (Minor CLI) Improve module-not-found error message in registry bootstrap error path +5. No URL changes, no redirects needed +6. Local build: `bundle exec jekyll serve` β€” verify rendering +7. PR to `dev` branch + +## Open Questions + +- Should `code review run --path .` default to `--scope full` when a git diff is unavailable, + or show an inline help hint? Decision deferred to implementer β€” either fix is acceptable. + The docs MUST document `--scope full` explicitly regardless. 
diff --git a/openspec/changes/docs-new-user-onboarding/proposal.md b/openspec/changes/docs-new-user-onboarding/proposal.md new file mode 100644 index 00000000..9bec3688 --- /dev/null +++ b/openspec/changes/docs-new-user-onboarding/proposal.md @@ -0,0 +1,151 @@ +## Why + +User feedback and direct testing reveal two distinct user cohorts arriving at docs.specfact.io with +very different mental models: + +**Vibe coders** (new, non-Python-expert audience): heard "validate your vibe code with specfact", +want one command, want to see results in seconds, have no patience for install guides, module +concepts, or architecture diagrams. Their question is: *what do I run right now?* + +**Experienced developers**: understand pip, virtual envs, profiles, and module systems. They can +navigate the existing docs. The current docs already work for them β€” the problem is these users +are not the ones being lost. + +The current docs are built entirely for the second group. The first group bounces immediately +because: (1) the homepage hero uses platform-internal vocabulary before showing any command, +(2) the `code review run` command β€” the single highest-impact entry point for vibe coders β€” is +not mentioned on the homepage at all, (3) the uvx path is listed under "Option 1" with an +immediate "Limitations" warning that actively discourages it, (4) the path cards group users by +persona and product dimension rather than by what they want to do right now. + +The intended vibe-coder entry sequence is: +```bash +uvx specfact-cli init --profile solo-developer # once β€” should install modules +uvx specfact-cli code review run --path . --scope full # the "wow" command +``` +This sequence should work in ~10 seconds with no pip install and no virtual environment. However, +direct testing reveals it is **completely broken** due to three bugs: + +**Bug 1 β€” `init --profile solo-developer` installs nothing**: Running this command reports +"Bootstrap complete. 
Modules discovered: 8 (enabled=8)" but installs no workflow modules. +`code review run` still fails with "Command 'code' is not installed" immediately after. + +**Bug 2 β€” `module install` fails via uvx**: Running `specfact module install nold-ai/specfact-code-review` +under uvx fails with "No module named pip" because the uvx-isolated environment has no pip. +There is currently no working path to install modules via uvx on a fresh machine. + +**Bug 3 β€” profile `solo-developer` is incomplete**: Even if install were working, `solo-developer` +maps to `specfact-codebase` in the docs, but `code review run` requires TWO modules: +`nold-ai/specfact-codebase` AND `nold-ai/specfact-code-review`. The profile does not include +the code-review module. + +A fourth UX problem: running `uvx specfact-cli code review run --path .` without `--scope full` +produces a confusing git-diff error. Vibe coders will stop there and think the tool is broken. + +**This means the "wow" path does not exist yet.** All four issues must be fixed before the docs +can truthfully describe a vibe-coder entry sequence. 
+ +## What Changes + +**Bug fixes (blocking the "wow" path):** +- **Fix `init --profile` module installation**: `specfact init --profile solo-developer` must + actually install the modules defined for that profile, not just bootstrap the runtime +- **Fix `module install` under uvx**: module installation must work without pip in the uvx + environment (use the bundled package approach or a pip-free install path) +- **Update `solo-developer` profile**: include `nold-ai/specfact-code-review` alongside + `nold-ai/specfact-codebase` so the profile delivers a working `code review run` command +- **Fix `code review run --path .` without `--scope full`**: either default to full scope when + no git diff is available, or emit an error that includes the corrective command + +**Docs improvements (unlocked once bugs are fixed):** +- **Homepage hero completely rewritten**: opens with the vibe-coder outcome statement, immediately + followed by the working 2-command uvx sequence +- **`code review run` is explicitly named on the homepage** as the primary entry command +- **uvx path promoted from "Option 1 with limitations" to the hero path** on the installation page +- **3 outcome-oriented path cards** replace the 4 topic/persona cards +- **Architectural jargon deferred** to Architecture/Reference sections +- **Progressive disclosure preserved**: all advanced content remains, reordered + +## Capabilities + +### New Capabilities +- `dependency-resolution`: Version-aware bundle dependency resolution for `module install` and + `module upgrade` β€” versioned specifiers in registry `index.json` and `module-package.yaml`, + user prompts on missing/mismatched deps, `--yes` for auto-resolve, `--dry-run` for preview, + circular dep detection, actionable `core_compatibility` errors +- `docs-aha-moment-entry`: Homepage and installation page are restructured so a vibe coder can + reach a scored `code review run` result in under 2 commands and ~10 seconds, without pip install + or prior SpecFact 
knowledge +- `docs-vibecoder-entry-path`: Vibe-coder-specific entry path: uvx init β†’ uvx code review run, + with the scored output as the explicit "wow" proof point on the homepage + +### Modified Capabilities +- `entrypoint-onboarding`: (1) Primary fast-start path must be inline on homepage; (2) path cards + name user actions not personas; (3) `code review run` is the named primary command +- `first-contact-story`: Hero pairs identity with a plain-language outcome; no architectural + vocabulary in the hero +- `first-run-selection`: `init --profile` MUST install the profile's modules; module-not-found + error MUST include the exact corrective command +- `profile-presets`: `solo-developer` profile MUST include `nold-ai/specfact-code-review` + alongside `nold-ai/specfact-codebase` +- `module-installation`: (1) `module upgrade` MUST distinguish actually-upgraded from + already-up-to-date β€” showing `X -> X` when nothing changed is a bug; (2) `module upgrade` + MUST accept multiple selective module names and MUST prompt before applying a major version + bump (breaking change gate), skippable with `--yes` or auto-skipped in CI/CD mode; + (3) `module install` and `module uninstall` MUST both accept multiple positional module IDs + so users can act on several modules in one command (same UX as apt/pip/brew) + +## Impact + +**CLI changes:** +- `src/specfact_cli/modules/init/` β€” fix `--profile` to actually install profile modules +- `src/specfact_cli/modules/module-registry/` or install path β€” fix pip-free install under uvx +- Profile definition for `solo-developer` β€” add `specfact-code-review` to bundle list +- `src/specfact_cli/` review_run command or registry β€” fix `--scope` default / better error +- Module-not-found error path β€” include exact corrective command +- `src/specfact_cli/modules/module_registry/src/commands.py:_run_marketplace_upgrades` β€” + distinguish actually-upgraded vs already-up-to-date; never show `X -> X` +- 
`src/specfact_cli/modules/module_registry/src/commands.py:upgrade` β€” accept multiple positional + module names; check registry `latest_version` before installing; prompt on major version bumps; + `--yes` flag to bypass prompt; auto-skip major bumps in CI/CD mode +- `src/specfact_cli/modules/module_registry/src/commands.py:install` β€” accept multiple module + IDs as positional arguments (same UX as apt/pip/brew) +- `src/specfact_cli/modules/module_registry/src/commands.py:uninstall` β€” accept multiple module + names as positional arguments +- `registry/index.json` (specfact-cli-modules repo) β€” extend `bundle_dependencies` schema to + support `{"id": "...", "version": ">=x.y.z"}` objects alongside plain string entries +- `src/specfact_cli/registry/module_installer.py` β€” evaluate `module_dependencies_versioned` + and versioned `bundle_dependencies`; prompt on missing/mismatched deps; `--yes` auto-resolve; + `--dry-run` preview; circular dep detection +- `src/specfact_cli/registry/dependency_resolver.py` β€” add module-to-module resolution + (analogous to existing pip resolution) +- `core_compatibility` error path β€” replace silent exception with actionable user-facing message + +**Docs changes:** +- `docs/index.md` β€” primary rewrite (hero + uvx block + 3 cards) +- `docs/getting-started/installation.md` β€” promote uvx, restructure options +- `docs/getting-started/quickstart.md` β€” reframe for vibe-coder audience + +**Spec changes:** +- `openspec/specs/entrypoint-onboarding/spec.md` β€” delta +- `openspec/specs/first-contact-story/spec.md` β€” delta +- `openspec/specs/first-run-selection/spec.md` β€” delta (profile install requirement) +- `openspec/specs/profile-presets/spec.md` β€” delta (solo-developer bundle list) +- New specs: `docs-aha-moment-entry`, `docs-vibecoder-entry-path` + +## Source Tracking + +- **GitHub Issue**: [issue-476] +- **Issue URL**: [issue-476-url] +- **Parent Feature**: [issue-356] β€” Documentation & Discrepancy Remediation 
([tracking comment][comment-356]) +- **Related (overlap)**: [issue-466] β€” first-contact / onboarding ([cross-link comment][comment-466]) +- **Repository**: nold-ai/specfact-cli +- **Last Synced Status**: in-progress β€” issue created with labels `enhancement`, `change-proposal`, `documentation`, + `openspec` + +[issue-476]: https://github.com/nold-ai/specfact-cli/issues/476 +[issue-476-url]: https://github.com/nold-ai/specfact-cli/issues/476 +[issue-356]: https://github.com/nold-ai/specfact-cli/issues/356 +[comment-356]: https://github.com/nold-ai/specfact-cli/issues/356#issuecomment-4180162525 +[issue-466]: https://github.com/nold-ai/specfact-cli/issues/466 +[comment-466]: https://github.com/nold-ai/specfact-cli/issues/466#issuecomment-4180162609 diff --git a/openspec/changes/docs-new-user-onboarding/specs/dependency-resolution/spec.md b/openspec/changes/docs-new-user-onboarding/specs/dependency-resolution/spec.md new file mode 100644 index 00000000..19aa63a9 --- /dev/null +++ b/openspec/changes/docs-new-user-onboarding/specs/dependency-resolution/spec.md @@ -0,0 +1,166 @@ +## ADDED Requirements + +### Requirement: Registry index supports versioned bundle dependencies + +The marketplace registry `index.json` SHALL support optional version specifiers on +`bundle_dependencies` entries. Each entry MAY be either a plain module ID string (unversioned, +backward-compatible) or an object with `id` and `version` fields (versioned). The CLI installer +SHALL handle both forms. 
+ +#### Scenario: Registry entry declares a versioned bundle dependency + +- **GIVEN** a registry entry with: + ```json + "bundle_dependencies": [ + {"id": "nold-ai/specfact-project", "version": ">=0.41.0"} + ] + ``` +- **WHEN** the installer processes this entry +- **THEN** the installer SHALL treat `nold-ai/specfact-project` as a required dependency with + the constraint `>=0.41.0` + +#### Scenario: Registry entry declares an unversioned bundle dependency (backward compat) + +- **GIVEN** a registry entry with `"bundle_dependencies": ["nold-ai/specfact-project"]` +- **WHEN** the installer processes this entry +- **THEN** the installer SHALL treat the dependency as requiring any installed version +- **AND** SHALL NOT reject existing manifests that use plain string form + +### Requirement: Install-time dependency version resolution + +During `specfact module install`, the system SHALL resolve all `bundle_dependencies` from both +the registry index and the module's `module-package.yaml` manifest. For each dependency: +- If the dependency is not installed, the CLI SHALL prompt the user to install it +- If the dependency is installed but its version does not satisfy the declared specifier, the CLI + SHALL prompt the user to upgrade it +- With `--yes`, missing or mismatched dependencies SHALL be auto-resolved without prompting +- With `--skip-deps`, dependency resolution SHALL be skipped entirely (existing behaviour) + +#### Scenario: Installing a module whose dependency is not installed + +- **GIVEN** module A declares `bundle_dependencies: [{"id": "nold-ai/specfact-project", "version": ">=0.41.0"}]` +- **AND** `specfact-project` is NOT installed +- **WHEN** user runs `specfact module install A` +- **THEN** the CLI SHALL print: + `A requires nold-ai/specfact-project >=0.41.0 which is not installed.` +- **AND** in interactive mode SHALL prompt: `Install nold-ai/specfact-project now? 
[Y/n]` +- **AND** if the user confirms, SHALL install the dependency before installing A +- **AND** if the user declines, SHALL abort with exit code 1 + +#### Scenario: Installing a module whose dependency version is insufficient + +- **GIVEN** module A requires `nold-ai/specfact-project >=0.41.0` +- **AND** `specfact-project` is installed at version `0.40.2` +- **WHEN** user runs `specfact module install A` +- **THEN** the CLI SHALL print: + `A requires nold-ai/specfact-project >=0.41.0 but 0.40.2 is installed.` +- **AND** in interactive mode SHALL prompt: `Upgrade nold-ai/specfact-project to satisfy constraint? [Y/n]` +- **AND** if confirmed, SHALL upgrade the dependency before installing A +- **AND** if declined, SHALL abort with exit code 1 + +#### Scenario: Dependency already satisfied β€” no prompt + +- **GIVEN** module A requires `nold-ai/specfact-project >=0.41.0` +- **AND** `specfact-project` is installed at version `0.41.2` +- **WHEN** user runs `specfact module install A` +- **THEN** the CLI SHALL NOT prompt about the dependency +- **AND** SHALL log at INFO level: "Dependency nold-ai/specfact-project 0.41.2 satisfies >=0.41.0" + +#### Scenario: Non-interactive / CI mode with unsatisfied dependency + +- **GIVEN** the CLI is running in CI/CD mode and a dependency is not installed +- **WHEN** user runs `specfact module install A` without `--yes` +- **THEN** the CLI SHALL print the dependency error and exit non-zero +- **AND** SHALL NOT silently install the dependency +- **AND** SHALL suggest re-running with `--yes` to auto-resolve + +#### Scenario: Auto-resolve dependencies with --yes + +- **GIVEN** module A has an unmet dependency +- **WHEN** user runs `specfact module install A --yes` +- **THEN** the CLI SHALL install or upgrade all required dependencies automatically +- **AND** SHALL print a summary of what was auto-installed/upgraded before installing A + +### Requirement: Upgrade-time dependency re-evaluation + +During `specfact module upgrade`, the 
system SHALL re-evaluate the new version's +`bundle_dependencies` after fetching its updated manifest. If the new version introduces new +or tighter dependency requirements that are not currently satisfied, the CLI SHALL prompt the +user to resolve them before completing the upgrade. + +#### Scenario: Upgraded module requires a newer version of a dependency + +- **GIVEN** module A is being upgraded from `0.41.0` to `0.42.0` +- **AND** `0.42.0`'s manifest declares `nold-ai/specfact-project >=0.42.0` +- **AND** `specfact-project` is installed at `0.41.2` +- **WHEN** user runs `specfact module upgrade A` +- **THEN** the CLI SHALL print: + `A 0.42.0 requires nold-ai/specfact-project >=0.42.0 but 0.41.2 is installed.` +- **AND** SHALL prompt: `Upgrade nold-ai/specfact-project to satisfy constraint? [Y/n]` +- **AND** if confirmed, SHALL upgrade the dependency before completing the upgrade of A +- **AND** if declined, SHALL abort the upgrade of A and leave the existing version in place + +#### Scenario: Upgraded module introduces a new dependency not yet installed + +- **GIVEN** module A `0.42.0` introduces a new `bundle_dependencies` entry not present in `0.41.0` +- **AND** the new dependency is not installed +- **WHEN** user runs `specfact module upgrade A` +- **THEN** the CLI SHALL prompt to install the new dependency (same flow as install-time) + +#### Scenario: Upgrade with --yes auto-resolves dependency changes + +- **GIVEN** an upgrade introduces new or tighter dependency requirements +- **WHEN** user runs `specfact module upgrade A --yes` +- **THEN** all dependency installs and upgrades SHALL proceed automatically without prompting + +### Requirement: Core CLI compatibility check produces a clear actionable error + +When a module's `core_compatibility` specifier is not satisfied by the installed CLI version, +the error message SHALL tell the user both the required range and the current CLI version, and +SHALL suggest the corrective action. 
+ +#### Scenario: Module requires a newer CLI version + +- **GIVEN** module A declares `core_compatibility: ">=0.45.0,<1.0.0"` +- **AND** the installed CLI is version `0.44.0` +- **WHEN** user runs `specfact module install A` +- **THEN** the CLI SHALL print: + `A requires SpecFact CLI >=0.45.0 but you have 0.44.0.` + `Run: specfact upgrade or uvx specfact-cli@latest` +- **AND** SHALL exit non-zero without installing + +#### Scenario: Module is incompatible with current CLI major version + +- **GIVEN** module A declares `core_compatibility: ">=0.40.0,<1.0.0"` +- **AND** the installed CLI is version `1.0.0` +- **WHEN** user runs `specfact module install A` +- **THEN** the CLI SHALL print a clear incompatibility message with the constraint and current version +- **AND** SHALL suggest checking for a newer version of the module from the marketplace + +### Requirement: Circular dependency detection + +The installer SHALL detect circular `bundle_dependencies` references and abort with a clear error. + +#### Scenario: Circular bundle dependency detected + +- **GIVEN** module A depends on B and B depends on A +- **WHEN** the installer processes the dependency graph +- **THEN** the installer SHALL detect the cycle and print: + `Circular dependency detected: A -> B -> A` +- **AND** SHALL abort the install with exit code 1 without installing any module in the cycle + +### Requirement: Dry-run shows dependency resolution plan + +The install and upgrade commands SHALL support a `--dry-run` flag that shows the full dependency +resolution plan without performing any installs or upgrades. 
+ +#### Scenario: Dry-run install shows what would be installed + +- **WHEN** user runs `specfact module install A --dry-run` +- **THEN** the CLI SHALL print a dependency plan: + ``` + Would install: + nold-ai/specfact-project 0.41.2 (required by A >=0.41.0) + nold-ai/A 0.42.0 + ``` +- **AND** SHALL exit 0 without modifying any installed modules diff --git a/openspec/changes/docs-new-user-onboarding/specs/docs-aha-moment-entry/spec.md b/openspec/changes/docs-new-user-onboarding/specs/docs-aha-moment-entry/spec.md new file mode 100644 index 00000000..886f756c --- /dev/null +++ b/openspec/changes/docs-new-user-onboarding/specs/docs-aha-moment-entry/spec.md @@ -0,0 +1,81 @@ +## ADDED Requirements + +### Requirement: Homepage names `code review run` as the primary entry command + +The docs homepage SHALL explicitly name `specfact code review run` as the primary command for the +"review your code" use case. The command SHALL appear in the hero section before any path cards, +module system explanations, or architecture descriptions. + +#### Scenario: Vibe coder arrives at the homepage + +- **WHEN** a first-time visitor who heard "validate your vibe code with specfact" lands on + docs.specfact.io +- **THEN** the homepage SHALL display a fenced code block showing: + `uvx specfact-cli init --profile solo-developer` and + `uvx specfact-cli code review run --path . 
--scope full` + as the 2-command entry sequence +- **AND** the block SHALL appear before any path cards, architecture sections, or module links +- **AND** the expected output (scored review result with findings) SHALL be described adjacent to + the block so the user knows what they will see before running the commands + +#### Scenario: Visitor can start without navigating away + +- **WHEN** a visitor reads only the homepage without clicking any link +- **THEN** they SHALL have all commands needed to run their first code review +- **AND** no prior Python or pip knowledge SHALL be required to understand or run those commands + +### Requirement: Path cards name user outcomes in plain language + +The "Choose your path" cards on the homepage SHALL use plain language that a non-Python-expert +can understand. Each card heading SHALL describe what the user will achieve immediately. + +#### Scenario: Vibe coder reads card headings + +- **WHEN** a vibe coder with no prior SpecFact knowledge reads the path card headings +- **THEN** the first card heading SHALL be oriented toward reviewing existing code immediately + (e.g. 
"See what's wrong with your code right now") +- **AND** no heading SHALL use internal labels such as "Greenfield & AI-assisted delivery", + "Brownfield and reverse engineering", "Backlog to code alignment", or "Team and policy enforcement" + as the primary title +- **AND** each card body SHALL describe the user outcome and the key command, not the product + architecture or module name + +#### Scenario: User with no prior SpecFact knowledge selects a path + +- **WHEN** a user with no prior SpecFact knowledge reads the three path cards +- **THEN** they SHALL be able to identify which card matches their immediate goal without + understanding SpecFact's internal module or bundle architecture + +### Requirement: Architectural jargon deferred below the fold + +Terms describing internal platform architecture SHALL NOT appear in the above-the-fold hero content +of the homepage. They may appear in Architecture, Reference, or Module System sections lower on +the page. + +#### Scenario: Above-the-fold homepage content audit + +- **WHEN** the homepage is rendered at a standard viewport (1280Γ—800) +- **THEN** the visible content SHALL NOT include any of the following terms before the user scrolls: + "module discovery", "registry bootstrapping", "publisher trust", "mounted workflow groups", + "runtime contracts" (used as a section label or navigation entry) +- **AND** those terms MAY appear in Architecture or Reference sections below the fold + +### Requirement: Installation page promotes uvx as the no-install entry path + +The installation page SHALL present the uvx invocation as the primary "try it now" path for new +users, without a "Limitations" warning that discourages its use. 
+ +#### Scenario: New user opens the installation page + +- **WHEN** a first-time user opens the installation page +- **THEN** the first visible section SHALL present the uvx 2-command sequence + (init + code review run) under a heading such as "Try it now β€” no install required" +- **AND** the section SHALL NOT include a "Limitations" warning about the uvx path +- **AND** pip installation SHALL appear in a clearly labelled secondary section + ("Install for persistent use" or equivalent) + +#### Scenario: User wants to find alternative installation methods + +- **WHEN** a user wants to find Container or GitHub Action installation options +- **THEN** a visible section heading or anchor link SHALL allow them to jump to those options + without reading through the uvx or pip sections first diff --git a/openspec/changes/docs-new-user-onboarding/specs/docs-vibecoder-entry-path/spec.md b/openspec/changes/docs-new-user-onboarding/specs/docs-vibecoder-entry-path/spec.md new file mode 100644 index 00000000..35f690c5 --- /dev/null +++ b/openspec/changes/docs-new-user-onboarding/specs/docs-vibecoder-entry-path/spec.md @@ -0,0 +1,72 @@ +## ADDED Requirements + +### Requirement: Vibe-coder entry path is discoverable and runnable in under 2 commands + +The documentation entry surface SHALL make it possible for a developer who has never used +SpecFact before β€” and who does not know Python packaging β€” to reach a scored `code review run` +result in 2 commands and approximately 10 seconds, without pip install or virtual environment +setup. + +#### Scenario: Vibe coder runs the entry sequence for the first time + +- **GIVEN** a developer with `uvx` available (via the `uv` toolchain) and a git repository +- **WHEN** they run `uvx specfact-cli init --profile solo-developer` followed by + `uvx specfact-cli code review run --path . 
--scope full` +- **THEN** the first command SHALL install the required modules at user level in under 10 seconds +- **AND** the second command SHALL produce a scored code review result with categorised findings +- **AND** no additional configuration, pip install, or virtual environment setup SHALL be required + +#### Scenario: Entry path is documented with expected output + +- **WHEN** a visitor reads the homepage or installation page +- **THEN** the documentation SHALL show the expected output format of `code review run` + (e.g. "Verdict: FAIL | Score: 0 | 64 findings across naming, complexity, and type checks") + so the user knows what a successful first run looks like before they run it + +### Requirement: `code review run --path .` provides actionable guidance when scope is missing + +When a user runs `specfact code review run --path .` without `--scope full` in a context where +git diff is unavailable or produces no output, the CLI SHALL provide an actionable inline hint +rather than a bare error. + +#### Scenario: User runs `code review run --path .` without `--scope full` + +- **GIVEN** the user is in a git repository with no staged or unstaged changes visible via + `git diff HEAD` +- **WHEN** they run `specfact code review run --path .` +- **THEN** the CLI SHALL either: + (a) default automatically to `--scope full` when no diff is available, OR + (b) display an error that includes the exact command to run: + `specfact code review run --path . --scope full` +- **AND** the error SHALL NOT only say "Unable to determine changed tracked files" without + providing the corrective command + +### Requirement: Module-not-found error provides an exact uvx init command + +When a user attempts to run a module command that is not installed and the CLI detects a uvx +execution context, the error message SHALL include the exact `uvx specfact-cli init` command +as the suggested fix. 
+ +#### Scenario: Vibe coder runs `uvx specfact-cli code review run` before init + +- **GIVEN** a user running via `uvx specfact-cli` with no modules installed at user level +- **WHEN** they run `uvx specfact-cli code review run --path . --scope full` +- **THEN** the CLI SHALL display an error message that includes: + `uvx specfact-cli init --profile solo-developer` + as the suggested fix command +- **AND** the message SHALL NOT only reference "workflow bundles" without giving an exact command + +### Requirement: Plain-language value statement precedes technical vocabulary on entry pages + +The docs homepage and installation page SHALL open with a plain-language statement of what the +user will get β€” using vocabulary a non-Python-expert understands β€” before introducing any +technical terms. + +#### Scenario: Non-Python developer reads the homepage + +- **WHEN** a developer who primarily uses JavaScript, no-code tools, or AI-assisted coding reads + the homepage hero section +- **THEN** they SHALL encounter at least one sentence they can understand without Python or CLI + expertise (e.g. "Point it at your code. Get a score and a list of what to fix.") +- **AND** the first technical term they encounter SHALL be a command they can copy and run, + not a concept they need to research first diff --git a/openspec/changes/docs-new-user-onboarding/specs/entrypoint-onboarding/spec.md b/openspec/changes/docs-new-user-onboarding/specs/entrypoint-onboarding/spec.md new file mode 100644 index 00000000..5b1d28a7 --- /dev/null +++ b/openspec/changes/docs-new-user-onboarding/specs/entrypoint-onboarding/spec.md @@ -0,0 +1,44 @@ +## MODIFIED Requirements + +### Requirement: One primary fast-start path + +The central entry points SHALL provide one primary "start here now" path before branching into more +specialized persona or workflow guidance. 
The fast-start path SHALL be inline on the homepage β€” it +SHALL NOT require the user to navigate to a separate page to obtain the first working command. +The primary command in the fast-start path SHALL be `specfact code review run` invoked via uvx, +as this is the highest-value, lowest-friction entry point for the broadest new-user audience. + +#### Scenario: Vibe coder arrives at the homepage + +- **WHEN** a first-time visitor who heard "validate your vibe code with specfact" lands on the + homepage +- **THEN** the page SHALL display the 2-command uvx sequence as the first actionable content: + `uvx specfact-cli init --profile solo-developer` followed by + `uvx specfact-cli code review run --path . --scope full` +- **AND** the sequence SHALL appear before path cards, module navigation, or architecture content +- **AND** the expected result (score + categorised findings) SHALL be described so the user knows + what "success" looks like + +#### Scenario: User can complete the first run without leaving the homepage + +- **WHEN** a first-time visitor reads the homepage without clicking any link +- **THEN** they SHALL find all commands needed to run their first code review +- **AND** no navigation to installation.md, quickstart.md, or modules.specfact.io SHALL be required + to obtain and run those commands + +### Requirement: Choose-your-path guidance follows the first-run path + +After the primary fast-start path, entry points SHALL route users into the most relevant next step +for their intent. Path options SHALL be described as user outcomes in plain language that a +non-Python-expert can understand. 
+ +#### Scenario: User needs the right next path + +- **WHEN** the user completes or reviews the first-run path +- **THEN** the entry point SHALL offer clear next-step options including at least: + - reviewing existing code immediately (the `code review run` path) + - setting up IDE slash-command workflows with a supported copilot + - enabling a pre-commit or CI validation gate +- **AND** each card heading SHALL describe what the user will do or get, not a product persona + or internal module taxonomy +- **AND** card descriptions SHALL use vocabulary understandable without Python or CLI expertise diff --git a/openspec/changes/docs-new-user-onboarding/specs/first-contact-story/spec.md b/openspec/changes/docs-new-user-onboarding/specs/first-contact-story/spec.md new file mode 100644 index 00000000..adfba053 --- /dev/null +++ b/openspec/changes/docs-new-user-onboarding/specs/first-contact-story/spec.md @@ -0,0 +1,58 @@ +## MODIFIED Requirements + +### Requirement: Canonical first-contact product story + +The repository and documentation entry points SHALL present one canonical product story that answers +the first-contact questions for both vibe coders and experienced developers. The hero statement SHALL +use plain language that works for a developer who does not know Python packaging β€” not just for +someone already familiar with contracts, modules, and runtimes. + +The canonical answer to "what is SpecFact?" SHALL describe what the user gets immediately +("a score and a list of what to fix") before explaining how it is achieved internally. + +#### Scenario: Vibe coder reads the homepage hero + +- **WHEN** a developer who primarily uses AI-assisted or no-code tools reads the homepage hero +- **THEN** the first sentence SHALL describe an outcome they will recognise + (e.g. "Point it at your code. 
Get a score and a list of what to fix.") +- **AND** the hero SHALL NOT open with "validation and alignment layer", "runtime contracts", + or any other phrase that requires prior familiarity with the product + +#### Scenario: User compares repo and docs entry points + +- **WHEN** a user reads the repo README and the core docs homepage +- **THEN** both SHALL describe the same core product identity +- **AND** they SHALL NOT give conflicting first impressions about whether SpecFact is primarily + a CLI, a module platform, an AI tool, or a backlog tool + +#### Scenario: Hero statement pairs identity with a concrete, time-bounded outcome + +- **WHEN** a first-time visitor reads the hero on the docs homepage or README +- **THEN** the primary headline or subheadline SHALL communicate a concrete achievable outcome + with a time signal (e.g. "See what's wrong with your code in 10 seconds") +- **AND** the outcome statement SHALL appear before any explanation of internal architecture, + module system, or platform topology +- **AND** the hero SHALL include or link directly to the runnable 2-command uvx sequence + +### Requirement: Headline and proof-point separation + +First-contact surfaces SHALL keep the primary identity statement separate from supporting proof +points. Platform-internal vocabulary SHALL NOT appear in the hero or primary identity statement. +The hero SHALL work for a non-Python-expert; advanced vocabulary may appear in proof-point +sections below the hero. 
+ +#### Scenario: User scans the first screen + +- **WHEN** a user scans the first screen of the README or docs homepage +- **THEN** the primary message SHALL fit in a short headline/subheadline structure +- **AND** secondary capability claims (contracts, SDD/TDD, brownfield, module extensibility) + SHALL appear as proof points after the hero, not as headline overload +- **AND** platform-internal architectural terms SHALL NOT appear in the above-the-fold hero + +#### Scenario: Experienced developer also finds their next step + +- **WHEN** an experienced Python developer reads the homepage after the hero section +- **THEN** they SHALL find links to the pip installation path, profile options, and deeper + technical documentation without needing to search for them +- **AND** the progressive depth SHALL be clearly layered: vibe-coder entry β†’ developer setup β†’ + advanced configuration diff --git a/openspec/changes/docs-new-user-onboarding/specs/first-run-selection/spec.md b/openspec/changes/docs-new-user-onboarding/specs/first-run-selection/spec.md new file mode 100644 index 00000000..51e58d15 --- /dev/null +++ b/openspec/changes/docs-new-user-onboarding/specs/first-run-selection/spec.md @@ -0,0 +1,58 @@ +## MODIFIED Requirements + +### Requirement: `specfact init` detects first-run and presents bundle selection + +On a fresh install where no bundles are installed, `specfact init` SHALL present an interactive +bundle selection UI. When `--profile` is provided, `specfact init` SHALL install the profile's +canonical bundle set without requiring user interaction, and SHALL exit successfully only after +the bundles are fully installed and registered β€” not merely after runtime bootstrap. 
+ +#### Scenario: First-run interactive bundle selection in Copilot mode + +- **GIVEN** a fresh SpecFact install with no bundles installed +- **AND** the CLI is running in Copilot (interactive) mode +- **WHEN** the user runs `specfact init` +- **THEN** the CLI SHALL display a welcome banner +- **AND** SHALL show the core modules as always-selected (non-deselectable): init, auth, module, upgrade +- **AND** SHALL present a multi-select list of the 5 workflow bundles with descriptions: + - Project lifecycle (project, plan, import, sync, migrate) + - Backlog management (backlog, policy) + - Codebase quality (analyze, drift, validate, repro) + - Spec & API (contract, spec, sdd, generate) + - Governance (enforce, patch) +- **AND** SHALL offer profile preset shortcuts: Solo developer, Backlog team, API-first team, Enterprise full-stack +- **AND** SHALL install the user-selected bundles before completing workspace initialisation + +#### Scenario: `init --profile` installs all profile bundles before completion + +- **GIVEN** a fresh SpecFact install with no bundles installed +- **WHEN** the user runs `specfact init --profile solo-developer` +- **THEN** the CLI SHALL invoke the module installer for each bundle in the profile's canonical set +- **AND** SHALL NOT report "Bootstrap complete" until all profile bundles are installed and their + commands are available in the CLI surface +- **AND** after the command completes, running `specfact code review run --help` SHALL succeed + without a "Command not installed" error + +#### Scenario: `init --profile` via uvx installs modules at user level + +- **GIVEN** the user is running via `uvx specfact-cli` +- **WHEN** they run `uvx specfact-cli init --profile solo-developer` +- **THEN** the CLI SHALL install profile bundles to the user-level module root + (e.g. 
`~/.specfact/modules/`) without requiring pip to be available in the uvx environment
+- **AND** subsequent `uvx specfact-cli` invocations SHALL detect and load the installed modules
+
+#### Scenario: User selects a profile preset during first-run
+
+- **GIVEN** the first-run interactive UI is displayed
+- **WHEN** the user selects "Enterprise full-stack" profile preset
+- **THEN** the CLI SHALL auto-select bundles: project, backlog, codebase, spec, govern
+- **AND** SHALL confirm the selection with a summary before installing
+- **AND** SHALL install all five bundles via the module installer
+
+#### Scenario: User skips bundle selection during first-run
+
+- **GIVEN** the first-run interactive UI is displayed
+- **WHEN** the user selects no bundles and confirms
+- **THEN** the CLI SHALL install only core modules
+- **AND** SHALL display a tip: "Install bundles later with `specfact module install <bundle-name>`"
+- **AND** SHALL complete workspace initialisation with only core commands available
diff --git a/openspec/changes/docs-new-user-onboarding/specs/module-installation/spec.md b/openspec/changes/docs-new-user-onboarding/specs/module-installation/spec.md
new file mode 100644
index 00000000..d3e1e002
--- /dev/null
+++ b/openspec/changes/docs-new-user-onboarding/specs/module-installation/spec.md
@@ -0,0 +1,152 @@
+## MODIFIED Requirements
+
+### Requirement: Upgrade command updates installed modules
+
+The system SHALL provide `specfact module upgrade [module-names...]` command that upgrades one or
+more marketplace modules to their latest version. The command SHALL accept zero or more positional
+module name arguments: no arguments upgrades all marketplace modules; one or more names restricts
+the upgrade to only the named modules.
+
+The upgrade output SHALL distinguish between modules that were actually upgraded to a new version
+and modules that were already at the latest version. 
Showing `0.41.16 -> 0.41.16` when no version +change occurred is incorrect and SHALL NOT happen. + +While the registry index is being fetched or a module is being installed, the CLI SHALL show visible +progress (for example a Rich status spinner) so the user knows work is ongoing. Rich progress MAY +be suppressed in automated test environments. + +Before upgrading any module where the latest registry version has a higher major version than the +installed version, the CLI SHALL warn the user and require confirmation, because major version +bumps may contain breaking changes. + +#### Scenario: Upgrade shows progress during registry fetch and install + +- **WHEN** user runs `specfact module upgrade` and the registry fetch or an install takes noticeable time +- **THEN** the CLI SHALL show visible progress during fetch and during each module install + +#### Scenario: Upgrade warns when registry index is unavailable + +- **WHEN** the registry index cannot be fetched (offline or network error) +- **THEN** the CLI SHALL print a clear warning that the registry is unavailable +- **AND** SHALL continue using installed metadata where possible for the upgrade decision + +#### Scenario: Upgrade a single named module to a newer minor/patch version + +- **WHEN** user runs `specfact module upgrade backlog` and `0.42.0` is available (current `0.41.16`) +- **THEN** system SHALL fetch registry index +- **AND** SHALL confirm a newer version exists +- **AND** SHALL install the newer version without prompting (minor/patch, not a major bump) +- **AND** SHALL output `backlog: 0.41.16 -> 0.42.0` + +#### Scenario: Upgrade multiple named modules selectively + +- **WHEN** user runs `specfact module upgrade backlog codebase` +- **THEN** system SHALL upgrade only `backlog` and `codebase` +- **AND** SHALL NOT upgrade any other installed modules +- **AND** SHALL report each module's result independently + +#### Scenario: Upgrade when module is already at latest version + +- **WHEN** user runs 
`specfact module upgrade backlog` and no newer version is available +- **THEN** system SHALL NOT reinstall the module +- **AND** SHALL output `backlog: already up to date (0.41.16)` or equivalent +- **AND** SHALL NOT output `backlog: 0.41.16 -> 0.41.16` + +#### Scenario: Upgrade all modules β€” mixed result (some upgraded, some current) + +- **WHEN** user runs `specfact module upgrade` with no arguments (all modules) +- **AND** some modules have newer versions and some do not +- **THEN** the output SHALL have two sections: + - `Upgraded:` listing only modules where the version actually changed + - `Already up to date:` listing modules that were already at the latest version +- **AND** if no modules were upgraded, the output SHALL say "All modules are up to date" + and SHALL NOT show any `X -> X` lines + +#### Scenario: Upgrade detects a breaking major version bump and prompts + +- **GIVEN** module `backlog` is installed at version `0.41.16` +- **AND** the registry offers version `1.0.0` as the latest +- **WHEN** user runs `specfact module upgrade backlog` in an interactive terminal +- **THEN** the CLI SHALL print a warning: + `backlog: 0.41.16 -> 1.0.0 is a MAJOR version upgrade and may contain breaking changes.` +- **AND** SHALL prompt: `Upgrade anyway? 
[y/N]` +- **AND** SHALL only proceed if the user confirms with `y` or `Y` +- **AND** if the user declines, SHALL skip that module and continue with remaining targets + +#### Scenario: Breaking major version upgrade bypassed with --yes flag + +- **GIVEN** module `backlog` has a major version bump available +- **WHEN** user runs `specfact module upgrade backlog --yes` +- **THEN** the CLI SHALL upgrade without prompting +- **AND** SHALL print the warning line but not the confirmation prompt + +#### Scenario: Breaking major version upgrade skipped with a warning in CI/CD mode + +- **GIVEN** the CLI is running in CI/CD (non-interactive) mode +- **AND** a module has a major version bump available +- **WHEN** user runs `specfact module upgrade` without `--yes` +- **THEN** the CLI SHALL skip the module with a warning: + `backlog: skipped — major version bump (0.41.16 -> 1.0.0). Re-run with --yes to upgrade.` +- **AND** SHALL exit 0 if all non-skipped modules succeeded + +#### Scenario: Upgrade reinstalls when newer version is available + +- **WHEN** a newer non-breaking version is available and the module is already installed +- **THEN** system SHALL replace existing installed files with the upgraded package +- **AND** SHALL NOT no-op due to existing install marker files + +## ADDED Requirements + +### Requirement: Install command accepts multiple module IDs in one invocation + +The system SHALL allow `specfact module install` to accept one or more module IDs as positional +arguments so users can install several modules in a single command, consistent with the UX of +standard package managers (apt, pip, brew, npm). 
+ +#### Scenario: User installs multiple modules at once + +- **WHEN** user runs `specfact module install nold-ai/specfact-codebase nold-ai/specfact-code-review` +- **THEN** the system SHALL install all listed modules in sequence +- **AND** SHALL print an install confirmation line for each module +- **AND** SHALL stop and report failure if any module install fails, leaving already-installed + modules in place + +#### Scenario: User installs a single module (existing behaviour unchanged) + +- **WHEN** user runs `specfact module install nold-ai/specfact-codebase` +- **THEN** the system SHALL install the module exactly as before +- **AND** existing flags (`--scope`, `--source`, `--reinstall`, `--force`, `--skip-deps`) SHALL + apply to all modules in the invocation + +#### Scenario: Multi-install with one already-satisfied module + +- **WHEN** user runs `specfact module install A B` and A is already installed +- **THEN** the system SHALL skip A with the existing "already installed" message +- **AND** SHALL still install B +- **AND** SHALL exit 0 if all non-skipped installs succeed + +### Requirement: Uninstall command accepts multiple module names in one invocation + +The system SHALL allow `specfact module uninstall` to accept one or more module names as positional +arguments so users can remove several modules in a single command, consistent with the multi-install +behaviour and the UX of standard package managers. 
+ +#### Scenario: User uninstalls multiple modules at once + +- **WHEN** user runs `specfact module uninstall nold-ai/specfact-codebase nold-ai/specfact-code-review` +- **THEN** the system SHALL uninstall all listed modules in sequence +- **AND** SHALL print an uninstall confirmation line for each module +- **AND** SHALL continue with remaining modules if one fails, then exit non-zero + +#### Scenario: User uninstalls a single module (existing behaviour unchanged) + +- **WHEN** user runs `specfact module uninstall nold-ai/specfact-codebase` +- **THEN** the system SHALL uninstall the module exactly as before +- **AND** existing flags (`--scope`, `--repo`) SHALL apply to all modules in the invocation + +#### Scenario: Multi-uninstall with one module not installed + +- **WHEN** user runs `specfact module uninstall A B` and A is not installed +- **THEN** the system SHALL report that A is not found +- **AND** SHALL still attempt to uninstall B +- **AND** SHALL exit non-zero if any module failed or was not found diff --git a/openspec/changes/docs-new-user-onboarding/specs/profile-presets/spec.md b/openspec/changes/docs-new-user-onboarding/specs/profile-presets/spec.md new file mode 100644 index 00000000..4e212ff0 --- /dev/null +++ b/openspec/changes/docs-new-user-onboarding/specs/profile-presets/spec.md @@ -0,0 +1,58 @@ +## MODIFIED Requirements + +### Requirement: Profile presets resolve to canonical bundle sets and install them + +The four profile presets SHALL resolve to the exact canonical bundle set and install each bundle +via the marketplace installer. The `solo-developer` profile SHALL include +`nold-ai/specfact-code-review` so that `specfact code review run` is available immediately after +running `specfact init --profile solo-developer`. 
+ +#### Scenario: solo-developer profile installs codebase and code-review bundles + +- **GIVEN** a fresh SpecFact install or an install where specfact-codebase and specfact-code-review + are not yet installed +- **WHEN** the user runs `specfact init --profile solo-developer` +- **THEN** the CLI SHALL install `nold-ai/specfact-codebase` from the marketplace registry +- **AND** SHALL install `nold-ai/specfact-code-review` from the marketplace registry +- **AND** SHALL confirm: "Installed: specfact-codebase, specfact-code-review" +- **AND** after completion, `specfact code review run --path . --scope full` SHALL be available + and produce a scored review result + +#### Scenario: backlog-team profile installs three bundles in dependency order + +- **GIVEN** a fresh SpecFact install +- **WHEN** the user runs `specfact init --profile backlog-team` +- **THEN** the CLI SHALL install: `specfact-project`, `specfact-backlog`, `specfact-codebase` +- **AND** SHALL install `specfact-project` before `specfact-backlog` + +#### Scenario: api-first-team profile installs spec and codebase bundles + +- **GIVEN** a fresh SpecFact install +- **WHEN** the user runs `specfact init --profile api-first-team` +- **THEN** the CLI SHALL install: `specfact-spec`, `specfact-codebase` +- **AND** `specfact-project` SHALL be auto-installed if required as a transitive dependency + +#### Scenario: enterprise-full-stack profile installs all five bundles + +- **GIVEN** a fresh SpecFact install +- **WHEN** the user runs `specfact init --profile enterprise-full-stack` +- **THEN** the CLI SHALL install all five bundles: + `specfact-project`, `specfact-backlog`, `specfact-codebase`, `specfact-spec`, `specfact-govern` + +#### Scenario: Profile canonical bundle mapping is machine-verifiable + +- **GIVEN** a request for any valid profile name +- **WHEN** `specfact init --profile <profile>` is executed +- **THEN** the resolved bundle set SHALL be: + - `solo-developer` → `[specfact-codebase, specfact-code-review]` + 
- `backlog-team` β†’ `[specfact-project, specfact-backlog, specfact-codebase]` + - `api-first-team` β†’ `[specfact-spec, specfact-codebase]` + - `enterprise-full-stack` β†’ `[specfact-project, specfact-backlog, specfact-codebase, specfact-spec, specfact-govern]` +- **AND** no profile SHALL install bundles outside its canonical set + +#### Scenario: Invalid profile name produces actionable error + +- **GIVEN** the user runs `specfact init --profile unknown-profile` +- **WHEN** the CLI processes the command +- **THEN** the CLI SHALL print an error listing valid profile names: + solo-developer, backlog-team, api-first-team, enterprise-full-stack diff --git a/openspec/changes/docs-new-user-onboarding/tasks.md b/openspec/changes/docs-new-user-onboarding/tasks.md new file mode 100644 index 00000000..281fcdd4 --- /dev/null +++ b/openspec/changes/docs-new-user-onboarding/tasks.md @@ -0,0 +1,240 @@ +## Current status (rolling) + +**Branch:** `feature/docs-new-user-onboarding` (worktree active; tracks `origin/feature/docs-new-user-onboarding`). +**Release packaging:** Patch **0.45.1** β€” `pyproject.toml`, `setup.py`, `src/__init__.py`, `src/specfact_cli/__init__.py`, and `CHANGELOG.md` aligned (see top changelog section). +**Done on branch:** Tasks **1–4, 6–7c, 8–10**; **7d** partial (7d.9, 7d.10, 7d.17; install `--yes` on upgrade from 7b; bundle-dep tests in `test_bundle_dependency_install.py`); **core_compatibility** messaging and registry parsing for versioned `bundle_dependencies`. +**Open / follow-up:** **5** (scope UX) β€” **specfact-code-review** module repo; **7d.11–7d.16, 7d.18** β€” full resolver graph, `--dry-run`, registry index objects in modules repo; **11.1** β€” merge deltas to `openspec/specs/` when project sync workflow is available; **12–13** β€” re-run gates before merge, PR to `dev`, then archive. 
+**Evidence:** `TDD_EVIDENCE.md` (contract-test + yaml-lint + unit suite on 2026-04-02); re-run **7.1–7.2** and **12.1–12.3** before final PR if `main`/`dev` moved. + +--- + +## 1. Investigate and locate bug roots + +- [x] 1.1 Find where `specfact init --profile ` is handled in the init module source + and confirm it is NOT calling the module installer (Bug 1 root cause) +- [x] 1.2 Find the module installer path and confirm why it fails under uvx + ("No module named pip") β€” identify whether the fix is in the installer or in how + uvx-bundled environments should be detected (Bug 2 root cause) +- [x] 1.3 Find the canonical profile β†’ bundle mapping definition and confirm it does not + include `specfact-code-review` for `solo-developer` (Bug 3 root cause) +- [x] 1.4 Find the `code review run --path .` error path and confirm it does not suggest + `--scope full` as the corrective action (Bug 4 root cause) +- [x] 1.5 Record baseline failing test evidence in `TDD_EVIDENCE.md`: + - run `specfact init --profile solo-developer` β†’ confirm no modules installed + - run `specfact code review run --path . --scope full` β†’ confirm "Command not installed" + - run `specfact module install nold-ai/specfact-code-review` under uvx β†’ confirm pip error + - run `specfact code review run --path .` β†’ confirm unhelpful git-diff error + *(superseded by post-fix evidence in `TDD_EVIDENCE.md`)* + +## 2. 
Fix `init --profile` module installation (Bug 1) + +- [x] 2.1 Write failing test: `init --profile solo-developer` installs `specfact-codebase` + and `specfact-code-review` and they appear in `module list` output +- [x] 2.2 Implement fix: `init --profile` MUST call `module install` for each bundle in the + profile's canonical set after runtime bootstrap +- [x] 2.3 Ensure `init` outputs a confirmation line per installed bundle: + "Installed: specfact-codebase, specfact-code-review" +- [x] 2.4 Ensure `init --profile` does NOT report "Bootstrap complete" until all bundles + are installed and their commands are registered + +## 3. Fix module install under uvx (Bug 2) + +- [x] 3.1 Write failing test: `specfact module install nold-ai/specfact-code-review` in a + uvx-isolated environment succeeds (does not require pip) +- [x] 3.2 Implement fix: module installation SHALL use a pip-free path when running under uvx + (use bundled package artifacts or uv-native install, not `pip install`) +- [ ] 3.3 Verify `module install` succeeds in a clean uvx context with no user-level pip + *(manual / CI smoke; not automated in this repo)* + +## 4. Add `specfact-code-review` to `solo-developer` profile (Bug 3) + +- [x] 4.1 Write failing test: `solo-developer` profile canonical set includes both + `specfact-codebase` and `specfact-code-review` +- [x] 4.2 Update the profile canonical bundle mapping to add `specfact-code-review` to + `solo-developer` +- [ ] 4.3 Verify end-to-end: after `specfact init --profile solo-developer`, running + `specfact code review run --path . --scope full` in a git repo produces a scored result + *(manual verification after PR; requires marketplace modules)* + +## 5. Fix `code review run --path .` scope error (Bug 4) + +**Note:** `code review run` is implemented in the **specfact-code-review** module (`nold-ai/specfact-cli-modules`); scope/diff behaviour should be fixed there. Docs now steer users to `--scope full` on the uvx path. 
+ +- [ ] 5.1 Write failing test: running `specfact code review run --path .` in a git repo with + no staged changes produces an error that includes `--scope full` as the corrective command +- [ ] 5.2 Implement fix: either (a) default to `--scope full` when no git diff is available, + OR (b) emit a specific error: "No changed files detected. Run with `--scope full` to + review all tracked files." +- [ ] 5.3 Verify the error or default behaviour is consistent between uvx and pip-installed CLI + +## 6. Improve module-not-found error message (UX) + +- [x] 6.1 Write failing test: running `uvx specfact-cli code review run` with no modules + installed produces an error that includes `uvx specfact-cli init --profile solo-developer` +- [x] 6.2 Implement fix: the module-not-found error for command groups SHALL include a + copy-pasteable init command, not just the generic "install workflow bundles" message +- [x] 6.3 Verify the message is correct for both uvx and pip-installed CLI contexts + +## 7. Run pre-docs TDD gate + +- [x] 7.1 Run `hatch run contract-test` β€” confirm passing *(passed 2026-04-02 per `TDD_EVIDENCE.md`; **re-run before PR merge** if base moved)* +- [ ] 7.2 Run `hatch run smart-test` β€” confirm passing *(run before PR merge; use `smart-test-full` if touching `src/` broadly)* +- [x] 7.3 Run `hatch run format` and `hatch run type-check` β€” confirm zero errors +- [x] 7.4 Record post-fix passing evidence in `TDD_EVIDENCE.md` +- [ ] 7.5 End-to-end manual test on a clean machine: `uvx specfact-cli init --profile solo-developer` + then `uvx specfact-cli code review run --path . --scope full` β†’ confirm scored output + +## 7b. 
Fix `module upgrade` output and add selective + breaking-change gate + +- [x] 7b.1 Write failing test: `module upgrade` when all modules are at latest version outputs + "All modules are up to date" and contains no `X -> X` lines +- [x] 7b.2 Write failing test: `module upgrade` when one module has a newer minor version shows + it in "Upgraded:" and unchanged modules in "Already up to date:" +- [x] 7b.3 Write failing test: `module upgrade backlog codebase` upgrades only those two modules +- [x] 7b.4 Write failing test: major version bump (0.x β†’ 1.x) in interactive mode prompts the + user; declining skips the module; accepting upgrades it +- [x] 7b.5 Write failing test: major version bump with `--yes` upgrades without prompting +- [x] 7b.6 Write failing test: major version bump in CI/CD mode is skipped with a warning, + exit 0 when remaining non-major modules succeed +- [x] 7b.7 Change `upgrade` Argument from `module_name: str | None` to `module_names: list[str]` + with `typer.Argument(default=[])`; update `_upgrade_module_name_optional` guard; + empty list = upgrade all (existing `--all` behaviour remains as alias) +- [x] 7b.8 Update `_resolve_upgrade_target_ids` to accept a list of names +- [x] 7b.9 Before calling `install_module`, look up `latest_version` from registry index; + skip reinstall when `latest_version == current_version` (populate `up_to_date` list) +- [x] 7b.10 Add semver major-bump detection: if `int(latest.split('.')[0]) > int(current.split('.')[0])`, + gate on `--yes` flag or interactive prompt; auto-skip in CI/CD mode with warning +- [x] 7b.11 Add `--yes` / `-y` flag to `upgrade` command for non-interactive major-bump approval +- [x] 7b.12 Update output sections: "Upgraded:", "Already up to date:", "Skipped (major bump):" +- [ ] 7b.13 Verify end-to-end: `module upgrade` with current modules β†’ "All modules are up to date" + *(manual smoke with real marketplace modules)* + +## 7c. 
Multi-module install and uninstall + +- [x] 7c.1 Write failing test: `specfact module install A B` installs both A and B +- [x] 7c.2 Write failing test: `specfact module install A B` where A is already installed β€” + skips A, installs B, exits 0 +- [x] 7c.3 Change `install` Argument from `module_id: str` to `module_ids: list[str]`; + update `@require` guard; loop through each id using existing install logic +- [x] 7c.4 Exit non-zero only if at least one module failed (not if skipped/already installed) +- [x] 7c.5 Verify: single-module install still works identically; all existing flags apply +- [x] 7c.6 Write failing test: `specfact module uninstall A B` uninstalls both A and B +- [x] 7c.7 Write failing test: `specfact module uninstall A B` where A is not installed β€” + reports A not found, still attempts B, exits non-zero + *(Catches `click.exceptions.Exit` from `typer.Exit`; upgrade uses `Optional[list[str]]` for Click 8.1 + Typer 0.23.)* +- [x] 7c.8 Change `uninstall` Argument from `module_name: str` to `module_names: list[str]`; + update `@require` guard; loop through each name using existing uninstall logic +- [x] 7c.9 Verify: single-module uninstall still works identically; `--scope`/`--repo` apply + +## 7d. Version-aware bundle dependency resolution + +**Progress:** 7d.9, 7d.10, 7d.17 implemented; `test_bundle_dependency_install.py` covers ordered bundle install; full interactive resolver / `--dry-run` / circular graph (7d.11–7d.16) **not** wired β€” see `TDD_EVIDENCE.md` β€œDeferred / follow-up”. 
+ +- [ ] 7d.1 Write failing test: installing a module whose `bundle_dependencies` lists a module + not installed prompts the user and installs the dep on confirmation +- [ ] 7d.2 Write failing test: installing a module whose declared dep version specifier is + not satisfied by the installed version prompts to upgrade, aborts on decline +- [ ] 7d.3 Write failing test: dep already satisfies specifier β€” no prompt, INFO log only +- [ ] 7d.4 Write failing test: `module install A --yes` auto-installs/upgrades all unmet deps +- [ ] 7d.5 Write failing test: CI/CD mode with unmet dep exits non-zero without silent install +- [ ] 7d.6 Write failing test: `module install A --dry-run` prints plan and exits 0 with no changes +- [ ] 7d.7 Write failing test: circular dep Aβ†’Bβ†’A is detected and aborts with clear message +- [ ] 7d.8 Write failing test: upgrade re-evaluates new version's deps; prompts if new dep + requirements are introduced or tightened +- [x] 7d.9 Write failing test: `core_compatibility` mismatch prints version, required range, + and corrective command β€” not a bare exception +- [x] 7d.10 Extend registry index parser: `_extract_bundle_dependencies` SHALL handle both + plain string entries and `{"id": "...", "version": "..."}` object entries; return + `list[tuple[str, str | None]]` (module_id, version_specifier_or_None) +- [ ] 7d.11 Add `resolve_module_dependencies(targets, installed_modules, registry_index)` to + `dependency_resolver.py`: for each dep, check if installed and if version satisfies + specifier; return `ResolutionPlan(to_install, to_upgrade, satisfied, conflicts)` +- [ ] 7d.12 Add circular dependency detection to `resolve_module_dependencies` using a + visited-set DFS over the dependency graph +- [x] 7d.13 Add `--yes` flag to `install` and `upgrade` commands (if not already added in 7b.11) + to enable non-interactive auto-resolution *(upgrade has `--yes`; install dep auto-resolve: TBD)* +- [ ] 7d.14 Add `--dry-run` flag to `install` and 
`upgrade`; print `ResolutionPlan` and exit 0 +- [ ] 7d.15 Wire `resolve_module_dependencies` into `_install_bundle_dependencies_for_module`: + call it before any install; prompt user for each `to_install` / `to_upgrade` entry; + auto-resolve if `--yes`; abort if user declines or CI mode and deps unmet +- [ ] 7d.16 Wire dep re-evaluation into `_run_marketplace_upgrades`: after fetching new version + manifest, call `resolve_module_dependencies` before placing the upgraded module +- [x] 7d.17 Replace the bare `ValueError("Module is incompatible with current SpecFact CLI version")` + in `module_installer.py:726` with a structured message: + `" requires SpecFact CLI but you have . Run: specfact upgrade"` +- [ ] 7d.18 Update `registry/index.json` in specfact-cli-modules to use versioned + `bundle_dependencies` objects where constraints exist (e.g. specfact-codebase β†’ project) +- [ ] 7d.19 Run full contract-test and smart-test suite; confirm no regressions + +## 8. Homepage Rewrite (`docs/index.md`) + +- [x] 8.1 Replace the opening paragraph with a plain-language outcome-first hero statement + ("Point it at your code. Get a score and a list of what to fix.") +- [x] 8.2 Add inline 2-command uvx fenced code block immediately after the hero: + `uvx specfact-cli init --profile solo-developer` + + `uvx specfact-cli code review run --path . --scope full` +- [x] 8.3 Add a description of the expected output adjacent to the block + (e.g. 
"You'll see: Verdict: FAIL | Score: 0 | 64 findings") +- [x] 8.4 Add "Read the full quickstart β†’" link adjacent to the block +- [x] 8.5 Replace the 4-card "Choose Your Path" section with 3 outcome-oriented cards: + "See what's wrong with your code right now" / + "Set up IDE slash-command workflows" / + "Add a pre-commit or CI gate" +- [x] 8.6 Rewrite the "Core Platform" section β€” remove the jargon bullet; keep init/module/upgrade +- [x] 8.7 Verify: no architectural jargon terms above the fold before path cards +- [x] 8.8 Verify: all existing Architecture/Reference/Migration section links still resolve + +## 9. Installation Page Restructure (`docs/getting-started/installation.md`) + +- [x] 9.1 Add "## Try it now β€” no install required" as the first H2, showing the uvx 2-command + sequence with expected output description +- [x] 9.2 Add "## Install for persistent use" as the next H2 (pip path) +- [x] 9.3 Move Container and GitHub Action options to "## More options" section +- [x] 9.4 Remove the "Limitations" warning from the uvx section +- [x] 9.5 Move "Operational Modes", "Installed Command Topology", and profile table below + "More options" + *(Under `## First Steps`, immediately after Container/GitHub Action.)* +- [x] 9.6 Add visible anchor link "More options ↓" after the pip section +- [x] 9.7 Verify: front-matter unchanged; no broken links + +## 10. Quickstart Reframe (`docs/getting-started/quickstart.md`) + +- [x] 10.1 Rewrite the intro so it leads with the uvx path and the vibe-coder audience +- [x] 10.2 Ensure Step 1 is the uvx init command, not pip install +- [x] 10.3 Verify: front-matter, redirect_from, and all 6 steps are intact + +## 11. 
Spec Sync + +- [x] 11.0 GitHub backlog: issue [#476](https://github.com/nold-ai/specfact-cli/issues/476) with labels `enhancement`, `change-proposal`, `documentation`, `openspec`; parent feature [#356](https://github.com/nold-ai/specfact-cli/issues/356); related [#466](https://github.com/nold-ai/specfact-cli/issues/466) β€” `proposal.md` Source Tracking updated +- [ ] 11.1 Run `openspec sync --change docs-new-user-onboarding` to merge all 10 spec deltas + *(blocked: OpenSpec CLI in this environment has no `sync` subcommand β€” use project workflow when available)* +- [ ] 11.2 Confirm `openspec/specs/docs-aha-moment-entry/spec.md` created + *(delta exists: `openspec/changes/docs-new-user-onboarding/specs/docs-aha-moment-entry/spec.md`; **not** merged to main specs yet)* +- [ ] 11.3 Confirm `openspec/specs/docs-vibecoder-entry-path/spec.md` created + *(delta exists: `openspec/changes/docs-new-user-onboarding/specs/docs-vibecoder-entry-path/spec.md`; **not** merged to main specs yet)* +- [x] 11.4 Confirm `openspec/specs/dependency-resolution/spec.md` created *(main spec present; delta under this change may still differ until 11.1)* +- [ ] 11.5 Confirm MODIFIED requirements in `entrypoint-onboarding`, `first-contact-story`, + `first-run-selection`, `profile-presets`, and `module-installation` specs are updated + *(deltas under `openspec/changes/docs-new-user-onboarding/specs/`; merge to `openspec/specs/` pending 11.1)* + +## 12. 
Final Validation and Evidence + +- [x] 12.1 Run `hatch run yaml-lint` β€” confirm zero failures *(passed 2026-04-02 per `TDD_EVIDENCE.md`; **re-run before PR** if YAML/workflows changed since)* +- [x] 12.2 Run `hatch run contract-test` β€” confirm passing *(passed 2026-04-02 per `TDD_EVIDENCE.md`; **re-run before PR** if contracts/sources changed since)* +- [ ] 12.3 Run `hatch run specfact code review run --json --out .specfact/code-review.json` + and confirm zero findings on modified Python files *(before PR)* +- [ ] 12.4 Build docs locally (`bundle exec jekyll serve`) and manually verify: + homepage hero + code block, 3 path cards, installation uvx-first, quickstart uvx-led +- [ ] 12.5 Manual end-to-end on a clean machine: full uvx wow path works in under 15 seconds +- [x] 12.6 Record final passing evidence in `TDD_EVIDENCE.md` +- [x] 12.7 Update `openspec/CHANGE_ORDER.md` with this change entry + +## 13. PR and Cleanup + +- [x] 13.1 Create feature branch `feature/docs-new-user-onboarding` from `origin/dev` *(branch exists; pushed to `origin`)* +- [x] 13.2 Commit CLI fixes: `fix: init --profile installs profile modules, fix module-install under uvx` + *(landed on branch β€” see git log; may be squashed across commits)* +- [x] 13.3 Commit docs: `docs: vibe-coder entry path β€” uvx hero, code review wow moment` + *(landed on branch β€” see git log; follow-on commits include README, dependency-profile work, 0.45.1 changelog)* +- [ ] 13.4 Open PR against `dev` referencing this change and the three CLI bugs fixed *(or update existing PR; confirm checks green)* +- [ ] 13.5 After merge, archive: `openspec archive docs-new-user-onboarding` diff --git a/pyproject.toml b/pyproject.toml index 8466bb8a..3665ae77 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -4,7 +4,7 @@ build-backend = "hatchling.build" [project] name = "specfact-cli" -version = "0.44.0" +version = "0.45.1" description = "The swiss knife CLI for agile DevOps teams. 
Keep backlog, specs, tests, and code in sync with validation and contract enforcement for new projects and long-lived codebases." readme = "README.md" requires-python = ">=3.11" @@ -36,6 +36,10 @@ keywords = [ "cli", "specfact", ] +# Install profiles (PEP 621 extras): +# - `pip install specfact-cli` β€” minimal runtime (CLI, registry, contracts via icontract/beartype, no CrossHair/Hypothesis wheels). +# - `pip install specfact-cli[contracts]` β€” add CrossHair + Hypothesis for contract exploration / property tooling. +# - `pip install specfact-cli[dev]` β€” contributors: pytest, linters, and same contract tools as [contracts] (see dev list). classifiers = [ "Development Status :: 4 - Beta", "Intended Audience :: Developers", @@ -51,16 +55,17 @@ classifiers = [ dependencies = [ # Core dependencies "pydantic>=2.12.3", - "python-dotenv>=1.2.1", "typing-extensions>=4.15.0", "PyYAML>=6.0.3", "requests>=2.32.3", "azure-identity>=1.17.1", "cryptography>=43.0.0", - "cffi>=1.17.1", - + "packaging>=24.0", + # PEP 440 / markers for module installer and registry (do not rely on transitive pins only) + # CLI framework - "typer>=0.20.0", + # Typer 0.24+ requires click>=8.2.1 (generic Choice[...]); dev semgrep pins click~=8.1.8 β€” cap Typer. 
+ "typer>=0.20.0,<0.24", "rich>=13.5.2,<13.6.0", # Compatible with semgrep (requires rich~=13.5.2) "questionary>=2.0.1", # Interactive prompts with arrow key navigation @@ -80,16 +85,10 @@ dependencies = [ # Schema validation "jsonschema>=4.23.0", - # Contract-First Development Dependencies + # Contract-First (runtime decorators; exploration tools are optional extra `contracts`) "icontract>=2.7.1", # Design-by-contract decorators "beartype>=0.22.4", # Runtime type checking - "crosshair-tool>=0.0.97", # Contract exploration and counterexample discovery - "hypothesis>=6.142.4", # Property-based testing - - # Code analysis - "ruff>=0.14.2", - "radon>=6.0.1", - + # File system watching "watchdog>=6.0.0", @@ -99,7 +98,13 @@ dependencies = [ ] [project.optional-dependencies] +contracts = [ + "crosshair-tool>=0.0.97", + "hypothesis>=6.142.4", +] + dev = [ + "setuptools>=69.0.0", "pytest>=8.4.2", "pytest-cov>=7.0.0", "pytest-mock>=3.15.1", @@ -114,12 +119,14 @@ dev = [ "types-PyYAML>=6.0.12.20250516", "pip-tools>=7.5.1", "semgrep>=1.144.0", # Latest version compatible with rich~=13.5.2 - - # Contract-First Development Dependencies (dev) - "icontract>=2.7.1", - "beartype>=0.22.4", + + # Same contract exploration stack as [contracts] (extras cannot self-reference) "crosshair-tool>=0.0.97", "hypothesis>=6.142.4", + + # Contract-First Development Dependencies (dev) + "icontract>=2.7.1", + "beartype>=0.22.4", # Enhanced Analysis Tools (for local development) # Note: syft excluded from dev/test due to rich version conflict with semgrep @@ -165,12 +172,13 @@ specfact-cli = "specfact_cli.cli:cli_main" # Alias for uvx compatibility [tool.hatch.envs.default] python = "3.12" dependencies = [ + # Semgrep pulls opentelemetry; some versions import pkg_resources (setuptools) + "setuptools>=69.0.0", "pip-tools", "pytest", "pytest-cov", "pytest-mock", "pytest-xdist", - "python-dotenv", "pre-commit", # Ensure format/lint tools are available in the hatch env "isort>=7.0.0", @@ -204,6 +212,9 
@@ governance = "pylint src tests tools --reports=y --output-format=parseable" format = "ruff check . --fix && ruff format ." # Code scanning (Semgrep) +# Semgrep 1.38+ deprecated implicit .semgrep.yml discovery; always pass --config explicitly. +# Script must not be named `semgrep` (Hatch treats that as circular expansion with the semgrep CLI). +semgrep-full = "semgrep --config tools/semgrep {args}" scan = "semgrep --config tools/semgrep/async.yml {args}" scan-all = "semgrep --config tools/semgrep/async.yml ." scan-json = "semgrep --config tools/semgrep/async.yml --json . > logs/semgrep-results.json" diff --git a/resources/templates/github-action.yml.j2 b/resources/templates/github-action.yml.j2 index 29ffbea0..1f5dde1e 100644 --- a/resources/templates/github-action.yml.j2 +++ b/resources/templates/github-action.yml.j2 @@ -1,5 +1,7 @@ # yaml-language-server: $schema=https://json.schemastore.org/github-workflow.json # yamllint disable rule:line-length rule:truthy +# Generated workflow β€” aligns with current SpecFact CLI: non-interactive CI must run +# `specfact init --profile …` (or `specfact module install …`) before `specfact code …` commands. 
name: SpecFact CLI Validation on: @@ -18,7 +20,7 @@ on: workflow_dispatch: inputs: budget: - description: "Time budget in seconds" + description: "Time budget in seconds for code repro" required: false default: "{{ budget }}" type: string @@ -32,14 +34,19 @@ on: - warn - log version_check_mode: - description: "Version check mode (info, warn, block)" + description: "Project bundle version check (info, warn, block; skipped if no .specfact/projects)" required: false - default: "warn" + default: "info" type: choice options: - info - warn - block + bundle_name: + description: "Project bundle name for `specfact project version check` (under .specfact/projects/)" + required: false + default: "{{ bundle_name }}" + type: string jobs: specfact-validation: @@ -59,53 +66,75 @@ jobs: python-version: "{{ python_version }}" cache: "pip" - - name: Install dependencies - run: | - python -m pip install --upgrade pip - pip install hatch - - name: Install SpecFact CLI run: | + python -m pip install --upgrade pip echo "πŸ“¦ Installing SpecFact CLI..." pip install specfact-cli + - name: Bootstrap SpecFact (CI β€” required for workflow commands) + run: | + # Non-interactive CI must pass --profile or --install (see `specfact init --help`). + specfact init --profile solo-developer --repo . 
+ - name: Set validation parameters id: validation run: | - BUDGET="${INPUT_BUDGET:-{{ budget }}}" - MODE="${INPUT_MODE:-block}" - VERSION_CHECK_MODE="${INPUT_VERSION_CHECK_MODE:-info}" - echo "budget=$BUDGET" >> $GITHUB_OUTPUT - echo "mode=$MODE" >> $GITHUB_OUTPUT - echo "version_check_mode=$VERSION_CHECK_MODE" >> $GITHUB_OUTPUT - echo "SPECFACT_BUDGET=$BUDGET" >> $GITHUB_ENV - echo "SPECFACT_MODE=$MODE" >> $GITHUB_ENV - echo "SPECFACT_VERSION_CHECK_MODE=$VERSION_CHECK_MODE" >> $GITHUB_ENV + BUDGET="{% raw %}${{ github.event.inputs.budget || '{% endraw %}{{ budget }}{% raw %}' }}{% endraw %}" + MODE="{% raw %}${{ github.event.inputs.mode || 'block' }}{% endraw %}" + VERSION_CHECK_MODE="{% raw %}${{ github.event.inputs.version_check_mode || 'info' }}{% endraw %}" + BUNDLE_NAME="{% raw %}${{ github.event.inputs.bundle_name || '{% endraw %}{{ bundle_name }}{% raw %}' }}{% endraw %}" + echo "budget=$BUDGET" >> "$GITHUB_OUTPUT" + echo "mode=$MODE" >> "$GITHUB_OUTPUT" + echo "version_check_mode=$VERSION_CHECK_MODE" >> "$GITHUB_OUTPUT" + echo "bundle_name=$BUNDLE_NAME" >> "$GITHUB_OUTPUT" + echo "SPECFACT_BUDGET=$BUDGET" >> "$GITHUB_ENV" + echo "SPECFACT_MODE=$MODE" >> "$GITHUB_ENV" + echo "SPECFACT_VERSION_CHECK_MODE=$VERSION_CHECK_MODE" >> "$GITHUB_ENV" + echo "BUNDLE_NAME=$BUNDLE_NAME" >> "$GITHUB_ENV" + + - name: Optional β€” CrossHair / repro tooling setup + continue-on-error: true + run: specfact code repro setup - - name: Run Contract Validation + - name: Run contract validation (code repro) id: repro continue-on-error: true run: | - specfact repro --verbose --budget {% raw %}${{ steps.validation.outputs.budget }}{% endraw %} || true - echo "exit_code=$?" >> $GITHUB_OUTPUT + set +e + specfact code repro --verbose --budget "{% raw %}${{ steps.validation.outputs.budget }}{% endraw %}" + ec=$? 
+ set -e + echo "exit_code=$ec" >> "$GITHUB_OUTPUT" - - name: Version check + - name: Project bundle version check (optional) id: version_check continue-on-error: true run: | VERSION_CHECK_MODE="{% raw %}${{ steps.validation.outputs.version_check_mode }}{% endraw %}" - echo "πŸ“Œ Checking bundle version recommendation (mode: $VERSION_CHECK_MODE)..." - if specfact project version check --repo .; then - echo "version_check_passed=true" >> $GITHUB_OUTPUT + BUNDLE_NAME="{% raw %}${{ steps.validation.outputs.bundle_name }}{% endraw %}" + echo "πŸ“Œ Bundle version check (mode: $VERSION_CHECK_MODE, bundle: $BUNDLE_NAME)..." + if [ ! -d .specfact/projects ]; then + echo "No .specfact/projects β€” skipping project version check." + echo "version_check_passed=skipped" >> "$GITHUB_OUTPUT" + exit 0 + fi + if ! find .specfact/projects -mindepth 1 -maxdepth 1 -type d 2>/dev/null | grep -q .; then + echo "No project bundles under .specfact/projects β€” skipping." + echo "version_check_passed=skipped" >> "$GITHUB_OUTPUT" + exit 0 + fi + if specfact project version check --repo . 
--bundle "$BUNDLE_NAME"; then + echo "version_check_passed=true" >> "$GITHUB_OUTPUT" else - echo "version_check_passed=false" >> $GITHUB_OUTPUT + echo "version_check_passed=false" >> "$GITHUB_OUTPUT" if [ "$VERSION_CHECK_MODE" = "warn" ]; then - echo "⚠️ Version check recommendation not followed (warn mode - continuing)" + echo "⚠️ Version check did not pass (warn mode β€” continuing)" elif [ "$VERSION_CHECK_MODE" = "block" ]; then - echo "❌ Version check recommendation not followed (block mode - failing)" + echo "❌ Version check failed (block mode)" exit 1 else - echo "ℹ️ Version check recommendation available (info mode - continuing)" + echo "ℹ️ Version check finished (info mode β€” continuing)" fi fi @@ -117,28 +146,28 @@ jobs: if [ -d "$REPORT_DIR" ]; then LATEST_REPORT=$(find "$REPORT_DIR" -name "report-*.yaml" -type f -printf "%T@ %p\n" | sort -n | tail -1 | cut -d' ' -f2-) if [ -n "$LATEST_REPORT" ]; then - echo "path=$LATEST_REPORT" >> $GITHUB_OUTPUT - echo "SPECFACT_REPORT_PATH=$LATEST_REPORT" >> $GITHUB_ENV + echo "path=$LATEST_REPORT" >> "$GITHUB_OUTPUT" + echo "SPECFACT_REPORT_PATH=$LATEST_REPORT" >> "$GITHUB_ENV" fi fi - name: Create GitHub annotations id: annotations - if: always() && {% raw %}steps.report.outputs.path != ''{% endraw %} + if: {% raw %}${{ always() && steps.report.outputs.path != '' }}{% endraw %} run: | python -m specfact_cli.utils.github_annotations || true - name: Generate PR comment id: pr-comment - if: always() && {% raw %}github.event_name == 'pull_request' && steps.report.outputs.path != ''{% endraw %} + if: {% raw %}${{ always() && github.event_name == 'pull_request' && steps.report.outputs.path != '' }}{% endraw %} run: | python -m specfact_cli.utils.github_annotations if [ -f ".specfact/pr-comment.md" ]; then - echo "comment_path=.specfact/pr-comment.md" >> $GITHUB_OUTPUT + echo "comment_path=.specfact/pr-comment.md" >> "$GITHUB_OUTPUT" fi - name: Post PR comment - if: always() && {% raw %}github.event_name == 'pull_request' 
&& steps.pr-comment.outputs.comment_path != ''{% endraw %} + if: {% raw %}${{ always() && github.event_name == 'pull_request' && steps.pr-comment.outputs.comment_path != '' }}{% endraw %} uses: actions/github-script@v7 with: script: | @@ -165,8 +194,7 @@ jobs: if-no-files-found: ignore - name: Fail workflow if validation failed - if: {% raw %}steps.repro.outputs.exit_code != '0' && steps.validation.outputs.mode == 'block'{% endraw %} + if: {% raw %}${{ steps.repro.outputs.exit_code != '0' && steps.validation.outputs.mode == 'block' }}{% endraw %} run: | echo "❌ Validation failed. Exiting with error code." exit 1 - diff --git a/resources/templates/pr-template.md.j2 b/resources/templates/pr-template.md.j2 index d32006cb..29fee4be 100644 --- a/resources/templates/pr-template.md.j2 +++ b/resources/templates/pr-template.md.j2 @@ -14,6 +14,15 @@ ## βœ… Validation +**CI / local prerequisites (SpecFact v0.40+):** + +- Install: `pip install specfact-cli` (or `uvx specfact-cli@latest` for one-off runs). +- **Non-interactive / CI** must bootstrap workflow bundles before `specfact code …` or `specfact project …`: + - `specfact init --profile solo-developer --repo .` (or another profile / `specfact init --install …`), **or** + - `specfact module install nold-ai/specfact-codebase` (and other bundles as needed). +- Contract repro in CI uses **`specfact code repro`** (not `specfact repro`). Optional: `specfact code repro setup` for CrossHair config. +- Optional `specfact project version check` needs a project under `.specfact/projects//` and `--bundle `. 
+ **SpecFact CLI Validation Results:** {% if validation_passed %} diff --git a/scripts/pre-commit-smart-checks.sh b/scripts/pre-commit-smart-checks.sh index 5876e458..1606b8f3 100755 --- a/scripts/pre-commit-smart-checks.sh +++ b/scripts/pre-commit-smart-checks.sh @@ -185,15 +185,23 @@ run_actionlint_if_needed() { } run_code_review_gate() { - local py_files - py_files=$(staged_python_files) - if [ -z "${py_files}" ]; then + # Build a bash array so we invoke pre_commit_code_review.py exactly once. Using xargs + # here can split into multiple subprocesses when the argument list is long (default + # max-chars), each overwriting .specfact/code-review.json β€” yielding partial or empty + # findings and a misleading artifact. + local py_array=() + while IFS= read -r line; do + [ -z "${line}" ] && continue + py_array+=("${line}") + done < <(staged_python_files) + + if [ ${#py_array[@]} -eq 0 ]; then info "ℹ️ No staged Python files β€” skipping code review gate" return fi info "πŸ›‘οΈ Running code review gate on staged Python files" - if echo "${py_files}" | xargs -r hatch run python scripts/pre_commit_code_review.py; then + if hatch run python scripts/pre_commit_code_review.py "${py_array[@]}"; then success "βœ… Code review gate passed" else error "❌ Code review gate failed" diff --git a/setup.py b/setup.py index f2d563d2..8a34c4ed 100644 --- a/setup.py +++ b/setup.py @@ -1,5 +1,5 @@ #!/usr/bin/env python3 -"""Setup script for specfact-cli package.""" +"""Setup script for specfact-cli package (kept in sync with pyproject.toml [project].dependencies).""" from setuptools import find_packages, setup @@ -7,7 +7,7 @@ if __name__ == "__main__": _setup = setup( name="specfact-cli", - version="0.44.0", + version="0.45.1", description=( "The swiss knife CLI for agile DevOps teams. Keep backlog, specs, tests, and code in sync with " "validation and contract enforcement for new projects and long-lived codebases." 
@@ -15,21 +15,26 @@ packages=find_packages(where="src"), package_dir={"": "src"}, install_requires=[ - "pydantic>=2.11.5", - "python-dotenv>=1.1.0", - "PyYAML>=6.0.2", + "pydantic>=2.12.3", + "typing-extensions>=4.15.0", + "PyYAML>=6.0.3", "requests>=2.32.3", "azure-identity>=1.17.1", "cryptography>=43.0.0", - "cffi>=1.17.1", - "typer>=0.15.0", + "packaging>=24.0", + "typer>=0.20.0,<0.24", "rich>=13.5.2,<13.6.0", - "jinja2>=3.1.0", - "networkx>=3.2", - "gitpython>=3.1.0", + "questionary>=2.0.1", + "jinja2>=3.1.6", + "networkx>=3.4.2", + "graphviz>=0.20.1", + "gitpython>=3.1.45", + "ruamel.yaml>=0.18.16", + "jsonschema>=4.23.0", "icontract>=2.7.1", - "beartype>=0.22.2", - "crosshair-tool>=0.0.97", - "hypothesis>=6.140.3", + "beartype>=0.22.4", + "watchdog>=6.0.0", + "opentelemetry-sdk>=1.27.0", + "opentelemetry-exporter-otlp-proto-http>=1.27.0", ], ) diff --git a/skills/specfact-code-review/SKILL.md b/skills/specfact-code-review/SKILL.md new file mode 100644 index 00000000..dbcd60d5 --- /dev/null +++ b/skills/specfact-code-review/SKILL.md @@ -0,0 +1,32 @@ +--- +name: specfact-code-review +description: House rules for AI coding sessions derived from review findings +allowed-tools: [] +--- + +# House Rules - AI Coding Context (v1) + +Updated: 2026-03-16 | Module: nold-ai/specfact-code-review + +## DO +- Ask whether tests should be included before repo-wide review; default to excluding tests unless test changes are the target +- Keep functions under 120 LOC and cyclomatic complexity <= 12 +- Add @require/@ensure (icontract) + @beartype to all new public APIs +- Run hatch run contract-test-contracts before any commit +- Guard all chained attribute access: a.b.c needs null-check or early return +- Return typed values from all public methods +- Write the test file BEFORE the feature file (TDD-first) +- Use get_logger(__name__) from common.logger_setup, never print() + +## DON'T +- Don't enable known noisy findings unless you explicitly want strict/full review output +- 
Don't mix read + write in the same method; split responsibilities +- Don't use bare except: or except Exception: pass +- Don't add # noqa / # type: ignore without inline justification +- Don't call repository.* and http_client.* in the same function +- Don't import at module level if it triggers network calls +- Don't hardcode secrets; use env vars via pydantic.BaseSettings +- Don't create functions > 120 lines + +## TOP VIOLATIONS (auto-updated by specfact code review rules update) + diff --git a/src/__init__.py b/src/__init__.py index 711efdfa..b02d2da8 100644 --- a/src/__init__.py +++ b/src/__init__.py @@ -3,4 +3,4 @@ """ # Package version: keep in sync with pyproject.toml, setup.py, src/specfact_cli/__init__.py -__version__ = "0.43.3" +__version__ = "0.45.1" diff --git a/src/specfact_cli/__init__.py b/src/specfact_cli/__init__.py index a4a9b8c3..ee71846a 100644 --- a/src/specfact_cli/__init__.py +++ b/src/specfact_cli/__init__.py @@ -45,6 +45,6 @@ def _bootstrap_bundle_paths() -> None: _bootstrap_bundle_paths() -__version__ = "0.44.0" +__version__ = "0.45.1" __all__ = ["__version__"] diff --git a/src/specfact_cli/cli.py b/src/specfact_cli/cli.py index 12d76121..15a9b2de 100644 --- a/src/specfact_cli/cli.py +++ b/src/specfact_cli/cli.py @@ -6,12 +6,15 @@ from __future__ import annotations +import importlib +import inspect import os import sys -from collections.abc import Callable +from collections.abc import Callable, Mapping +from dataclasses import dataclass from datetime import datetime from pathlib import Path -from typing import Annotated, cast +from typing import Annotated, Any, cast _DetectShellFn = Callable[..., tuple[str | None, str | None]] @@ -88,11 +91,52 @@ def _normalized_detect_shell(pid: int | None = None, max_depth: int = 10) -> tup "drift", "analyze", "policy", - "import", "sync", } ) +# First token -> official marketplace module that provides it (not the VS Code `code` CLI). 
+# Codebase import is `specfact code import`; persona Markdown import is `specfact project import` (not flat). +_INVOKED_TO_MARKETPLACE_MODULE: dict[str, str] = { + "backlog": "nold-ai/specfact-backlog", + "policy": "nold-ai/specfact-backlog", + "code": "nold-ai/specfact-codebase", + "analyze": "nold-ai/specfact-codebase", + "drift": "nold-ai/specfact-codebase", + "validate": "nold-ai/specfact-codebase", + "repro": "nold-ai/specfact-codebase", + "project": "nold-ai/specfact-project", + "plan": "nold-ai/specfact-project", + "sync": "nold-ai/specfact-project", + "migrate": "nold-ai/specfact-project", + "spec": "nold-ai/specfact-spec", + "contract": "nold-ai/specfact-spec", + "sdd": "nold-ai/specfact-spec", + "generate": "nold-ai/specfact-spec", + "govern": "nold-ai/specfact-govern", + "enforce": "nold-ai/specfact-govern", + "patch": "nold-ai/specfact-govern", +} + + +def _print_missing_bundle_command_help(invoked: str) -> None: + """Print install guidance when a bundle group or shim is not registered.""" + module_id = _INVOKED_TO_MARKETPLACE_MODULE.get(invoked) + console = get_configured_console() + if module_id is not None: + console.print( + f"[bold red]Module '{module_id}' is not installed.[/bold red]\n" + f"The [bold]{invoked}[/bold] command group is provided by that module. " + f"Install with [bold]specfact module install {module_id}[/bold], " + "or run [bold]specfact init --profile [/bold] to install bundles." + ) + return + console.print( + f"[bold red]Command '{invoked}' is not installed.[/bold red]\n" + "Install workflow bundles with [bold]specfact init --profile [/bold] " + "or [bold]specfact module install [/bold]." 
+ ) + class _RootCLIGroup(ProgressiveDisclosureGroup): """Root group that shows actionable error when an unknown command is a known bundle group/shim.""" @@ -108,11 +152,7 @@ def resolve_command( result = super().resolve_command(ctx, args) except click.UsageError: if invoked in KNOWN_BUNDLE_GROUP_OR_SHIM_NAMES: - get_configured_console().print( - f"[bold red]Command '{invoked}' is not installed.[/bold red]\n" - "Install workflow bundles with [bold]specfact init --profile [/bold] " - "or [bold]specfact module install [/bold]." - ) + _print_missing_bundle_command_help(invoked) raise SystemExit(1) from None raise _name, cmd, remaining = result @@ -121,11 +161,7 @@ def resolve_command( invoked = remaining[0] if invoked not in KNOWN_BUNDLE_GROUP_OR_SHIM_NAMES: return result - get_configured_console().print( - f"[bold red]Command '{invoked}' is not installed.[/bold red]\n" - "Install workflow bundles with [bold]specfact init --profile [/bold] " - "or [bold]specfact module install [/bold]." - ) + _print_missing_bundle_command_help(invoked) raise SystemExit(1) @@ -283,9 +319,101 @@ def get_current_mode() -> OperationalMode: return _current_mode -@app.callback(invoke_without_command=True) -@require(lambda ctx: ctx is not None, "ctx must not be None") -def main( +@dataclass +class _RootCliFlags: + """Bundled root callback options (keeps the Typer callback body small for radon-kiss).""" + + version: bool | None + banner: bool + mode: str | None + debug: bool + skip_checks: bool + input_format: StructuredFormat + output_format: StructuredFormat + interaction: bool | None + + +_ROOT_MAIN_DOC = """ +SpecFact CLI - Specβ†’Contractβ†’Sentinel for contract-driven development. + +Transform your development workflow with automated quality gates, +runtime contract validation, and state machine workflows. + +Run **specfact init** or **specfact module install** to add workflow bundles +(backlog, code, project, spec, govern). 
+ +**Backlog Management**: Use `specfact backlog refine` for AI-assisted template-driven +refinement of backlog items from GitHub Issues, Azure DevOps, and other tools. + +Mode Detection: +- Explicit --mode flag (highest priority) +- Auto-detect from environment (CoPilot API, IDE integration) +- Default to CI/CD mode + +Interaction Detection: +- Explicit --interactive/--no-interactive (highest priority) +- Auto-detect from terminal and CI environment +""" + + +def _apply_root_app_callback(ctx: typer.Context, flags: _RootCliFlags) -> None: + global _show_banner + global console + + # Rebind root and loaded module consoles for each invocation to avoid stale + # closed capture streams across sequential CliRunner/pytest command runs. + console = get_configured_console() + runtime.refresh_loaded_module_consoles() + + # Set banner flag based on --banner option + _show_banner = flags.banner + + # Set debug mode + set_debug_mode(flags.debug) + if flags.debug: + init_debug_log_file() + + runtime.configure_io_formats(input_format=flags.input_format, output_format=flags.output_format) + # Invert logic: --interactive means not non-interactive, --no-interactive means non-interactive + if flags.interaction is not None: + runtime.set_non_interactive_override(not flags.interaction) + else: + runtime.set_non_interactive_override(None) + + # Show welcome message if no command provided + if ctx.invoked_subcommand is None: + console.print( + Panel.fit( + "[bold green]βœ“[/bold green] SpecFact CLI is installed and working!\n\n" + f"Version: [cyan]{__version__}[/cyan]\n" + "Run [bold]specfact --help[/bold] for available commands.", + title="[bold]Welcome to SpecFact CLI[/bold]", + border_style="green", + ) + ) + raise typer.Exit() + + # Store mode in context for commands to access + if ctx.obj is None: + ctx.obj = {} + ctx.obj["mode"] = get_current_mode() + + +def _root_cli_flags_from_kwargs(kwargs: Mapping[str, Any]) -> _RootCliFlags: + """Build flags from Typer callback kwargs (param 
names match merged root CLI signature).""" + return _RootCliFlags( + version=kwargs.get("version"), + banner=kwargs.get("banner", False), + mode=kwargs.get("mode"), + debug=kwargs.get("debug", False), + skip_checks=kwargs.get("skip_checks", False), + input_format=kwargs.get("input_format", StructuredFormat.YAML), + output_format=kwargs.get("output_format", StructuredFormat.YAML), + interaction=kwargs.get("interaction"), + ) + + +def _root_sig_part1( ctx: typer.Context, version: bool | None = typer.Option( None, @@ -306,6 +434,11 @@ def main( callback=mode_callback, help="Operational mode: cicd (fast, deterministic) or copilot (enhanced, interactive)", ), +) -> None: + """Typer param signature fragment (merged for root callback); not invoked at runtime.""" + + +def _root_sig_part2( debug: bool = typer.Option( False, "--debug", @@ -332,6 +465,11 @@ def main( case_sensitive=False, ), ] = StructuredFormat.YAML, +) -> None: + """Typer param signature fragment (merged for root callback); not invoked at runtime.""" + + +def _root_sig_part3( interaction: Annotated[ bool | None, typer.Option( @@ -340,67 +478,25 @@ def main( ), ] = None, ) -> None: - """ - SpecFact CLI - Specβ†’Contractβ†’Sentinel for contract-driven development. - - Transform your development workflow with automated quality gates, - runtime contract validation, and state machine workflows. - - Run **specfact init** or **specfact module install** to add workflow bundles - (backlog, code, project, spec, govern). - - **Backlog Management**: Use `specfact backlog refine` for AI-assisted template-driven - refinement of backlog items from GitHub Issues, Azure DevOps, and other tools. 
+ """Typer param signature fragment (merged for root callback); not invoked at runtime.""" - Mode Detection: - - Explicit --mode flag (highest priority) - - Auto-detect from environment (CoPilot API, IDE integration) - - Default to CI/CD mode - - Interaction Detection: - - Explicit --interactive/--no-interactive (highest priority) - - Auto-detect from terminal and CI environment - """ - global _show_banner - global console - - # Rebind root and loaded module consoles for each invocation to avoid stale - # closed capture streams across sequential CliRunner/pytest command runs. - console = get_configured_console() - runtime.refresh_loaded_module_consoles() - # Set banner flag based on --banner option - _show_banner = banner +def _merge_root_cli_param_specs(orig: Callable[..., Any]) -> dict[str, Any]: + merged: dict[str, Any] = {} + merged.update(orig(_root_sig_part1)) + merged.update(orig(_root_sig_part2)) + merged.update(orig(_root_sig_part3)) + return merged - # Set debug mode - set_debug_mode(debug) - if debug: - init_debug_log_file() - runtime.configure_io_formats(input_format=input_format, output_format=output_format) - # Invert logic: --interactive means not non-interactive, --no-interactive means non-interactive - if interaction is not None: - runtime.set_non_interactive_override(not interaction) - else: - runtime.set_non_interactive_override(None) +@app.callback(invoke_without_command=True) +@require(lambda ctx: ctx is not None, "ctx must not be None") +def main(ctx: typer.Context, **kwargs) -> None: + """SpecFact CLI root callback (full help text in _ROOT_MAIN_DOC).""" + _apply_root_app_callback(ctx, _root_cli_flags_from_kwargs(kwargs)) - # Show welcome message if no command provided - if ctx.invoked_subcommand is None: - console.print( - Panel.fit( - "[bold green]βœ“[/bold green] SpecFact CLI is installed and working!\n\n" - f"Version: [cyan]{__version__}[/cyan]\n" - "Run [bold]specfact --help[/bold] for available commands.", - title="[bold]Welcome to 
SpecFact CLI[/bold]", - border_style="green", - ) - ) - raise typer.Exit() - # Store mode in context for commands to access - if ctx.obj is None: - ctx.obj = {} - ctx.obj["mode"] = get_current_mode() +main.__doc__ = inspect.cleandoc(_ROOT_MAIN_DOC) # Register command groups from CommandRegistry (bootstrap preserves display order). @@ -602,24 +698,54 @@ def _get_group_from_info_wrapper( # Original Typer build functions (set once by _patch_typer_build so re-import of cli doesn't overwrite with our wrapper). _typer_get_group_from_info_original: Callable[..., click.Group] | None = None _typer_get_command_original: Callable[[typer.Typer], click.Command] | None = None +_typer_get_params_original: Callable[..., Any] | None = None + + +def _specfact_get_params_from_function(func: Callable[..., Any]) -> Any: + """Map thin Typer entrypoints to their option-rich implementations for Click param generation.""" + orig = _typer_get_params_original + if orig is None: + import typer.utils as typer_utils + + return typer_utils.get_params_from_function(func) + # ``@app.callback()`` / ``@app.command()`` may wrap the function; match by name + module. + if getattr(func, "__name__", "") == "main" and getattr(func, "__module__", "") == __name__: + return _merge_root_cli_param_specs(orig) + if ( + getattr(func, "__name__", "") == "install" + and getattr(func, "__module__", "") == "specfact_cli.modules.module_registry.src.commands" + ): + module = sys.modules.get("specfact_cli.modules.module_registry.src.commands") + if module is not None: + merge_install = getattr(module, "_specfact_merge_install_param_specs", None) + if merge_install is not None: + return merge_install(orig) + return orig(func) # Patch so root app build uses our delegate group for lazy typers (built via get_group_from_info). 
def _patch_typer_build() -> None: - import typer.main as typer_main + import typer.utils as typer_utils - global _typer_get_group_from_info_original, _typer_get_command_original + typer_main = cast(Any, importlib.import_module("typer.main")) + + global _typer_get_group_from_info_original, _typer_get_command_original, _typer_get_params_original # Save originals only on first patch; avoid overwriting with our wrapper when cli is re-imported (e.g. by plan module). if _typer_get_group_from_info_original is None: _typer_get_group_from_info_original = typer_main.get_group_from_info if _typer_get_command_original is None: _typer_get_command_original = typer_main.get_command + if _typer_get_params_original is None: + _typer_get_params_original = typer_utils.get_params_from_function + typer_utils.get_params_from_function = _specfact_get_params_from_function + # typer.main may have bound get_params_from_function at import time; keep in sync. + typer_main.get_params_from_function = _specfact_get_params_from_function typer_main.get_command = _get_command typer_main.get_group_from_info = _get_group_from_info_wrapper -register_builtin_commands() _patch_typer_build() +register_builtin_commands() def _grouped_command_order( diff --git a/src/specfact_cli/generators/plan_generator.py b/src/specfact_cli/generators/plan_generator.py index b98a0082..633c97a2 100644 --- a/src/specfact_cli/generators/plan_generator.py +++ b/src/specfact_cli/generators/plan_generator.py @@ -35,8 +35,10 @@ def __init__(self, templates_dir: Path | None = None) -> None: self.templates_dir = Path(templates_dir) self.env = Environment( loader=FileSystemLoader(self.templates_dir), - trim_blocks=True, - lstrip_blocks=True, + # Must be False: trim_blocks removes the newline after {% endraw %}, merging `if:` with `run:`. + trim_blocks=False, + # Must be False: lstrip_blocks strips newlines after {% endraw %} in some cases. 
+ lstrip_blocks=False, ) @beartype diff --git a/src/specfact_cli/generators/workflow_generator.py b/src/specfact_cli/generators/workflow_generator.py index 322e61ba..fc6144ed 100644 --- a/src/specfact_cli/generators/workflow_generator.py +++ b/src/specfact_cli/generators/workflow_generator.py @@ -40,8 +40,10 @@ def __init__(self, templates_dir: Path | None = None) -> None: self.templates_dir = Path(templates_dir) self.env = Environment( loader=FileSystemLoader(self.templates_dir), - trim_blocks=True, - lstrip_blocks=True, + # Must be False: trim_blocks removes the newline after {% endraw %}, merging `if:` with `run:`. + trim_blocks=False, + # Must be False: lstrip_blocks strips newlines after {% endraw %} in some cases. + lstrip_blocks=False, ) @beartype @@ -56,6 +58,7 @@ def generate_github_action( repo_name: str | None = None, budget: int = 90, python_version: str = "3.12", + bundle_name: str = "main", ) -> None: """ Generate GitHub Action workflow for SpecFact validation. @@ -65,6 +68,7 @@ def generate_github_action( repo_name: Repository name for context budget: Time budget in seconds for validation (must be > 0) python_version: Python version for workflow (must be 3.x) + bundle_name: Default project bundle name for optional `project version check` in CI Raises: FileNotFoundError: If template file doesn't exist @@ -75,6 +79,7 @@ def generate_github_action( "repo_name": repo_name or "specfact-project", "budget": budget, "python_version": python_version, + "bundle_name": bundle_name, } # Render template diff --git a/src/specfact_cli/modules/init/module-package.yaml b/src/specfact_cli/modules/init/module-package.yaml index 79bfa74d..aa9347f6 100644 --- a/src/specfact_cli/modules/init/module-package.yaml +++ b/src/specfact_cli/modules/init/module-package.yaml @@ -1,5 +1,5 @@ name: init -version: 0.1.21 +version: 0.1.24 commands: - init category: core @@ -17,5 +17,5 @@ publisher: description: Initialize SpecFact workspace and bootstrap local configuration. 
license: Apache-2.0 integrity: - checksum: sha256:925fd303b581441618597b5fa5d7f308cbc31405455ae7811243c072616974bf - signature: uMDekXAxcKEDb8V1rqEeL8X806bP4otT5rT5ShXFlUNjcJ06c90s7xg1KqTNYj/eVsuh07xKSGnWKo1BG6juAw== + checksum: sha256:eec17e6377c9a4a9d0abc4c3da81069d729ff4394b277016f5428d2e798ab1ef + signature: FLs/jr95lBtXCawAzBK08xb0LCPl/veYsSZMhq11KU+11ieyIg8j/GosyCOrpUsTKjb3j3xfZv1MmjERHn7mDg== diff --git a/src/specfact_cli/modules/init/src/commands.py b/src/specfact_cli/modules/init/src/commands.py index 057c5d0c..a725b15a 100644 --- a/src/specfact_cli/modules/init/src/commands.py +++ b/src/specfact_cli/modules/init/src/commands.py @@ -2,6 +2,7 @@ from __future__ import annotations +import os import subprocess from pathlib import Path from typing import Any, cast @@ -168,6 +169,15 @@ def _copy_backlog_field_mapping_templates(repo_path: Path, force: bool, console: app = typer.Typer(help="Bootstrap SpecFact (use `init ide` for IDE setup; module lifecycle is under `specfact module`)") console = Console() + + +def _init_user_visible_step(message: str) -> None: + """Print init progress unless running under pytest (keeps test output clean).""" + if os.environ.get("PYTEST_CURRENT_TEST"): + return + console.print(message) + + _MODULE_IO_CONTRACT = ModuleIOContract import_to_bundle = module_io_shim.import_to_bundle export_from_bundle = module_io_shim.export_from_bundle @@ -428,6 +438,7 @@ def _install_profile_bundles(profile: str, install_root: Path, non_interactive: """Resolve profile to bundle list and install via module installer.""" bundle_ids = first_run_selection.resolve_profile_bundles(profile) if bundle_ids: + _init_user_visible_step(f"[cyan]β†’[/cyan] Profile [bold]{profile}[/bold]: preparing workflow bundles…") install_bundles_for_init( bundle_ids, install_root, @@ -440,6 +451,7 @@ def _install_bundle_list(install_arg: str, install_root: Path, non_interactive: """Parse comma-separated or 'all' and install bundles via module installer.""" bundle_ids = 
first_run_selection.resolve_install_bundles(install_arg) if bundle_ids: + _init_user_visible_step("[cyan]β†’[/cyan] Installing bundles from [bold]--install[/bold]…") install_bundles_for_init( bundle_ids, install_root, @@ -705,10 +717,12 @@ def init( elif is_first_run(user_root=INIT_USER_MODULES_ROOT) and not is_non_interactive(): _run_interactive_first_run_install() + _init_user_visible_step("[cyan]β†’[/cyan] Discovering installed modules and writing registry state…") modules_list = get_discovered_modules_for_state(enable_ids=[], disable_ids=[]) if modules_list: write_modules_state(modules_list) + _init_user_visible_step("[cyan]β†’[/cyan] Indexing CLI commands for help cache…") run_discovery_and_write_cache(__version__) if install_deps: @@ -725,6 +739,7 @@ def init( "[cyan]Module management has moved to `specfact module`[/cyan] " "[dim](for example: `specfact module list`, `specfact module init`)[/dim]" ) + _init_user_visible_step("[cyan]β†’[/cyan] Checking IDE prompt export status…") _audit_prompt_installation(repo_path) console.print("[dim]Use `specfact init ide` to install/update IDE prompts and settings.[/dim]") diff --git a/src/specfact_cli/modules/init/src/first_run_selection.py b/src/specfact_cli/modules/init/src/first_run_selection.py index 66032066..7bc7e0e1 100644 --- a/src/specfact_cli/modules/init/src/first_run_selection.py +++ b/src/specfact_cli/modules/init/src/first_run_selection.py @@ -2,7 +2,10 @@ from __future__ import annotations +import os +from dataclasses import dataclass from pathlib import Path +from typing import Any from beartype import beartype from icontract import ensure, require @@ -12,7 +15,7 @@ PROFILE_PRESETS: dict[str, list[str]] = { - "solo-developer": ["specfact-codebase"], + "solo-developer": ["specfact-codebase", "specfact-code-review"], "backlog-team": ["specfact-backlog", "specfact-project", "specfact-codebase"], "api-first-team": ["specfact-spec", "specfact-codebase"], "enterprise-full-stack": [ @@ -24,7 +27,7 @@ ], } 
-CANONICAL_BUNDLES: tuple[str, ...] = ( +_INSTALL_ALL_BUNDLES: tuple[str, ...] = ( "specfact-project", "specfact-backlog", "specfact-codebase", @@ -32,6 +35,19 @@ "specfact-govern", ) +# Includes marketplace-only bundles referenced by profiles (e.g. specfact-code-review). +CANONICAL_BUNDLES: tuple[str, ...] = (*_INSTALL_ALL_BUNDLES, "specfact-code-review") + +# Workflow bundles are installed from the marketplace (slim wheel has no per-command shims under ~/.specfact/modules). +MARKETPLACE_ONLY_BUNDLES: dict[str, str] = { + "specfact-project": "nold-ai/specfact-project", + "specfact-backlog": "nold-ai/specfact-backlog", + "specfact-codebase": "nold-ai/specfact-codebase", + "specfact-spec": "nold-ai/specfact-spec", + "specfact-govern": "nold-ai/specfact-govern", + "specfact-code-review": "nold-ai/specfact-code-review", +} + BUNDLE_ALIAS_TO_CANONICAL: dict[str, str] = { "project": "specfact-project", "backlog": "specfact-backlog", @@ -41,19 +57,176 @@ "govern": "specfact-govern", } +# Optional: names of *bundled* module dirs shipped inside this CLI wheel (see module_installer). Workflow +# bundles use MARKETPLACE_ONLY_BUNDLES only β€” do not list Typer subcommand names here. 
BUNDLE_TO_MODULE_NAMES: dict[str, list[str]] = { - "specfact-project": ["project", "plan", "import_cmd", "sync", "migrate"], - "specfact-backlog": ["backlog", "policy_engine"], - "specfact-codebase": ["analyze", "drift", "validate", "repro"], - "specfact-spec": ["contract", "spec", "sdd", "generate"], - "specfact-govern": ["enforce", "patch_mode"], + "specfact-project": [], + "specfact-backlog": [], + "specfact-codebase": [], + "specfact-spec": [], + "specfact-govern": [], + "specfact-code-review": [], } BUNDLE_DEPENDENCIES: dict[str, list[str]] = { "specfact-spec": ["specfact-project"], + "specfact-code-review": ["specfact-codebase"], +} + +BUNDLE_DISPLAY: dict[str, str] = { + "specfact-project": "Project lifecycle (project, plan, import, sync, migrate)", + "specfact-backlog": "Backlog management (backlog, policy)", + "specfact-codebase": "Codebase quality (analyze, drift, validate, repro)", + "specfact-spec": "Spec & API (contract, spec, sdd, generate)", + "specfact-govern": "Governance (enforce, patch)", + "specfact-code-review": "Scored code review (code review gate)", } +def _emit_init_bundle_progress() -> bool: + """Return True when init should print progress (suppressed during pytest).""" + return os.environ.get("PYTEST_CURRENT_TEST") is None + + +def _expand_bundle_install_order(bundle_ids: list[str]) -> list[str]: + to_install: list[str] = [] + seen: set[str] = set() + + def _add_bundle(bid: str) -> None: + if bid in seen: + return + for dep in BUNDLE_DEPENDENCIES.get(bid, []): + _add_bundle(dep) + seen.add(bid) + to_install.append(bid) + + for bid in bundle_ids: + if bid not in CANONICAL_BUNDLES: + continue + _add_bundle(bid) + return to_install + + +@dataclass +class _InitBundleInstallDeps: + root: Path + trust_non_official: bool + non_interactive: bool + emit: bool + console: Any + install_bundled_module: Any + install_module: Any + + +def _emit_bundle_row_header( + bid: str, + deps: _InitBundleInstallDeps, + *, + module_names: list[str], + 
bundle_label: str, + marketplace_id: str | None, +) -> None: + if not deps.emit: + return + if module_names or marketplace_id: + deps.console.print(f"[cyan]β†’[/cyan] Bundle [bold]{bid}[/bold] β€” {bundle_label}") + else: + deps.console.print( + f"[yellow]β†’[/yellow] Bundle [bold]{bid}[/bold] has no bundled modules in this CLI; " + f"install with [bold]specfact module install nold-ai/{bid}[/bold] when online." + ) + + +def _install_one_bundled_module_line(bid: str, module_name: str, deps: _InitBundleInstallDeps) -> None: + from specfact_cli.common import get_bridge_logger + + if deps.emit: + deps.console.print(f"[dim] Β·[/dim] Installing module [bold]{module_name}[/bold] …") + try: + installed = deps.install_bundled_module( + module_name, + deps.root, + trust_non_official=deps.trust_non_official, + non_interactive=deps.non_interactive, + ) + except Exception as e: + logger = get_bridge_logger(__name__) + logger.warning( + "Bundle install failed for %s: %s. Dependency resolver may be unavailable.", + module_name, + e, + ) + if deps.emit: + deps.console.print( + f"[red]βœ—[/red] Failed on module [bold]{module_name}[/bold] from bundle [bold]{bid}[/bold]: {e}" + ) + deps.console.print( + "[dim] Check disk space and permissions under ~/.specfact/modules, " + "or retry if a transient I/O error.[/dim]" + ) + raise + if installed: + if deps.emit: + deps.console.print(f"[green] βœ“[/green] {module_name} ready") + elif deps.emit: + deps.console.print( + f"[yellow] ⚠[/yellow] {module_name} is not bundled in this CLI build; " + f"try [bold]specfact module install nold-ai/{bid}[/bold] when online." 
+ ) + + +def _install_marketplace_for_bundle(bid: str, marketplace_id: str, deps: _InitBundleInstallDeps) -> None: + from specfact_cli.common import get_bridge_logger + + if deps.emit: + deps.console.print(f"[dim] Β·[/dim] Installing marketplace module [bold]{marketplace_id}[/bold] …") + try: + deps.install_module( + marketplace_id, + install_root=deps.root, + non_interactive=deps.non_interactive, + trust_non_official=deps.trust_non_official, + ) + except Exception as e: + logger = get_bridge_logger(__name__) + logger.warning( + "Marketplace bundle install failed for %s: %s.", + marketplace_id, + e, + ) + if deps.emit: + deps.console.print( + f"[red]βœ—[/red] Failed on marketplace module [bold]{marketplace_id}[/bold] " + f"from bundle [bold]{bid}[/bold]: {e}" + ) + deps.console.print( + "[dim] Check network access and permissions under ~/.specfact/modules, " + "or retry if a transient error.[/dim]" + ) + raise + if deps.emit: + deps.console.print(f"[green] βœ“[/green] {marketplace_id.split('/', 1)[1]} ready") + + +def _process_one_bundle_install_row(bid: str, deps: _InitBundleInstallDeps) -> None: + """Install bundled and/or marketplace modules for one canonical bundle id.""" + module_names = BUNDLE_TO_MODULE_NAMES.get(bid, []) + bundle_label = BUNDLE_DISPLAY.get(bid, bid) + marketplace_id = MARKETPLACE_ONLY_BUNDLES.get(bid) + _emit_bundle_row_header( + bid, + deps, + module_names=module_names, + bundle_label=bundle_label, + marketplace_id=marketplace_id, + ) + for module_name in module_names: + _install_one_bundled_module_line(bid, module_name, deps) + if not marketplace_id: + return + _install_marketplace_for_bundle(bid, marketplace_id, deps) + + @require(lambda profile: isinstance(profile, str) and profile.strip() != "", "profile must be non-empty string") @ensure(lambda result: isinstance(result, list), "result must be list of bundle ids") @beartype @@ -75,7 +248,7 @@ def resolve_install_bundles(install_arg: str) -> list[str]: if not raw: return [] if 
raw.lower() == "all": - return list(CANONICAL_BUNDLES) + return list(_INSTALL_ALL_BUNDLES) seen: set[str] = set() result: list[str] = [] for part in raw.split(","): @@ -124,50 +297,41 @@ def install_bundles_for_init( *, non_interactive: bool = False, trust_non_official: bool = False, + show_progress: bool = True, ) -> None: """Install the given bundles (and their dependencies) via bundled module installer.""" + from rich.console import Console + from specfact_cli.registry.module_installer import ( USER_MODULES_ROOT as DEFAULT_ROOT, install_bundled_module, + install_module, ) root = install_root or DEFAULT_ROOT - to_install: list[str] = [] - seen: set[str] = set() - - def _add_bundle(bid: str) -> None: - if bid in seen: - return - for dep in BUNDLE_DEPENDENCIES.get(bid, []): - _add_bundle(dep) - seen.add(bid) - to_install.append(bid) + emit = show_progress and _emit_init_bundle_progress() + console = Console() + to_install = _expand_bundle_install_order(bundle_ids) + deps = _InitBundleInstallDeps( + root=root, + trust_non_official=trust_non_official, + non_interactive=non_interactive, + emit=emit, + console=console, + install_bundled_module=install_bundled_module, + install_module=install_module, + ) - for bid in bundle_ids: - if bid not in CANONICAL_BUNDLES: - continue - _add_bundle(bid) + if emit and to_install: + bundle_list = ", ".join(to_install) + console.print(f"[cyan]β†’[/cyan] Seeding workflow bundles: [bold]{bundle_list}[/bold]") + console.print("[dim] (copying bundled modules into your user module directory)[/dim]") for bid in to_install: - module_names = BUNDLE_TO_MODULE_NAMES.get(bid, []) - for module_name in module_names: - try: - install_bundled_module( - module_name, - root, - trust_non_official=trust_non_official, - non_interactive=non_interactive, - ) - except Exception as e: - from specfact_cli.common import get_bridge_logger - - logger = get_bridge_logger(__name__) - logger.warning( - "Bundle install failed for %s: %s. 
Dependency resolver may be unavailable.", - module_name, - e, - ) - raise + _process_one_bundle_install_row(bid, deps) + + if emit and to_install: + console.print(f"[green]βœ“[/green] Installed: {', '.join(to_install)}") @ensure(lambda result: isinstance(result, list) and len(result) > 0, "Must return non-empty list of profile names") @@ -182,14 +346,6 @@ def get_valid_bundle_aliases() -> list[str]: return [*sorted(BUNDLE_ALIAS_TO_CANONICAL), "all"] -BUNDLE_DISPLAY: dict[str, str] = { - "specfact-project": "Project lifecycle (project, plan, import, sync, migrate)", - "specfact-backlog": "Backlog management (backlog, policy)", - "specfact-codebase": "Codebase quality (analyze, drift, validate, repro)", - "specfact-spec": "Spec & API (contract, spec, sdd, generate)", - "specfact-govern": "Governance (enforce, patch)", -} - PROFILE_DISPLAY_ORDER: list[tuple[str, str]] = [ ("solo-developer", "Solo developer"), ("backlog-team", "Backlog team"), diff --git a/src/specfact_cli/modules/module_registry/module-package.yaml b/src/specfact_cli/modules/module_registry/module-package.yaml index 8944331e..c0cf77ba 100644 --- a/src/specfact_cli/modules/module_registry/module-package.yaml +++ b/src/specfact_cli/modules/module_registry/module-package.yaml @@ -1,5 +1,5 @@ name: module-registry -version: 0.1.12 +version: 0.1.17 commands: - module category: core @@ -17,5 +17,5 @@ publisher: description: 'Manage modules: search, list, show, install, and upgrade.' 
license: Apache-2.0 integrity: - checksum: sha256:c73488f1e4966e97cb3c71fbd89ad631bc07beb3c5a795f1b81c53c2f4291803 - signature: 1vEDdIav1yUIPSxkkMLPODj6zoDB/QTcR/CJYn27OZRIVBCFU8Cyx+6MWgC79lAjiOK69wSYQSgyixP+NPwcDg== + checksum: sha256:9a16aa56293e9da54f6a6e11a7d596ffe254fab0fe23658966f606dba27abf94 + signature: LuwmQRpXtdH4GeKsT7cjm4TTNwnky2sVwuuFYoEzeh+V8BsBdjm4wm7WTTQMK5+KCy0q8erayaj4FOSiYRJcAQ== diff --git a/src/specfact_cli/modules/module_registry/src/commands.py b/src/specfact_cli/modules/module_registry/src/commands.py index a993b5f6..709ec896 100644 --- a/src/specfact_cli/modules/module_registry/src/commands.py +++ b/src/specfact_cli/modules/module_registry/src/commands.py @@ -3,14 +3,20 @@ from __future__ import annotations import inspect +import os import shutil +from collections.abc import Callable, Iterator +from contextlib import contextmanager +from dataclasses import dataclass from pathlib import Path -from typing import Any, cast +from typing import Annotated, Any, cast import typer import yaml from beartype import beartype +from click.exceptions import Exit as ClickExit from icontract import require +from packaging.version import InvalidVersion, Version from rich.console import Console from rich.table import Table @@ -21,6 +27,7 @@ from specfact_cli.registry.marketplace_client import fetch_registry_index from specfact_cli.registry.module_discovery import discover_all_modules from specfact_cli.registry.module_installer import ( + REGISTRY_ID_FILE, USER_MODULES_ROOT, get_bundled_module_metadata, install_bundled_module, @@ -43,24 +50,39 @@ console = Console() +def _module_upgrade_show_spinner() -> bool: + """Rich Live/spinner breaks some tests; mirror ``utils.progress`` test-mode detection.""" + return os.environ.get("TEST_MODE") != "true" and os.environ.get("PYTEST_CURRENT_TEST") is None + + +@contextmanager +def _module_upgrade_status(description: str) -> Iterator[None]: + """Show a Rich status spinner during long-running upgrade steps (fetch, 
install).""" + if _module_upgrade_show_spinner(): + with console.status(description, spinner="dots"): + yield + else: + yield + + def _init_scope_nonempty(scope: str) -> bool: return bool(scope) -def _module_id_arg_nonempty(module_id: str) -> bool: - return bool(module_id.strip()) +def _strip_nonempty(s: str) -> bool: + return bool(s.strip()) def _module_name_arg_nonempty(module_name: str) -> bool: - return bool(module_name.strip()) + return _strip_nonempty(module_name) def _alias_name_nonempty(alias_name: str) -> bool: - return bool(alias_name.strip()) + return _strip_nonempty(alias_name) def _command_name_nonempty(command_name: str) -> bool: - return bool(command_name.strip()) + return _strip_nonempty(command_name) def _url_nonempty(url: str) -> bool: @@ -68,23 +90,33 @@ def _url_nonempty(url: str) -> bool: def _registry_id_nonempty(registry_id: str) -> bool: - return registry_id.strip() != "" + return _strip_nonempty(registry_id) -def _module_id_optional_nonempty(module_id: str | None) -> bool: - return module_id is None or module_id.strip() != "" +def _search_query_nonempty(query: str) -> bool: + return _strip_nonempty(query) -def _search_query_nonempty(query: str) -> bool: - return bool(query.strip()) +def _module_id_optional_nonempty(module_id: str | None) -> bool: + return module_id is None or module_id.strip() != "" def _list_source_filter_ok(source: str | None) -> bool: return source is None or source in ("builtin", "project", "user", "marketplace", "custom") -def _upgrade_module_name_optional(module_name: str | None) -> bool: - return module_name is None or module_name.strip() != "" +def _upgrade_module_names_valid(module_names: list[str] | None) -> bool: + if module_names is None: + return True + return all(m.strip() != "" for m in module_names) + + +def _install_module_ids_nonempty(module_ids: list[str]) -> bool: + return bool(module_ids) and all(m.strip() != "" for m in module_ids) + + +def _uninstall_module_names_nonempty(module_names: list[str]) -> 
bool: + return bool(module_names) and all(m.strip() != "" for m in module_names) def _publisher_url_from_metadata(metadata: object | None) -> str: @@ -236,14 +268,72 @@ def init_modules( console.print(f"[green]Seeded {seeded} module(s) into {target_root}[/green]") -@app.command() -@beartype -@require(_module_id_arg_nonempty, "module_id must not be empty") -def install( - module_id: str = typer.Argument(..., help="Module id (name or namespace/name format)"), - version: str | None = typer.Option(None, "--version", help="Install a specific version"), +@dataclass(frozen=True) +class _InstallOneParams: + scope_normalized: str + source_normalized: str + target_root: Path + version: str | None + reinstall: bool + trust_non_official: bool + skip_deps: bool + force: bool + discovered_by_name: dict[str, Any] + + +def _install_one(module_id: str, params: _InstallOneParams) -> bool: + """Install a single module; return True on success, False if skipped/already installed.""" + normalized, requested_name = _normalize_install_module_id(module_id) + if _install_skip_if_already_satisfied( + params.scope_normalized, + requested_name, + params.target_root, + params.reinstall, + params.discovered_by_name, + ): + return True + if _try_install_bundled_module( + params.source_normalized, + requested_name, + normalized, + params.target_root, + params.trust_non_official, + ): + return True + try: + installed_path = install_module( + normalized, + version=params.version, + reinstall=params.reinstall, + install_root=params.target_root, + trust_non_official=params.trust_non_official, + non_interactive=is_non_interactive(), + skip_deps=params.skip_deps, + force=params.force, + ) + except Exception as exc: + console.print(f"[red]Failed installing {normalized}: {exc}[/red]") + return False + console.print(f"[green]Installed[/green] {normalized} -> {installed_path}") + publisher = _publisher_from_module_id(normalized) + if is_official_publisher(publisher): + console.print(f"Verified: official 
({publisher})") + return True + + +def _install_sig_part1( + module_ids: Annotated[ + list[str], + typer.Argument(help="Module id(s) (name or namespace/name); space-separated for multiple"), + ], + version: str | None = typer.Option(None, "--version", help="Install a specific version (single module only)"), scope: str = typer.Option("user", "--scope", help="Install scope: user or project"), source: str = typer.Option("auto", "--source", help="Install source: auto, bundled, or marketplace"), +) -> None: + """Typer param signature fragment (merged for install); not invoked at runtime.""" + + +def _install_sig_part2( repo: Path | None = typer.Option(None, "--repo", help="Repository path for project scope (default: current dir)"), trust_non_official: bool = typer.Option( False, @@ -260,39 +350,75 @@ def install( "--force", help="Force install even if dependency resolution reports conflicts", ), +) -> None: + """Typer param signature fragment (merged for install); not invoked at runtime.""" + + +def _install_sig_part3( reinstall: bool = typer.Option( False, "--reinstall", help="Reinstall even if module is already present (e.g. 
to refresh integrity metadata)", ), ) -> None: - """Install a module from bundled artifacts or marketplace registry.""" + """Typer param signature fragment (merged for install); not invoked at runtime.""" + + +def _specfact_merge_install_param_specs(orig: Callable[..., Any]) -> dict[str, Any]: + merged: dict[str, Any] = {} + merged.update(orig(_install_sig_part1)) + merged.update(orig(_install_sig_part2)) + merged.update(orig(_install_sig_part3)) + return merged + + +@beartype +def _install_impl(module_ids: list[str], **kwargs: Any) -> None: + """Install one or more modules from bundled artifacts or marketplace registry.""" + version = kwargs.get("version") + scope = kwargs.get("scope", "user") + source = kwargs.get("source", "auto") + repo = kwargs.get("repo") + trust_non_official = kwargs.get("trust_non_official", False) + skip_deps = kwargs.get("skip_deps", False) + force = kwargs.get("force", False) + reinstall = kwargs.get("reinstall", False) + if version is not None and sum(1 for mid in module_ids if mid.strip()) > 1: + console.print( + "[red]--version applies to a single module; install one module at a time or omit --version.[/red]" + ) + raise typer.Exit(1) scope_normalized, source_normalized = _parse_install_scope_and_source(scope, source) target_root = _resolve_install_target_root(scope_normalized, repo) - normalized, requested_name = _normalize_install_module_id(module_id) discovered_by_name = {entry.metadata.name: entry for entry in discover_all_modules()} - if _install_skip_if_already_satisfied(scope_normalized, requested_name, target_root, reinstall, discovered_by_name): - return - if _try_install_bundled_module(source_normalized, requested_name, normalized, target_root, trust_non_official): - return - try: - installed_path = install_module( - normalized, - version=version, - reinstall=reinstall, - install_root=target_root, - trust_non_official=trust_non_official, - non_interactive=is_non_interactive(), - skip_deps=skip_deps, - force=force, - ) - 
except Exception as exc: - console.print(f"[red]Failed installing {normalized}: {exc}[/red]") - raise typer.Exit(1) from exc - console.print(f"[green]Installed[/green] {normalized} -> {installed_path}") - publisher = _publisher_from_module_id(normalized) - if is_official_publisher(publisher): - console.print(f"Verified: official ({publisher})") + params = _InstallOneParams( + scope_normalized=scope_normalized, + source_normalized=source_normalized, + target_root=target_root, + version=version, + reinstall=reinstall, + trust_non_official=trust_non_official, + skip_deps=skip_deps, + force=force, + discovered_by_name=discovered_by_name, + ) + for module_id in module_ids: + if not _install_one(module_id, params): + raise typer.Exit(1) + + +@app.command() +@require(_install_module_ids_nonempty, "at least one non-blank module id is required") +@beartype +def install( + module_ids: Annotated[ + list[str], + typer.Argument(help="Module id(s) (name or namespace/name); space-separated for multiple"), + ], + **kwargs, +) -> None: + """Install one or more modules from bundled artifacts or marketplace registry.""" + _install_impl(module_ids, **kwargs) def _normalize_uninstall_module_name(module_name: str) -> str: @@ -331,31 +457,67 @@ def _resolve_uninstall_scope( return scope_normalized -def _uninstall_from_explicit_scope( - scope_normalized: str | None, - normalized: str, - project_root: Path, - user_root: Path, - project_module_dir: Path, - user_module_dir: Path, -) -> bool: - if scope_normalized == "project": - if not project_module_dir.exists(): - console.print(f"[red]Module '{normalized}' is not installed in project scope ({project_root}).[/red]") +@dataclass +class _ExplicitUninstallPaths: + scope_normalized: str | None + normalized: str + project_root: Path + user_root: Path + project_module_dir: Path + user_module_dir: Path + + +def _uninstall_from_explicit_scope(ctx: _ExplicitUninstallPaths) -> bool: + if ctx.scope_normalized == "project": + if not 
ctx.project_module_dir.exists(): + console.print( + f"[red]Module '{ctx.normalized}' is not installed in project scope ({ctx.project_root}).[/red]" + ) raise typer.Exit(1) - shutil.rmtree(project_module_dir) - console.print(f"[green]Uninstalled[/green] {normalized} from {project_root}") + try: + shutil.rmtree(ctx.project_module_dir) + except OSError as exc: + console.print(f"[red]Could not remove module directory {ctx.project_module_dir}: {exc}[/red]") + raise typer.Exit(1) from exc + console.print(f"[green]Uninstalled[/green] {ctx.normalized} from {ctx.project_root}") return True - if scope_normalized == "user": - if not user_module_dir.exists(): - console.print(f"[red]Module '{normalized}' is not installed in user scope ({user_root}).[/red]") + if ctx.scope_normalized == "user": + if not ctx.user_module_dir.exists(): + console.print(f"[red]Module '{ctx.normalized}' is not installed in user scope ({ctx.user_root}).[/red]") raise typer.Exit(1) - shutil.rmtree(user_module_dir) - console.print(f"[green]Uninstalled[/green] {normalized} from {user_root}") + try: + shutil.rmtree(ctx.user_module_dir) + except OSError as exc: + console.print(f"[red]Could not remove module directory {ctx.user_module_dir}: {exc}[/red]") + raise typer.Exit(1) from exc + console.print(f"[green]Uninstalled[/green] {ctx.normalized} from {ctx.user_root}") return True return False +def _uninstall_single_module(module_name: str, scope: str | None, repo: Path | None) -> None: + """Uninstall one module; raises ``typer.Exit`` on failure.""" + normalized = _normalize_uninstall_module_name(module_name) + repo_path = (repo or Path.cwd()).resolve() + project_root = repo_path / ".specfact" / "modules" + user_root = USER_MODULES_ROOT + project_module_dir = project_root / normalized + user_module_dir = user_root / normalized + scope_normalized = _resolve_uninstall_scope(scope, normalized, project_module_dir, user_module_dir) + if _uninstall_from_explicit_scope( + _ExplicitUninstallPaths( + 
scope_normalized=scope_normalized, + normalized=normalized, + project_root=project_root, + user_root=user_root, + project_module_dir=project_module_dir, + user_module_dir=user_module_dir, + ) + ): + return + _uninstall_marketplace_default(normalized) + + def _uninstall_marketplace_default(normalized: str) -> None: discovered_by_name = {entry.metadata.name: entry for entry in discover_all_modules()} existing = discovered_by_name.get(normalized) @@ -388,26 +550,27 @@ def _uninstall_marketplace_default(normalized: str) -> None: @app.command() +@require(_uninstall_module_names_nonempty, "at least one non-blank module name is required") @beartype -@require(_module_name_arg_nonempty, "module_name must not be empty") def uninstall( - module_name: str = typer.Argument(..., help="Installed module name (name or namespace/name)"), + module_names: Annotated[ + list[str], + typer.Argument(help="Installed module name(s) (name or namespace/name)"), + ], scope: str | None = typer.Option(None, "--scope", help="Uninstall scope: user or project"), repo: Path | None = typer.Option(None, "--repo", help="Repository path for project scope (default: current dir)"), ) -> None: - """Uninstall a marketplace module.""" - normalized = _normalize_uninstall_module_name(module_name) - repo_path = (repo or Path.cwd()).resolve() - project_root = repo_path / ".specfact" / "modules" - user_root = USER_MODULES_ROOT - project_module_dir = project_root / normalized - user_module_dir = user_root / normalized - scope_normalized = _resolve_uninstall_scope(scope, normalized, project_module_dir, user_module_dir) - if _uninstall_from_explicit_scope( - scope_normalized, normalized, project_root, user_root, project_module_dir, user_module_dir - ): - return - _uninstall_marketplace_default(normalized) + """Uninstall one or more marketplace modules.""" + failed = False + for module_name in module_names: + stripped = module_name.strip() + try: + _uninstall_single_module(stripped, scope, repo) + except ClickExit as 
exc: + if exc.exit_code not in (0, None): + failed = True + if failed: + raise typer.Exit(1) alias_app = typer.Typer(help="Manage command aliases (map name to namespaced module)") @@ -944,40 +1107,227 @@ def show(module_name: str = typer.Argument(..., help="Installed module name")) - console.print(_build_module_details_table(module_name, module_row, metadata)) +def _upgrade_row_for_target(target: str, by_id: dict[str, dict[str, Any]]) -> dict[str, Any]: + if target in by_id: + return by_id[target] + if target.count("/") > 1: + return {} + short = target.split("/")[-1] + if short in by_id: + return by_id[short] + for key, row in by_id.items(): + if key == short or str(key).endswith(f"/{short}"): + return row + return {} + + +def _full_marketplace_module_id_for_install(target: str) -> str: + """Return ``namespace/name`` for ``install_module`` from a target key or short id.""" + t = target.strip() + if t.count("/") > 1: + raise ValueError( + f"Invalid module id {target!r}: expected owner/repo or a short module name, not a multi-segment path." 
+ ) + if "/" in t and t.count("/") == 1: + left, right = t.split("/", 1) + if left.strip() and right.strip(): + return t + short = t.split("/")[-1] + id_file = USER_MODULES_ROOT / short / REGISTRY_ID_FILE + if id_file.exists(): + txt = id_file.read_text(encoding="utf-8").strip() + if txt and "/" in txt: + return txt + if short.startswith("specfact-"): + return f"nold-ai/{short}" + return f"nold-ai/specfact-{short}" + + +def _latest_version_map_from_registry_index(idx: dict[str, Any] | None) -> dict[str, str]: + """Build module id -> latest_version from a single registry index fetch.""" + out: dict[str, str] = {} + if not idx: + return out + mods = idx.get("modules", []) + if not isinstance(mods, list): + return out + for raw in mods: + if not isinstance(raw, dict): + continue + raw_dict = cast(dict[str, Any], raw) + mid = str(raw_dict.get("id", "")).strip() + if not mid: + continue + lv = raw_dict.get("latest_version") + if lv is None: + continue + s = str(lv).strip() + if s: + out[mid] = s + return out + + +def _versions_equal_for_upgrade(current: str, latest: str) -> bool: + try: + return Version(current) == Version(latest) + except (InvalidVersion, ValueError): + return current.strip() == latest.strip() + + +def _is_major_version_increase(current: str, latest: str) -> bool: + try: + return Version(latest).major > Version(current).major + except (InvalidVersion, ValueError): + return False + + +def _upgrade_name_candidates(normalized: str, short: str, by_id: dict[str, dict[str, Any]]) -> list[str]: + candidates = [normalized] + if short != normalized: + candidates.append(short) + if "/" not in normalized and f"specfact-{normalized}" in by_id: + candidates.append(f"specfact-{normalized}") + return list(dict.fromkeys(candidates)) + + +def _resolve_marketplace_id_by_short(short: str, marketplace_by_id: dict[str, dict[str, Any]]) -> str | None: + for key in marketplace_by_id: + if key == short or str(key).endswith(f"/{short}"): + return key + return None + + +def 
_resolve_one_upgrade_name(raw: str, by_id: dict[str, dict[str, Any]]) -> str: + """Resolve a single CLI name to a module id key used in ``by_id`` / targets.""" + normalized = raw.strip() + if not normalized: + return normalized + if normalized.count("/") > 1: + console.print( + f"[red]Invalid module id {normalized!r}: use owner/repo or a short name (e.g. backlog), " + "not a multi-segment path.[/red]" + ) + raise typer.Exit(1) + short = normalized.split("/")[-1] + for cand in _upgrade_name_candidates(normalized, short, by_id): + if cand not in by_id: + continue + source = str(by_id[cand].get("source", "unknown")) + if source != "marketplace": + console.print( + f"[red]Cannot upgrade '{cand}' from source '{source}'. Only marketplace modules are upgradeable.[/red]" + ) + raise typer.Exit(1) + return cand + marketplace_by_id = {k: v for k, v in by_id.items() if str(v.get("source", "")) == "marketplace"} + resolved = _resolve_marketplace_id_by_short(short, marketplace_by_id) + if resolved is not None: + return resolved + console.print(f"[red]Module '{normalized}' is not installed and cannot be upgraded.[/red]") + raise typer.Exit(1) + + def _resolve_upgrade_target_ids( - module_name: str | None, - all: bool, + module_names: list[str] | None, + all_flag: bool, modules: list[dict[str, Any]], by_id: dict[str, dict[str, Any]], ) -> list[str]: - target_ids: list[str] = [] - if all or module_name is None: + if all_flag or not module_names: target_ids = [str(m.get("id", "")) for m in modules if str(m.get("source", "")) == "marketplace"] if not target_ids: console.print("[yellow]No marketplace-installed modules found to upgrade.[/yellow]") return target_ids - normalized = module_name - if normalized in by_id: - source = str(by_id[normalized].get("source", "unknown")) - if source != "marketplace": - console.print( - f"[red]Cannot upgrade '{normalized}' from source '{source}'. 
Only marketplace modules are upgradeable.[/red]" - ) - raise typer.Exit(1) - return [normalized] - prefixed = normalized if "/" in normalized else f"specfact/{normalized}" - return [prefixed] + return [_resolve_one_upgrade_name(raw, by_id) for raw in module_names] + + +def _major_upgrade_decision( + full_id: str, + current_v: str, + latest_v: str, + *, + yes: bool, +) -> tuple[bool, tuple[str, str, str] | None]: + """Return (should_install, skipped_major_tuple when skipping a major bump).""" + if not _is_major_version_increase(current_v, latest_v): + return True, None + if yes: + return True, None + if is_non_interactive(): + console.print( + f"[yellow]Skipping major upgrade for {full_id}: {current_v} -> {latest_v} " + "(non-interactive; use --yes to approve)[/yellow]" + ) + return False, (full_id, current_v, latest_v) + if not typer.confirm( + f"Major version upgrade for {full_id} ({current_v} -> {latest_v}). Continue?", + default=False, + ): + return False, (full_id, current_v, latest_v) + return True, None -def _run_marketplace_upgrades(target_ids: list[str], by_id: dict[str, dict[str, Any]]) -> None: +@dataclass +class _MarketplaceUpgradeAccum: + upgraded: list[tuple[str, str, str]] + up_to_date: list[str] + skipped_major: list[tuple[str, str, str]] + + +def _run_one_marketplace_upgrade_target( + target: str, + by_id: dict[str, dict[str, Any]], + latest_by_id: dict[str, str], + *, + yes: bool, + accum: _MarketplaceUpgradeAccum, +) -> None: + full_id = _full_marketplace_module_id_for_install(target) + row = _upgrade_row_for_target(target, by_id) + current_v = str(row.get("version", "unknown")).strip() + latest_v = str(row.get("latest_version") or "").strip() + if not latest_v: + latest_v = (latest_by_id.get(full_id, "") or "").strip() + + if latest_v and _versions_equal_for_upgrade(current_v, latest_v): + accum.up_to_date.append(full_id) + return + + if not latest_v: + with _module_upgrade_status(f"[cyan]Upgrading[/cyan] [bold]{full_id}[/bold] …"): + 
installed_path = install_module(full_id, reinstall=True) + accum.upgraded.append((full_id, current_v, _read_installed_module_version(installed_path))) + return + + should_install, skip_tuple = _major_upgrade_decision(full_id, current_v, latest_v, yes=yes) + if skip_tuple is not None: + accum.skipped_major.append(skip_tuple) + if should_install: + with _module_upgrade_status(f"[cyan]Upgrading[/cyan] [bold]{full_id}[/bold] …"): + installed_path = install_module(full_id, reinstall=True) + accum.upgraded.append((full_id, current_v, _read_installed_module_version(installed_path))) + + +def _run_marketplace_upgrades( + target_ids: list[str], + by_id: dict[str, dict[str, Any]], + latest_by_id: dict[str, str], + *, + yes: bool = False, +) -> None: upgraded: list[tuple[str, str, str]] = [] + up_to_date: list[str] = [] + skipped_major: list[tuple[str, str, str]] = [] failed: list[str] = [] + accum = _MarketplaceUpgradeAccum( + upgraded=upgraded, + up_to_date=up_to_date, + skipped_major=skipped_major, + ) + for target in target_ids: try: - module_id = target if "/" in target else f"specfact/{target}" - previous_version = str(by_id.get(target, {}).get("version", "unknown")) - installed_path = install_module(module_id, reinstall=True) - upgraded.append((module_id, previous_version, _read_installed_module_version(installed_path))) + _run_one_marketplace_upgrade_target(target, by_id, latest_by_id, yes=yes, accum=accum) except Exception as exc: console.print(f"[red]Failed upgrading {target}: {exc}[/red]") failed.append(target) @@ -986,29 +1336,50 @@ def _run_marketplace_upgrades(target_ids: list[str], by_id: dict[str, dict[str, console.print("[green]Upgraded:[/green]") for module_id, previous_version, new_version in upgraded: console.print(f" {module_id}: {previous_version} -> {new_version}") + + if up_to_date: + if upgraded or skipped_major: + console.print("[green]Already up to date:[/green]") + for mid in up_to_date: + console.print(f" {mid}") + else: + 
console.print("[green]All modules are up to date.[/green]") + + if skipped_major: + console.print("[yellow]Skipped (major bump):[/yellow]") + for mid, cv, lv in skipped_major: + console.print(f" {mid}: {cv} -> {lv}") + if failed: raise typer.Exit(1) @app.command() @beartype -@require( - _upgrade_module_name_optional, - "module_name must be non-empty if provided", -) +@require(_upgrade_module_names_valid, "each module name must be non-empty") def upgrade( - module_name: str | None = typer.Argument( - None, help="Installed module name (optional; omit to upgrade all marketplace modules)" - ), + module_names: Annotated[ + list[str] | None, + typer.Argument(help="Installed module name(s); omit to upgrade all marketplace modules"), + ] = None, all: bool = typer.Option(False, "--all", help="Upgrade all installed marketplace modules"), + yes: bool = typer.Option(False, "--yes", "-y", help="Approve major version upgrades without prompting"), ) -> None: """Upgrade marketplace module(s) to latest available versions.""" modules = get_modules_with_state() by_id = {str(m.get("id", "")): m for m in modules} - target_ids = _resolve_upgrade_target_ids(module_name, all, modules, by_id) + target_ids = _resolve_upgrade_target_ids(module_names, all, modules, by_id) if not target_ids: return - _run_marketplace_upgrades(target_ids, by_id) + with _module_upgrade_status("[dim]Fetching marketplace registry index…[/dim]"): + index = fetch_registry_index() + if index is None: + console.print( + "[yellow]Marketplace registry unavailable (offline or network error). " + "Upgrade will use installed metadata only.[/yellow]" + ) + latest_by_id = _latest_version_map_from_registry_index(index) + _run_marketplace_upgrades(target_ids, by_id, latest_by_id, yes=yes) # Expose standard ModuleIOContract operations for protocol compliance discovery. 
@@ -1017,6 +1388,35 @@ def upgrade( sync_with_bundle = module_io_shim.sync_with_bundle validate_bundle = module_io_shim.validate_bundle + +def _ensure_specfact_install_param_patch() -> None: + """When this module is imported before ``specfact_cli.cli`` (e.g. unit tests), Typer must + still resolve CLI params from merged install signatures instead of the thin ``install`` wrapper. + If ``cli`` already patched ``typer.utils.get_params_from_function``, skip. + + Match by name/module because ``@app.command()`` wraps the callback, so ``func is install`` fails. + """ + import importlib + + import typer.utils as tu + + if getattr(tu.get_params_from_function, "__name__", "") == "_specfact_get_params_from_function": + return + prev = tu.get_params_from_function + _mod = "specfact_cli.modules.module_registry.src.commands" + + def _wrapped(func: Callable[..., Any]) -> Any: + if getattr(func, "__name__", "") == "install" and getattr(func, "__module__", "") == _mod: + return _specfact_merge_install_param_specs(prev) + return prev(func) + + tu.get_params_from_function = _wrapped # type: ignore[assignment] + typer_main = cast(Any, importlib.import_module("typer.main")) + typer_main.get_params_from_function = _wrapped # type: ignore[assignment] + + +_ensure_specfact_install_param_patch() + __all__ = [ "app", "export_from_bundle", diff --git a/src/specfact_cli/registry/bootstrap.py b/src/specfact_cli/registry/bootstrap.py index c9ad6351..2626fbf3 100644 --- a/src/specfact_cli/registry/bootstrap.py +++ b/src/specfact_cli/registry/bootstrap.py @@ -4,8 +4,18 @@ Commands are discovered from configured module-package roots. Loaders import each package's src on first use and return its .app (Typer). cli.py must not import command modules at top level; it uses the registry. -When category_grouping_enabled is True, mounts category groups (code, backlog, project, spec, govern) -and compat shims for flat commands; otherwise mounts all modules flat. 
+ +Topology (see ``register_module_package_commands`` in ``module_packages``): + +- When ``category_grouping_enabled`` is True (default): each non-core module registers under its + category path when ``meta.category`` is set. After packages load, + ``_mount_installed_category_groups`` mounts the category group commands (``code``, ``backlog``, + ``project``, ``spec``, ``govern``) for bundles that are installed and enabled. Legacy flat + aliases (for example ``analyze`` or ``plan`` at the root) are not registered. + +- When ``category_grouping_enabled`` is False: the same modules register with + ``_register_command_flat_path``, exposing each declared command name at the root (flat topology). + This is a compatibility path for older layouts; grouped categories are the default. """ from __future__ import annotations diff --git a/src/specfact_cli/registry/dependency_resolver.py b/src/specfact_cli/registry/dependency_resolver.py index c1c44d21..c4f7b2ed 100644 --- a/src/specfact_cli/registry/dependency_resolver.py +++ b/src/specfact_cli/registry/dependency_resolver.py @@ -21,6 +21,14 @@ class DependencyConflictError(Exception): """Raised when pip dependency resolution detects conflicting version constraints.""" +class PipDependencyValidationUnavailableError(RuntimeError): + """Raised when pip is unavailable and pip dependency validation must not be skipped.""" + + +class PipDependencyInstallError(Exception): + """Raised when installation of resolved pip requirements fails.""" + + @beartype def _pip_tools_available() -> bool: """Return True if pip-compile is available.""" @@ -42,27 +50,62 @@ def _run_pip_compile(constraints: list[str]) -> list[str]: if not constraints: return [] with tempfile.TemporaryDirectory() as tmp: - reqs = Path(tmp) / "requirements.in" + tmp_path = Path(tmp) + reqs = tmp_path / "requirements.in" + out_path = tmp_path / "requirements.txt" reqs.write_text("\n".join(constraints), encoding="utf-8") result = subprocess.run( - ["pip-compile", 
"--dry-run", "--no-annotate", str(reqs)], + ["pip-compile", "--no-annotate", "-o", str(out_path), str(reqs)], capture_output=True, text=True, timeout=120, ) if result.returncode != 0: raise DependencyConflictError(result.stderr or result.stdout or "pip-compile failed") - out = (Path(tmp) / "requirements.txt").read_text() if (Path(tmp) / "requirements.txt").exists() else "" - if not out: + if not out_path.exists(): + return [] + out = out_path.read_text(encoding="utf-8") + if not out.strip(): return [] return [L.strip() for L in out.splitlines() if L.strip() and not L.strip().startswith("#")] @beartype -def _run_basic_resolver(constraints: list[str]) -> list[str]: - """Fallback: use pip's resolver (e.g. pip install --dry-run). Returns best-effort pinned list.""" +def _pip_module_available() -> bool: + """Return True if pip is importable in the current Python environment.""" + try: + result = subprocess.run( + [sys.executable, "-m", "pip", "--version"], + capture_output=True, + text=True, + timeout=5, + check=False, + ) + return result.returncode == 0 + except (FileNotFoundError, subprocess.TimeoutExpired, OSError): + return False + + +@beartype +def _run_basic_resolver(constraints: list[str], *, allow_unvalidated: bool = False) -> list[str]: + """Fallback: use pip's resolver (e.g. pip install --dry-run). Returns best-effort pinned list. + + When pip is not available (e.g. uvx environment), validation is skipped only if + ``allow_unvalidated`` is True; otherwise :class:`PipDependencyValidationUnavailableError` is raised. + """ if not constraints: return [] + if not _pip_module_available(): + if allow_unvalidated: + logger.warning( + "pip is not available in the current environment (e.g. uvx). " + "Skipping pip dependency validation β€” packages will be checked at install time." + ) + return constraints + raise PipDependencyValidationUnavailableError( + "pip is not available in this environment; cannot validate pip dependency constraints. 
" + "Install pip, or invoke resolution from a flow that explicitly allows unvalidated constraints." + ) logger.warning("pip-tools not found, using basic resolver") with tempfile.TemporaryDirectory() as tmp: reqs = Path(tmp) / "requirements.in" @@ -99,11 +142,45 @@ def _collect_constraints(modules: list[ModulePackageMetadata]) -> list[str]: @beartype @require(lambda modules: all(isinstance(m, ModulePackageMetadata) for m in modules)) @ensure(lambda result: isinstance(result, list)) -def resolve_dependencies(modules: list[ModulePackageMetadata]) -> list[str]: - """Resolve pip dependencies across all modules; use pip-compile or fallback. Raises DependencyConflictError on conflict.""" +def resolve_dependencies( + modules: list[ModulePackageMetadata], + *, + allow_unvalidated: bool = False, +) -> list[str]: + """Resolve pip dependencies across all modules; use pip-compile or fallback. + + Raises DependencyConflictError on conflict. + When pip-tools and pip are unavailable, raises PipDependencyValidationUnavailableError unless + ``allow_unvalidated`` is True (supported pip-free flows such as module install under uvx). + """ constraints = _collect_constraints(modules) if not constraints: return [] if _pip_tools_available(): return _run_pip_compile(constraints) - return _run_basic_resolver(constraints) + return _run_basic_resolver(constraints, allow_unvalidated=allow_unvalidated) + + +@beartype +@require(lambda pinned: isinstance(pinned, list) and all(isinstance(x, str) for x in pinned)) +def install_resolved_pip_requirements(pinned: list[str]) -> None: + """Install pinned or constraint lines into the active interpreter (same as the CLI). + + If ``pip`` is not available (e.g. minimal uvx runtime), logs a warning and returns without raising. + Raises :class:`PipDependencyInstallError` when pip is present but installation fails. 
+ """ + if not pinned: + return + if not _pip_module_available(): + logger.warning( + "pip is not available in this environment; skipping install of %s marketplace pip " + "requirement(s). Install them manually or use a full Python environment.", + len(pinned), + ) + return + cmd = [sys.executable, "-m", "pip", "install", "--no-input", *pinned] + logger.info("Installing %s resolved pip requirement(s) for marketplace modules", len(pinned)) + result = subprocess.run(cmd, capture_output=True, text=True, timeout=600) + if result.returncode != 0: + detail = (result.stderr or result.stdout or "pip install failed").strip() + raise PipDependencyInstallError(detail) diff --git a/src/specfact_cli/registry/module_installer.py b/src/specfact_cli/registry/module_installer.py index e004966b..f52c4f4c 100644 --- a/src/specfact_cli/registry/module_installer.py +++ b/src/specfact_cli/registry/module_installer.py @@ -24,7 +24,12 @@ from specfact_cli.common import get_bridge_logger from specfact_cli.models.module_package import ModulePackageMetadata from specfact_cli.registry.crypto_validator import verify_checksum, verify_signature -from specfact_cli.registry.dependency_resolver import DependencyConflictError, resolve_dependencies +from specfact_cli.registry.dependency_resolver import ( + DependencyConflictError, + PipDependencyInstallError, + install_resolved_pip_requirements, + resolve_dependencies, +) from specfact_cli.registry.marketplace_client import download_module from specfact_cli.registry.module_discovery import discover_all_modules from specfact_cli.registry.module_security import assert_module_allowed, ensure_publisher_trusted @@ -132,15 +137,31 @@ def _download_archive_with_cache(module_id: str, version: str | None = None) -> @beartype def _extract_bundle_dependencies(metadata: dict[str, Any]) -> list[str]: - """Extract validated bundle dependency module ids from raw manifest metadata.""" + """Extract validated bundle dependency module ids from raw manifest metadata. 
+ + Supports both plain string entries ("namespace/name") and versioned object entries + ({"id": "namespace/name", "version": ">=x.y.z"}). + """ raw_dependencies = metadata.get("bundle_dependencies", []) if not isinstance(raw_dependencies, list): return [] dependencies: list[str] = [] - for value in raw_dependencies: - dep = str(value).strip() - if not dep: - continue + for index, value in enumerate(raw_dependencies): + if isinstance(value, dict): + entry = cast(dict[str, Any], value) + raw_id = entry.get("id") + if raw_id is None or not str(raw_id).strip(): + raise ValueError( + f"bundle_dependencies[{index}]: object entry must include non-empty 'id' " + f"(invalid manifest; got {value!r})" + ) + dep = str(raw_id).strip() + else: + dep = str(value).strip() + if not dep: + raise ValueError( + f"bundle_dependencies[{index}]: string entry must be non-empty (invalid manifest; got {value!r})" + ) _validate_marketplace_namespace_format(dep) dependencies.append(dep) return dependencies @@ -725,7 +746,11 @@ def _validate_install_manifest_constraints( assert_module_allowed(manifest_module_name) compatibility = str(metadata.get("core_compatibility", "")).strip() if compatibility and Version(cli_version) not in SpecifierSet(compatibility): - raise ValueError("Module is incompatible with current SpecFact CLI version") + raise ValueError( + f"Module '{manifest_module_name}' requires SpecFact CLI {compatibility}, " + f"but the installed version is {cli_version}. 
" + f"Run: specfact upgrade (or: pip install --upgrade specfact-cli)" + ) publisher_name: str | None = None publisher_raw = metadata.get("publisher") if isinstance(publisher_raw, dict): @@ -782,13 +807,20 @@ def _install_bundle_dependencies_for_module( try: all_metas = [e.metadata for e in discover_all_modules()] all_metas.append(metadata_obj) - resolve_dependencies(all_metas) + resolved = resolve_dependencies(all_metas, allow_unvalidated=True) except DependencyConflictError as dep_err: if not force: raise ValueError( f"Dependency conflict: {dep_err}. Use --force to bypass or --skip-deps to skip resolution." ) from dep_err logger.warning("Dependency conflict bypassed by --force: %s", dep_err) + return + if not resolved: + return + try: + install_resolved_pip_requirements(resolved) + except PipDependencyInstallError as pip_err: + raise ValueError(f"Failed to install resolved pip dependencies: {pip_err}") from pip_err def _atomic_place_verified_module( diff --git a/src/specfact_cli/registry/module_packages.py b/src/specfact_cli/registry/module_packages.py index ed782a77..ac25fdcd 100644 --- a/src/specfact_cli/registry/module_packages.py +++ b/src/specfact_cli/registry/module_packages.py @@ -14,6 +14,7 @@ import importlib.util import os import sys +from dataclasses import dataclass from pathlib import Path from typing import Any, cast @@ -48,6 +49,46 @@ from specfact_cli.utils.prompts import print_warning +@dataclass +class _ProtocolTopLevelScanState: + package_dir: Path + package_name: str + pending_paths: list[Path] + scanned_paths: set[Path] + exported_function_names: set[str] + class_method_names: dict[str, set[str]] + assigned_names: dict[str, ast.expr] + + +@dataclass +class _ProtocolComplianceCounters: + protocol_full: list[int] + protocol_partial: list[int] + protocol_legacy: list[int] + partial_modules: list[tuple[str, list[str]]] + legacy_modules: list[str] + + +@dataclass +class _ModuleIntegrityContext: + allow_unsigned: bool + is_test_mode: bool + logger: 
Any + skipped: list[tuple[str, str]] + + +@dataclass +class _PackageRegistrationContext: + enabled_map: dict[str, bool] + allow_unsigned: bool + is_test_mode: bool + logger: Any + skipped: list[tuple[str, str]] + bridge_owner_map: dict[str, str] + category_grouping_enabled: bool + counters: _ProtocolComplianceCounters + + # Display order for core modules (3 after migration-03); others follow alphabetically. CORE_NAMES = ("init", "module", "upgrade") CORE_MODULE_ORDER: tuple[str, ...] = ( @@ -709,39 +750,6 @@ def _resolve_package_load_path(package_dir: Path, package_name: str) -> Path: raise ValueError(f"Package {package_dir.name} has no src/app.py, src/{package_name}.py or src/{package_name}/") -def _load_package_module(package_dir: Path, package_name: str) -> Any: - """Load and return a module package entrypoint module.""" - src_dir = package_dir / "src" - if str(src_dir) not in sys.path: - sys.path.insert(0, str(src_dir)) - load_path = _resolve_package_load_path(package_dir, package_name) - submodule_locations = [str(load_path.parent)] if load_path.name == "__init__.py" else None - module_token = _normalized_module_name(package_dir.name) - spec = importlib.util.spec_from_file_location( - f"_specfact_module_{module_token}", - load_path, - submodule_search_locations=submodule_locations, - ) - if spec is None or spec.loader is None: - raise ValueError(f"Cannot load from {package_dir.name}") - mod = importlib.util.module_from_spec(spec) - sys.modules[spec.name] = mod - spec.loader.exec_module(mod) - return mod - - -@beartype -@require(lambda module_class: module_class is not None, "Module class must be provided") -@ensure(lambda result: isinstance(result, list), "Protocol operation list must be returned") -def _check_protocol_compliance(module_class: Any) -> list[str]: - """Return supported protocol operations based on available attributes.""" - operations: list[str] = [] - for operation, method_name in PROTOCOL_METHODS.items(): - if hasattr(module_class, 
method_name): - operations.append(operation) - return operations - - def _resolve_protocol_source_paths( package_dir: Path, package_name: str, @@ -829,41 +837,31 @@ def _protocol_record_assignments( exported_function_names.add(target.id) -def _protocol_process_top_level_node( - node: ast.stmt, - package_dir: Path, - package_name: str, - source_path: Path, - pending_paths: list[Path], - scanned_paths: set[Path], - exported_function_names: set[str], - class_method_names: dict[str, set[str]], - assigned_names: dict[str, ast.expr], -) -> None: +def _protocol_process_top_level_node(node: ast.stmt, source_path: Path, state: _ProtocolTopLevelScanState) -> None: if isinstance(node, ast.ClassDef): methods: set[str] = set() for class_node in node.body: if isinstance(class_node, (ast.FunctionDef, ast.AsyncFunctionDef)): methods.add(class_node.name) - class_method_names[node.name] = methods + state.class_method_names[node.name] = methods return if isinstance(node, (ast.FunctionDef, ast.AsyncFunctionDef)): - exported_function_names.add(node.name) + state.exported_function_names.add(node.name) return if isinstance(node, ast.ImportFrom): imported_names = {alias.name for alias in node.names} if set(PROTOCOL_INTERFACE_BINDINGS).isdisjoint(imported_names): return - imported_source = _resolve_import_from_source_path(package_dir, package_name, source_path, node) + imported_source = _resolve_import_from_source_path(state.package_dir, state.package_name, source_path, node) if imported_source is None: return resolved = imported_source.resolve() - if resolved in scanned_paths: + if resolved in state.scanned_paths: return - scanned_paths.add(resolved) - pending_paths.append(imported_source) + state.scanned_paths.add(resolved) + state.pending_paths.append(imported_source) return - _protocol_record_assignments(node, assigned_names, exported_function_names) + _protocol_record_assignments(node, state.assigned_names, state.exported_function_names) def _protocol_merge_binding_methods( @@ -909,26 
+907,27 @@ def _check_protocol_compliance_from_source( scanned_sources: list[str] = [] pending_paths = _resolve_protocol_source_paths(package_dir, package_name, command_names=command_names) scanned_paths = {path.resolve() for path in pending_paths} + scan_state = _ProtocolTopLevelScanState( + package_dir=package_dir, + package_name=package_name, + pending_paths=pending_paths, + scanned_paths=scanned_paths, + exported_function_names=exported_function_names, + class_method_names=class_method_names, + assigned_names=assigned_names, + ) - while pending_paths: - source_path = pending_paths.pop(0) + while scan_state.pending_paths: + source_path = scan_state.pending_paths.pop(0) source = source_path.read_text(encoding="utf-8") scanned_sources.append(source) tree = ast.parse(source, filename=str(source_path)) for node in tree.body: - _protocol_process_top_level_node( - node, - package_dir, - package_name, - source_path, - pending_paths, - scanned_paths, - exported_function_names, - class_method_names, - assigned_names, - ) + _protocol_process_top_level_node(node, source_path, scan_state) - _protocol_merge_binding_methods(assigned_names, class_method_names, exported_function_names) + _protocol_merge_binding_methods( + scan_state.assigned_names, scan_state.class_method_names, scan_state.exported_function_names + ) operations: list[str] = [] for operation, method_name in PROTOCOL_METHODS.items(): @@ -1122,24 +1121,17 @@ def _register_service_bridges_safe(meta: Any, bridge_owner_map: dict[str, str], ) -def _module_integrity_allows_load( - package_dir: Path, - meta: Any, - allow_unsigned: bool, - is_test_mode: bool, - logger: Any, - skipped: list[tuple[str, str]], -) -> bool: - if verify_module_artifact(package_dir, meta, allow_unsigned=allow_unsigned): +def _module_integrity_allows_load(package_dir: Path, meta: Any, ctx: _ModuleIntegrityContext) -> bool: + if verify_module_artifact(package_dir, meta, allow_unsigned=ctx.allow_unsigned): return True if 
_is_builtin_module_package(package_dir): - logger.warning( + ctx.logger.warning( "Built-in module '%s' failed integrity verification; loading anyway to keep CLI functional.", meta.name, ) return True - if is_test_mode and allow_unsigned: - logger.debug( + if ctx.is_test_mode and ctx.allow_unsigned: + ctx.logger.debug( "TEST_MODE: allowing built-in module '%s' despite failed integrity verification.", meta.name, ) @@ -1149,41 +1141,47 @@ def _module_integrity_allows_load( "This may indicate tampering or an outdated local module copy. " "Run `specfact module init` to restore trusted bundled modules." ) - skipped.append((meta.name, "integrity/trust check failed")) + ctx.skipped.append((meta.name, "integrity/trust check failed")) return False +def _apply_protocol_counters_from_operations( + meta: Any, + operations: list[str], + logger: Any, + counters: _ProtocolComplianceCounters, +) -> None: + if len(operations) == 4: + counters.protocol_full[0] += 1 + return + if operations: + counters.partial_modules.append((meta.name, operations)) + if is_debug_mode(): + logger.info("Module %s: ModuleIOContract partial (%s)", meta.name, ", ".join(operations)) + counters.protocol_partial[0] += 1 + return + counters.legacy_modules.append(meta.name) + if is_debug_mode(): + logger.warning("Module %s: No ModuleIOContract (legacy mode)", meta.name) + counters.protocol_legacy[0] += 1 + + def _record_protocol_compliance_result( package_dir: Path, meta: Any, logger: Any, - protocol_full: list[int], - protocol_partial: list[int], - protocol_legacy: list[int], - partial_modules: list[tuple[str, list[str]]], - legacy_modules: list[str], + counters: _ProtocolComplianceCounters, ) -> None: try: operations = _check_protocol_compliance_from_source(package_dir, meta.name, command_names=meta.commands) meta.protocol_operations = operations - if len(operations) == 4: - protocol_full[0] += 1 - elif operations: - partial_modules.append((meta.name, operations)) - if is_debug_mode(): - logger.info("Module 
%s: ModuleIOContract partial (%s)", meta.name, ", ".join(operations)) - protocol_partial[0] += 1 - else: - legacy_modules.append(meta.name) - if is_debug_mode(): - logger.warning("Module %s: No ModuleIOContract (legacy mode)", meta.name) - protocol_legacy[0] += 1 + _apply_protocol_counters_from_operations(meta, operations, logger, counters) except Exception as exc: - legacy_modules.append(meta.name) + counters.legacy_modules.append(meta.name) if is_debug_mode(): logger.warning("Module %s: Unable to inspect protocol compliance (%s)", meta.name, exc) meta.protocol_operations = [] - protocol_legacy[0] += 1 + counters.protocol_legacy[0] += 1 def _register_command_category_path( @@ -1283,49 +1281,42 @@ def _register_commands_for_package( category_grouping_enabled: bool, logger: Any, ) -> None: + """Register package commands. Categorized marketplace modules never use flat root registration.""" + _ = category_grouping_enabled # retained for API compatibility; grouping no longer selects flat vs category for cmd_name in meta.commands: - if category_grouping_enabled and meta.category is not None: + if meta.category is not None: _register_command_category_path(package_dir, meta, cmd_name, logger) else: _register_command_flat_path(package_dir, meta, cmd_name, logger) -def _register_one_package_if_eligible( - package_dir: Path, - meta: Any, - enabled_map: dict[str, bool], - allow_unsigned: bool, - is_test_mode: bool, - logger: Any, - skipped: list[tuple[str, str]], - bridge_owner_map: dict[str, str], - category_grouping_enabled: bool, - protocol_full: list[int], - protocol_partial: list[int], - protocol_legacy: list[int], - partial_modules: list[tuple[str, list[str]]], - legacy_modules: list[str], -) -> None: - if not enabled_map.get(meta.name, True): +def _register_one_package_if_eligible(package_dir: Path, meta: Any, reg: _PackageRegistrationContext) -> None: + if not reg.enabled_map.get(meta.name, True): return compatible = _check_core_compatibility(meta, cli_version) if not 
compatible: - skipped.append((meta.name, f"requires {meta.core_compatibility}, cli is {cli_version}")) + reg.skipped.append((meta.name, f"requires {meta.core_compatibility}, cli is {cli_version}")) return - deps_ok, missing = _validate_module_dependencies(meta, enabled_map) + deps_ok, missing = _validate_module_dependencies(meta, reg.enabled_map) if not deps_ok: - skipped.append((meta.name, f"missing dependencies: {', '.join(missing)}")) + reg.skipped.append((meta.name, f"missing dependencies: {', '.join(missing)}")) return - if not _module_integrity_allows_load(package_dir, meta, allow_unsigned, is_test_mode, logger, skipped): + integrity_ctx = _ModuleIntegrityContext( + allow_unsigned=reg.allow_unsigned, + is_test_mode=reg.is_test_mode, + logger=reg.logger, + skipped=reg.skipped, + ) + if not _module_integrity_allows_load(package_dir, meta, integrity_ctx): return if not _check_schema_compatibility(meta.schema_version, CURRENT_PROJECT_SCHEMA_VERSION): - skipped.append( + reg.skipped.append( ( meta.name, f"schema version {meta.schema_version} required, current is {CURRENT_PROJECT_SCHEMA_VERSION}", ) ) - logger.debug( + reg.logger.debug( "Module %s: Schema version %s required, but current is %s (skipped)", meta.name, meta.schema_version, @@ -1333,34 +1324,18 @@ def _register_one_package_if_eligible( ) return if meta.schema_version is None: - logger.debug("Module %s: No schema version declared (assuming current)", meta.name) + reg.logger.debug("Module %s: No schema version declared (assuming current)", meta.name) else: - logger.debug("Module %s: Schema version %s (compatible)", meta.name, meta.schema_version) - - _register_schema_extensions_safe(meta, logger) - _register_service_bridges_safe(meta, bridge_owner_map, logger) - _record_protocol_compliance_result( - package_dir, - meta, - logger, - protocol_full, - protocol_partial, - protocol_legacy, - partial_modules, - legacy_modules, - ) - _register_commands_for_package(package_dir, meta, category_grouping_enabled, 
logger) + reg.logger.debug("Module %s: Schema version %s (compatible)", meta.name, meta.schema_version) + _register_schema_extensions_safe(meta, reg.logger) + _register_service_bridges_safe(meta, reg.bridge_owner_map, reg.logger) + _record_protocol_compliance_result(package_dir, meta, reg.logger, reg.counters) + _register_commands_for_package(package_dir, meta, reg.category_grouping_enabled, reg.logger) -def _log_protocol_compatibility_footer( - logger: Any, - protocol_full: list[int], - protocol_partial: list[int], - protocol_legacy: list[int], - partial_modules: list[tuple[str, list[str]]], - legacy_modules: list[str], -) -> None: - pf, pp, pl = protocol_full[0], protocol_partial[0], protocol_legacy[0] + +def _log_protocol_compatibility_footer(logger: Any, counters: _ProtocolComplianceCounters) -> None: + pf, pp, pl = counters.protocol_full[0], counters.protocol_partial[0], counters.protocol_legacy[0] discovered_count = pf + pp + pl if not discovered_count or not (pp > 0 or pl > 0) or not is_debug_mode(): return @@ -1372,11 +1347,11 @@ def _log_protocol_compatibility_footer( pp, pl, ) - if partial_modules: - partial_desc = ", ".join(f"{name} ({'/'.join(ops)})" for name, ops in sorted(partial_modules)) + if counters.partial_modules: + partial_desc = ", ".join(f"{name} ({'/'.join(ops)})" for name, ops in sorted(counters.partial_modules)) logger.info("Partially compliant modules: %s", partial_desc) - if legacy_modules: - logger.info("Legacy modules: %s", ", ".join(sorted(set(legacy_modules)))) + if counters.legacy_modules: + logger.info("Legacy modules: %s", ", ".join(sorted(set(counters.legacy_modules)))) def _log_skipped_modules_debug(logger: Any, skipped: list[tuple[str, str]]) -> None: @@ -1400,7 +1375,8 @@ def register_module_package_commands( Call after register_builtin_commands(). enable_ids/disable_ids from CLI (--enable-module/--disable-module). allow_unsigned: If True, allow modules without integrity metadata. Default from SPECFACT_ALLOW_UNSIGNED env. 
- category_grouping_enabled: If True, register category groups (code, backlog, project, spec, govern). + category_grouping_enabled: Ignored for registration (retained for API compatibility). Category groups are + always mounted for installed bundles; categorized modules never register flat root aliases. """ enable_ids = enable_ids or [] disable_ids = disable_ids or [] @@ -1416,41 +1392,30 @@ def register_module_package_commands( enabled_map = merge_module_state(discovered_list, state, enable_ids, disable_ids) logger = get_bridge_logger(__name__) skipped: list[tuple[str, str]] = [] - protocol_full = [0] - protocol_partial = [0] - protocol_legacy = [0] - partial_modules: list[tuple[str, list[str]]] = [] - legacy_modules: list[str] = [] + counters = _ProtocolComplianceCounters( + protocol_full=[0], + protocol_partial=[0], + protocol_legacy=[0], + partial_modules=[], + legacy_modules=[], + ) bridge_owner_map: dict[str, str] = { bridge_id: BRIDGE_REGISTRY.get_owner(bridge_id) or "unknown" for bridge_id in BRIDGE_REGISTRY.list_bridge_ids() } - for package_dir, meta in packages: - _register_one_package_if_eligible( - package_dir, - meta, - enabled_map, - allow_unsigned, - is_test_mode, - logger, - skipped, - bridge_owner_map, - category_grouping_enabled, - protocol_full, - protocol_partial, - protocol_legacy, - partial_modules, - legacy_modules, - ) - if category_grouping_enabled: - _mount_installed_category_groups(packages, enabled_map) - _log_protocol_compatibility_footer( - logger, - protocol_full, - protocol_partial, - protocol_legacy, - partial_modules, - legacy_modules, + reg_ctx = _PackageRegistrationContext( + enabled_map=enabled_map, + allow_unsigned=allow_unsigned, + is_test_mode=is_test_mode, + logger=logger, + skipped=skipped, + bridge_owner_map=bridge_owner_map, + category_grouping_enabled=category_grouping_enabled, + counters=counters, ) + for package_dir, meta in packages: + _register_one_package_if_eligible(package_dir, meta, reg_ctx) + 
_mount_installed_category_groups(packages, enabled_map) + _log_protocol_compatibility_footer(logger, counters) _log_skipped_modules_debug(logger, skipped) diff --git a/tests/e2e/test_bundle_extraction_e2e.py b/tests/e2e/test_bundle_extraction_e2e.py index 85b0d180..b937a990 100644 --- a/tests/e2e/test_bundle_extraction_e2e.py +++ b/tests/e2e/test_bundle_extraction_e2e.py @@ -96,6 +96,9 @@ def test_publish_install_verify_roundtrip_for_specfact_codebase(monkeypatch, tmp assert tarball.exists() monkeypatch.setattr("specfact_cli.registry.module_installer.resolve_dependencies", lambda *_a, **_k: None) + monkeypatch.setattr( + "specfact_cli.registry.module_installer.install_resolved_pip_requirements", lambda *_a, **_k: None + ) monkeypatch.setattr("specfact_cli.registry.module_installer.verify_module_artifact", lambda *_a, **_k: True) monkeypatch.setattr("specfact_cli.registry.module_installer.ensure_publisher_trusted", lambda *_a, **_k: None) monkeypatch.setattr("specfact_cli.registry.module_installer.assert_module_allowed", lambda *_a, **_k: None) diff --git a/tests/e2e/test_core_slimming_e2e.py b/tests/e2e/test_core_slimming_e2e.py index 861476b6..5a51ca9e 100644 --- a/tests/e2e/test_core_slimming_e2e.py +++ b/tests/e2e/test_core_slimming_e2e.py @@ -2,6 +2,7 @@ from __future__ import annotations +from collections.abc import Generator from pathlib import Path import pytest @@ -9,7 +10,7 @@ @pytest.fixture(autouse=True) -def _reset_registry(): +def _reset_registry() -> Generator[None, None, None]: """Ensure registry is cleared so E2E sees predictable bootstrap state when we re-bootstrap.""" from specfact_cli.registry import CommandRegistry diff --git a/tests/e2e/test_wow_entrypoint.py b/tests/e2e/test_wow_entrypoint.py new file mode 100644 index 00000000..c80b29a9 --- /dev/null +++ b/tests/e2e/test_wow_entrypoint.py @@ -0,0 +1,103 @@ +"""E2E checks for the canonical wow entry path (solo-developer init in a temp git repo). 
+ +Full `code review run` execution requires bundled marketplace modules; here we verify the +documented first step (init) succeeds in a real temp git workspace and that the registry +surface expected for the second step is consistent with the README/docs contract. +""" + +from __future__ import annotations + +import subprocess +from collections.abc import Iterator +from pathlib import Path + +import pytest +from typer.testing import CliRunner + +from specfact_cli.cli import app +from specfact_cli.registry import CommandRegistry +from specfact_cli.registry.bootstrap import register_builtin_commands + + +@pytest.fixture(autouse=True) +def _reset_registry() -> Iterator[None]: + CommandRegistry._clear_for_testing() + yield + CommandRegistry._clear_for_testing() + + +runner = CliRunner() + + +@pytest.fixture +def patch_init_wow_dependencies(monkeypatch: pytest.MonkeyPatch) -> None: + """Stub init side effects so profile install can be exercised without real bundle I/O.""" + monkeypatch.setattr( + "specfact_cli.modules.init.src.commands.install_bundles_for_init", + lambda *a, **k: None, + ) + monkeypatch.setattr( + "specfact_cli.modules.init.src.commands.get_discovered_modules_for_state", + lambda **_: [{"id": "init", "enabled": True}], + ) + monkeypatch.setattr("specfact_cli.modules.init.src.commands.write_modules_state", lambda _: None) + monkeypatch.setattr( + "specfact_cli.modules.init.src.commands.run_discovery_and_write_cache", + lambda _: None, + ) + monkeypatch.setattr("specfact_cli.modules.init.src.commands.is_first_run", lambda **_: True) + + +def test_init_solo_developer_exits_zero_in_temp_git_repo(tmp_path: Path, patch_init_wow_dependencies: None) -> None: + """Documented path step 1: init --profile solo-developer in a repo (git init like a real user).""" + subprocess.run(["git", "init"], cwd=tmp_path, check=True, capture_output=True) + result = runner.invoke( + app, + ["init", "--repo", str(tmp_path), "--profile", "solo-developer"], + 
catch_exceptions=False, + ) + assert result.exit_code == 0, result.stdout + result.stderr + + +def test_after_wow_profile_mock_bundles_registry_lists_code_for_step_two( + monkeypatch: pytest.MonkeyPatch, tmp_path: Path, patch_init_wow_dependencies: None +) -> None: + """Step 2 needs code + code-review bundles; registry exposes `code` group when both are 'installed'.""" + subprocess.run(["git", "init"], cwd=tmp_path, check=True, capture_output=True) + init_r = runner.invoke( + app, + ["init", "--repo", str(tmp_path), "--profile", "solo-developer"], + catch_exceptions=False, + ) + assert init_r.exit_code == 0 + + CommandRegistry._clear_for_testing() + monkeypatch.setattr( + "specfact_cli.registry.module_packages.get_installed_bundles", + lambda _p, _e: ["specfact-codebase", "specfact-code-review"], + ) + register_builtin_commands() + names = CommandRegistry.list_commands() + assert "code" in names, f"Expected code group when codebase+code-review bundles present; got {names}" + + +def test_after_wow_profile_only_code_review_does_not_expose_code_command( + monkeypatch: pytest.MonkeyPatch, tmp_path: Path, patch_init_wow_dependencies: None +) -> None: + """Category groups map specfact-codebase -> `code`; code-review alone must not mount that group.""" + subprocess.run(["git", "init"], cwd=tmp_path, check=True, capture_output=True) + init_r = runner.invoke( + app, + ["init", "--repo", str(tmp_path), "--profile", "solo-developer"], + catch_exceptions=False, + ) + assert init_r.exit_code == 0 + + CommandRegistry._clear_for_testing() + monkeypatch.setattr( + "specfact_cli.registry.module_packages.get_installed_bundles", + lambda _p, _e: ["specfact-code-review"], + ) + register_builtin_commands() + names = CommandRegistry.list_commands() + assert "code" not in names, f"Expected no `code` group when only specfact-code-review is installed; got {names}" diff --git a/tests/integration/test_bundle_install.py b/tests/integration/test_bundle_install.py index 91dfb248..fc71e753 
100644 --- a/tests/integration/test_bundle_install.py +++ b/tests/integration/test_bundle_install.py @@ -50,6 +50,9 @@ def _create_module_tarball( def _stub_install_runtime(monkeypatch) -> None: monkeypatch.setattr("specfact_cli.registry.module_installer.resolve_dependencies", lambda *_a, **_k: None) + monkeypatch.setattr( + "specfact_cli.registry.module_installer.install_resolved_pip_requirements", lambda *_a, **_k: None + ) monkeypatch.setattr("specfact_cli.registry.module_installer.verify_module_artifact", lambda *_a, **_k: True) monkeypatch.setattr("specfact_cli.registry.module_installer.ensure_publisher_trusted", lambda *_a, **_k: None) monkeypatch.setattr("specfact_cli.registry.module_installer.assert_module_allowed", lambda *_a, **_k: None) diff --git a/tests/integration/test_category_group_routing.py b/tests/integration/test_category_group_routing.py index 10834871..3c2c4103 100644 --- a/tests/integration/test_category_group_routing.py +++ b/tests/integration/test_category_group_routing.py @@ -48,9 +48,10 @@ def test_backlog_help_lists_subcommands() -> None: assert "backlog" in out assert "policy" in out or "ceremony" in out return - assert "command 'backlog' is not installed." in out - assert "specfact init --profile " in out - assert "module install " in out + merged = " ".join(out.split()) + assert "module 'nold-ai/specfact-backlog' is not installed." 
in merged + assert "specfact module install nold-ai/specfact-backlog" in merged + assert "specfact init --profile " in merged def test_validate_flat_command_is_not_available() -> None: diff --git a/tests/integration/test_core_slimming.py b/tests/integration/test_core_slimming.py index b111e796..4b5a420e 100644 --- a/tests/integration/test_core_slimming.py +++ b/tests/integration/test_core_slimming.py @@ -2,6 +2,7 @@ from __future__ import annotations +from collections.abc import Generator from pathlib import Path from unittest.mock import MagicMock, patch @@ -23,7 +24,7 @@ @pytest.fixture(autouse=True) -def _reset_registry(): +def _reset_registry() -> Generator[None, None, None]: """Reset registry before each test so bootstrap state is predictable.""" CommandRegistry._clear_for_testing() yield diff --git a/tests/unit/cli/test_lean_help_output.py b/tests/unit/cli/test_lean_help_output.py index af864f08..33b8ad2c 100644 --- a/tests/unit/cli/test_lean_help_output.py +++ b/tests/unit/cli/test_lean_help_output.py @@ -81,9 +81,25 @@ def test_root_group_unknown_bundle_command_shows_install_guidance(capsys: pytest assert exc_info.value.code == 1 captured = capsys.readouterr() - assert "Command 'backlog' is not installed." in captured.out - assert "specfact init --profile " in captured.out - assert "module install " in captured.out + out = " ".join(captured.out.split()) + assert "Module 'nold-ai/specfact-backlog' is not installed." 
in out + assert "specfact module install nold-ai/specfact-backlog" in out + assert "specfact init --profile " in out + + +def test_root_group_unknown_code_shows_specfact_codebase_module(capsys: pytest.CaptureFixture[str]) -> None: + """Missing `code` group should name nold-ai/specfact-codebase (not the VS Code `code` CLI).""" + group = _RootCLIGroup(name="specfact") + ctx = click.Context(group) + + with pytest.raises(SystemExit) as exc_info: + group.resolve_command(ctx, ["code", "--help"]) + + assert exc_info.value.code == 1 + captured = capsys.readouterr() + out = " ".join(captured.out.split()) + assert "Module 'nold-ai/specfact-codebase' is not installed." in out + assert "specfact module install nold-ai/specfact-codebase" in out def test_specfact_help_with_all_bundles_installed_shows_eight_commands( diff --git a/tests/unit/docs/test_wow_entrypoint_contract.py b/tests/unit/docs/test_wow_entrypoint_contract.py new file mode 100644 index 00000000..d4a9f026 --- /dev/null +++ b/tests/unit/docs/test_wow_entrypoint_contract.py @@ -0,0 +1,77 @@ +"""Contract tests: README and docs landing must match the canonical uvx \"wow\" entry path. + +The wow path is the primary onboarding surface (init + code review with --scope full). +""" + +from __future__ import annotations + +from pathlib import Path + +import pytest + + +REPO_ROOT = Path(__file__).resolve().parents[3] +README = REPO_ROOT / "README.md" +DOCS_INDEX = REPO_ROOT / "docs" / "index.md" + +# Canonical strings β€” keep in sync with docs/index.md hero and README "Start Here". +UVX_INIT = "uvx specfact-cli init --profile solo-developer" +UVX_REVIEW = "uvx specfact-cli code review run --path . --scope full" +INSTALLED_INIT = "specfact init --profile solo-developer" +INSTALLED_REVIEW = "specfact code review run --path . 
--scope full" + + +@pytest.fixture(scope="module", autouse=True) +def _require_files() -> None: + if not README.is_file(): + pytest.skip(f"README.md missing at {README}", allow_module_level=True) + if not DOCS_INDEX.is_file(): + pytest.skip(f"docs/index.md missing at {DOCS_INDEX}", allow_module_level=True) + + +def _read(p: Path) -> str: + return p.read_text(encoding="utf-8") + + +def test_readme_and_docs_index_include_identical_uvx_wow_commands() -> None: + """Hero commands in README and docs/index.md must not drift.""" + readme = _read(README) + docs = _read(DOCS_INDEX) + for needle in (UVX_INIT, UVX_REVIEW): + assert needle in readme, f"README.md must contain {needle!r}" + assert needle in docs, f"docs/index.md must contain {needle!r}" + + +def test_readme_documents_pip_free_alternate_and_scope_full_rationale() -> None: + """README explains --scope full and the installed-CLI equivalent.""" + readme = _read(README) + assert "--scope full" in readme + assert INSTALLED_INIT in readme and INSTALLED_REVIEW in readme + assert "Verdict" in readme and "Score" in readme and "findings" in readme.lower() + + +def test_readme_wow_section_appears_before_choose_your_path() -> None: + """Primary entry content must appear before outcome routing.""" + readme = _read(README) + wow = readme.find("uvx specfact-cli init --profile solo-developer") + choose = readme.find("## Choose Your Path") + assert wow != -1 and choose != -1 + assert wow < choose + + +def test_docs_index_wow_block_precedes_what_is_specfact() -> None: + """Landing page leads with the runnable block before deep product copy.""" + docs = _read(DOCS_INDEX) + block = docs.find(UVX_INIT) + heading = docs.find("## What is SpecFact?") + assert block != -1 and heading != -1 + assert block < heading + + +def test_readme_start_here_precedes_documentation_topology() -> None: + """Fast-start remains above internal docs topology (existing contract).""" + readme = _read(README) + start = readme.find("### Start Here") + topo = 
readme.find("## Documentation Topology") + assert start != -1 and topo != -1 + assert start < topo diff --git a/tests/unit/importers/test_speckit_converter.py b/tests/unit/importers/test_speckit_converter.py index 6e2575b3..53d9f4d9 100644 --- a/tests/unit/importers/test_speckit_converter.py +++ b/tests/unit/importers/test_speckit_converter.py @@ -119,7 +119,8 @@ def test_generate_github_action(self, tmp_path: Path) -> None: # Verify workflow content (business logic) content = output_path.read_text() assert "SpecFact CLI Validation" in content - assert "specfact repro" in content + assert "specfact code repro" in content + assert "specfact init --profile solo-developer" in content def test_convert_to_speckit_sequential_numbering(self, tmp_path: Path) -> None: """Test convert_to_speckit uses sequential numbering when feature keys lack numbers.""" diff --git a/tests/unit/modules/init/test_first_run_selection.py b/tests/unit/modules/init/test_first_run_selection.py index 309b392d..f2140c19 100644 --- a/tests/unit/modules/init/test_first_run_selection.py +++ b/tests/unit/modules/init/test_first_run_selection.py @@ -27,9 +27,9 @@ def _telemetry_track_context(): # --- Profile resolution --- -def test_profile_solo_developer_resolves_to_specfact_codebase_only() -> None: +def test_profile_solo_developer_resolves_to_codebase_and_code_review() -> None: bundles = frs.resolve_profile_bundles("solo-developer") - assert bundles == ["specfact-codebase"] + assert bundles == ["specfact-codebase", "specfact-code-review"] def test_profile_enterprise_full_stack_resolves_to_all_five_bundles() -> None: @@ -133,7 +133,7 @@ def _discover(_builtin=None, user_root=None, **_kwargs): # --- CLI: specfact init --profile (mock installer) --- -def test_init_profile_solo_developer_calls_installer_with_specfact_codebase( +def test_init_profile_solo_developer_calls_installer_with_codebase_and_code_review( monkeypatch: pytest.MonkeyPatch, tmp_path: Path ) -> None: install_calls: list[list[str]] = [] 
@@ -160,7 +160,7 @@ def _fake_install_bundles(bundle_ids: list[str], install_root: Path, **kwargs: o ) assert result.exit_code == 0, result.output assert len(install_calls) == 1 - assert install_calls[0] == ["specfact-codebase"] + assert install_calls[0] == ["specfact-codebase", "specfact-code-review"] def test_init_profile_enterprise_full_stack_calls_installer_with_all_five( @@ -389,19 +389,16 @@ def _fake_install(bundle_ids: list[str], install_root: Path, **kwargs: object) - def test_spec_bundle_install_includes_project_dep(monkeypatch: pytest.MonkeyPatch, tmp_path: Path) -> None: - installed_modules: list[str] = [] + installed_ids: list[str] = [] - def _record_install(module_name: str, target_root: Path, **kwargs: object) -> bool: - installed_modules.append(module_name) - return True + def _record_marketplace(module_id: str, **kwargs: object) -> Path: + installed_ids.append(module_id) + return tmp_path / module_id.split("/")[1] monkeypatch.setattr( - "specfact_cli.registry.module_installer.install_bundled_module", - _record_install, + "specfact_cli.registry.module_installer.install_module", + _record_marketplace, ) - frs.install_bundles_for_init(["specfact-spec"], install_root=tmp_path) - project_module_names = set(frs.BUNDLE_TO_MODULE_NAMES.get("specfact-project", [])) - spec_module_names = set(frs.BUNDLE_TO_MODULE_NAMES.get("specfact-spec", [])) - installed_set = set(installed_modules) - assert project_module_names & installed_set, "spec bundle must trigger project bundle dep install" - assert spec_module_names & installed_set, "spec bundle modules must be installed" + frs.install_bundles_for_init(["specfact-spec"], install_root=tmp_path, show_progress=False) + assert "nold-ai/specfact-project" in installed_ids, "spec bundle depends on project marketplace module" + assert "nold-ai/specfact-spec" in installed_ids diff --git a/tests/unit/modules/module_registry/test_commands.py b/tests/unit/modules/module_registry/test_commands.py index 14a254eb..3e381631 100644 
--- a/tests/unit/modules/module_registry/test_commands.py +++ b/tests/unit/modules/module_registry/test_commands.py @@ -1055,7 +1055,7 @@ def _install(module_id: str, version=None, reinstall: bool = False): result = runner.invoke(app, ["upgrade"]) assert result.exit_code == 0 - assert installed == ["specfact/backlog"] + assert installed == ["nold-ai/specfact-backlog"] assert reinstall_flags == [True] assert "Upgraded" in result.stdout @@ -1104,6 +1104,47 @@ def test_upgrade_rejects_non_marketplace_source(monkeypatch) -> None: assert "marketplace modules" in result.stdout and "upgradeable" in result.stdout +def test_upgrade_rejects_multi_segment_module_id(monkeypatch, tmp_path: Path) -> None: + """Malformed owner/repo/extra must not resolve via last-segment fallback to a different module.""" + installed: list[str] = [] + + def _install(module_id: str, version=None, reinstall: bool = False): + installed.append(module_id) + return tmp_path / module_id.split("/")[-1] + + monkeypatch.setattr( + "specfact_cli.modules.module_registry.src.commands.install_module", + _install, + ) + monkeypatch.setattr( + "specfact_cli.modules.module_registry.src.commands.get_modules_with_state", + lambda: [ + {"id": "nold-ai/specfact-backlog", "version": "0.2.0", "enabled": True, "source": "marketplace"}, + ], + ) + + result = runner.invoke(app, ["upgrade", "foo/bar/backlog"]) + + assert result.exit_code == 1 + assert not installed + assert "Invalid module id" in result.stdout + assert "multi-segment" in result.stdout + + +def test_upgrade_row_for_target_does_not_match_last_segment_for_multi_slash_ids() -> None: + from specfact_cli.modules.module_registry.src.commands import _upgrade_row_for_target + + by_id = {"nold-ai/specfact-backlog": {"version": "1", "source": "marketplace"}} + assert _upgrade_row_for_target("foo/bar/backlog", by_id) == {} + + +def test_full_marketplace_module_id_for_install_rejects_multi_segment_path() -> None: + from specfact_cli.modules.module_registry.src.commands 
import _full_marketplace_module_id_for_install + + with pytest.raises(ValueError, match="multi-segment"): + _full_marketplace_module_id_for_install("foo/bar/backlog") + + def test_enable_command_updates_state_with_dependency_checks(monkeypatch) -> None: captured = {"enable_ids": None, "disable_ids": None, "force": None} diff --git a/tests/unit/registry/test_category_groups.py b/tests/unit/registry/test_category_groups.py index 838b17dc..71a3064c 100644 --- a/tests/unit/registry/test_category_groups.py +++ b/tests/unit/registry/test_category_groups.py @@ -46,24 +46,36 @@ def test_bootstrap_with_category_grouping_enabled_registers_group_commands() -> } assert set(names).issubset(allowed), f"Unexpected root commands found: {sorted(set(names) - allowed)}" assert {"init", "module", "upgrade"}.issubset(set(names)) + if "code" in names: + assert {"project", "spec"} <= set(names), ( + "When the code category group is mounted, project and spec groups must register too." + ) assert not (set(names) & forbidden_flat), ( f"Flat shims should not be registered: {sorted(set(names) & forbidden_flat)}" ) -def test_bootstrap_with_category_grouping_disabled_registers_flat_commands() -> None: - """With category grouping disabled, grouped aliases are not mounted via category grouping.""" +def test_bootstrap_with_category_grouping_disabled_still_has_no_flat_shims() -> None: + """Flat bundle shims are not registered even when SPECFACT_CATEGORY_GROUPING_ENABLED is false.""" with patch.dict(os.environ, {"SPECFACT_CATEGORY_GROUPING_ENABLED": "false"}, clear=False): register_builtin_commands() rebuild_root_app_from_registry() names = [name for name, _ in CommandRegistry.list_commands_for_help()] - # Skip assertions if bundles aren't installed (e.g., in CI without modules) - if "code" not in names: - pytest.skip("Codebase bundle not installed; skipping bundle-native command assertions") - assert "code" in names, "Bundle-native root command 'code' should remain available when grouping is 
disabled" - assert "govern" in names, "Bundle-native root command 'govern' should remain available when grouping is disabled" - assert "project" in names - assert "spec" in names + forbidden_flat = { + "analyze", + "drift", + "validate", + "repro", + "import", + "plan", + "sync", + "migrate", + } + assert not (set(names) & forbidden_flat), ( + f"Flat shims must not be registered: {sorted(set(names) & forbidden_flat)}" + ) + if "code" in names: + assert "project" in names and "spec" in names def test_code_analyze_routes_same_as_flat_analyze( diff --git a/tests/unit/registry/test_dependency_resolver.py b/tests/unit/registry/test_dependency_resolver.py index 0f28037f..44b5a3f7 100644 --- a/tests/unit/registry/test_dependency_resolver.py +++ b/tests/unit/registry/test_dependency_resolver.py @@ -2,13 +2,15 @@ from __future__ import annotations -from unittest.mock import patch +from unittest.mock import MagicMock, patch import pytest from specfact_cli.models.module_package import ModulePackageMetadata, VersionedPipDependency from specfact_cli.registry.dependency_resolver import ( DependencyConflictError, + PipDependencyInstallError, + install_resolved_pip_requirements, resolve_dependencies, ) @@ -135,3 +137,50 @@ def test_clear_error_messages_for_conflicts( msg = str(exc_info.value) assert "requests" in msg assert "Suggest" in msg or "force" in msg or "skip-deps" in msg + + +class TestInstallResolvedPipRequirements: + """Tests for install_resolved_pip_requirements.""" + + def test_no_op_when_empty(self) -> None: + with patch("specfact_cli.registry.dependency_resolver.subprocess.run") as mock_run: + install_resolved_pip_requirements([]) + mock_run.assert_not_called() + + def test_invokes_pip_install_with_pins(self) -> None: + ok = MagicMock() + ok.returncode = 0 + with ( + patch("specfact_cli.registry.dependency_resolver._pip_module_available", return_value=True), + patch("specfact_cli.registry.dependency_resolver.subprocess.run") as mock_run, + ): + mock_run.return_value 
= ok + install_resolved_pip_requirements(["requests==2.31.0", "pydantic==2.5.0"]) + mock_run.assert_called_once() + cmd = mock_run.call_args[0][0] + assert "pip" in cmd + assert "install" in cmd + assert "--no-input" in cmd + assert "requests==2.31.0" in cmd + assert "pydantic==2.5.0" in cmd + + def test_skips_when_pip_module_unavailable(self) -> None: + with ( + patch("specfact_cli.registry.dependency_resolver._pip_module_available", return_value=False), + patch("specfact_cli.registry.dependency_resolver.subprocess.run") as mock_run, + ): + install_resolved_pip_requirements(["x==1"]) + mock_run.assert_not_called() + + def test_raises_on_pip_failure(self) -> None: + bad = MagicMock() + bad.returncode = 1 + bad.stderr = "boom" + bad.stdout = "" + with ( + patch("specfact_cli.registry.dependency_resolver._pip_module_available", return_value=True), + patch("specfact_cli.registry.dependency_resolver.subprocess.run") as mock_run, + ): + mock_run.return_value = bad + with pytest.raises(PipDependencyInstallError): + install_resolved_pip_requirements(["x==1"]) diff --git a/tests/unit/registry/test_module_bridge_registration.py b/tests/unit/registry/test_module_bridge_registration.py index 93ab6f1d..95877b90 100644 --- a/tests/unit/registry/test_module_bridge_registration.py +++ b/tests/unit/registry/test_module_bridge_registration.py @@ -39,7 +39,6 @@ def test_register_module_package_commands_registers_declared_bridges(monkeypatch monkeypatch.setattr(module_packages, "verify_module_artifact", lambda _dir, _meta, allow_unsigned=False: True) monkeypatch.setattr(module_packages, "read_modules_state", dict) monkeypatch.setattr(module_packages, "_make_package_loader", lambda *_args: object) - monkeypatch.setattr(module_packages, "_load_package_module", lambda *_args: object()) monkeypatch.setattr(module_packages, "BRIDGE_REGISTRY", registry, raising=False) module_packages.register_module_package_commands() @@ -56,7 +55,6 @@ def 
test_invalid_bridge_declaration_is_non_fatal(monkeypatch, tmp_path: Path) -> monkeypatch.setattr(module_packages, "verify_module_artifact", lambda _dir, _meta, allow_unsigned=False: True) monkeypatch.setattr(module_packages, "read_modules_state", dict) monkeypatch.setattr(module_packages, "_make_package_loader", lambda *_args: object) - monkeypatch.setattr(module_packages, "_load_package_module", lambda *_args: object()) monkeypatch.setattr(module_packages, "BRIDGE_REGISTRY", registry, raising=False) module_packages.register_module_package_commands() diff --git a/tests/unit/registry/test_module_installer.py b/tests/unit/registry/test_module_installer.py index e17aab89..56ba1827 100644 --- a/tests/unit/registry/test_module_installer.py +++ b/tests/unit/registry/test_module_installer.py @@ -21,6 +21,10 @@ def _no_op_resolve_dependencies(monkeypatch: pytest.MonkeyPatch) -> None: "specfact_cli.registry.module_installer.resolve_dependencies", lambda *_a, **_k: None, ) + monkeypatch.setattr( + "specfact_cli.registry.module_installer.install_resolved_pip_requirements", + lambda *_a, **_k: None, + ) def _create_module_tarball( @@ -118,6 +122,9 @@ def test_install_module_logs_satisfied_dependencies_without_warning(monkeypatch, "specfact_cli.registry.module_installer.ensure_publisher_trusted", lambda *_args, **_kwargs: None ) monkeypatch.setattr("specfact_cli.registry.module_installer.resolve_dependencies", lambda *_args, **_kwargs: None) + monkeypatch.setattr( + "specfact_cli.registry.module_installer.install_resolved_pip_requirements", lambda *_args, **_kwargs: None + ) monkeypatch.setattr("specfact_cli.registry.module_installer.discover_all_modules", list) mock_logger = MagicMock() @@ -218,7 +225,7 @@ def test_install_module_validates_core_compatibility(monkeypatch, tmp_path: Path tarball = _create_module_tarball(tmp_path, "policy", core_compatibility=">=9.0.0") monkeypatch.setattr("specfact_cli.registry.module_installer.download_module", lambda *_args, **_kwargs: 
tarball) - with pytest.raises(ValueError, match="incompatible with current SpecFact CLI version"): + with pytest.raises(ValueError, match="requires SpecFact CLI"): install_module("specfact/policy", install_root=tmp_path / "marketplace-modules") diff --git a/tests/unit/registry/test_module_protocol_validation.py b/tests/unit/registry/test_module_protocol_validation.py index b1d5b603..ae81d611 100644 --- a/tests/unit/registry/test_module_protocol_validation.py +++ b/tests/unit/registry/test_module_protocol_validation.py @@ -2,7 +2,17 @@ from __future__ import annotations -from specfact_cli.registry.module_packages import _check_protocol_compliance, _check_schema_compatibility +from typing import Any + +from specfact_cli.registry.module_packages import PROTOCOL_METHODS, _check_schema_compatibility + + +def _protocol_operations_for_class(module_class: Any) -> list[str]: + operations: list[str] = [] + for operation, method_name in PROTOCOL_METHODS.items(): + if hasattr(module_class, method_name): + operations.append(operation) + return operations class FullProtocolModule: @@ -33,22 +43,22 @@ def run(self): def test_discovery_detects_protocol_implementation() -> None: - operations = _check_protocol_compliance(FullProtocolModule) + operations = _protocol_operations_for_class(FullProtocolModule) assert set(operations) == {"import", "export", "sync", "validate"} def test_full_protocol_logged() -> None: - operations = _check_protocol_compliance(FullProtocolModule) + operations = _protocol_operations_for_class(FullProtocolModule) assert len(operations) == 4 def test_partial_protocol_logged() -> None: - operations = _check_protocol_compliance(PartialProtocolModule) + operations = _protocol_operations_for_class(PartialProtocolModule) assert set(operations) == {"import", "validate"} def test_no_protocol_legacy_mode() -> None: - operations = _check_protocol_compliance(LegacyModule) + operations = _protocol_operations_for_class(LegacyModule) assert operations == [] diff --git 
a/tests/unit/scripts/test_pre_commit_smart_checks_docs.py b/tests/unit/scripts/test_pre_commit_smart_checks_docs.py index d8bf8c15..5688de23 100644 --- a/tests/unit/scripts/test_pre_commit_smart_checks_docs.py +++ b/tests/unit/scripts/test_pre_commit_smart_checks_docs.py @@ -30,3 +30,6 @@ def test_pre_commit_runs_code_review_gate_before_contract_tests() -> None: assert "run_code_review_gate" in script assert "hatch run python scripts/pre_commit_code_review.py" in script assert "run_code_review_gate\n\n# Contract-first test flow" in script + # Single invocation with all staged files β€” xargs can split into multiple runs and + # clobber .specfact/code-review.json (partial or empty findings). + assert '"${py_array[@]}"' in script diff --git a/tests/unit/specfact_cli/modules/test_module_upgrade_improvements.py b/tests/unit/specfact_cli/modules/test_module_upgrade_improvements.py new file mode 100644 index 00000000..d55adc7c --- /dev/null +++ b/tests/unit/specfact_cli/modules/test_module_upgrade_improvements.py @@ -0,0 +1,304 @@ +"""Tests for module upgrade command improvements. 
+ +Spec: openspec/changes/docs-new-user-onboarding/specs/module-installation/spec.md +Tasks: 7b.1 - 7b.13 +""" + +from __future__ import annotations + +from pathlib import Path +from typing import Any +from unittest.mock import MagicMock, patch + +import click +import pytest +from typer.testing import CliRunner + +from specfact_cli.modules.module_registry.src.commands import ( + _resolve_one_upgrade_name, + _run_marketplace_upgrades, + app as module_app, +) + + +runner = CliRunner() + + +def _unstyled(text: str) -> str: + return click.unstyle(text) + + +# ── Scenario: Upgrade when module is already at latest version (no X->X) ────── + + +def test_run_marketplace_upgrades_skips_reinstall_when_at_latest(tmp_path: Path) -> None: + """When latest_version == current_version, module must NOT be reinstalled and must NOT appear in 'Upgraded:' with X->X.""" + by_id: dict[str, dict[str, Any]] = { + "nold-ai/specfact-backlog": { + "version": "0.41.16", + "source": "marketplace", + "latest_version": "0.41.16", + } + } + + install_called = [] + + def _fake_install(module_id: str, reinstall: bool = False, **kwargs: object) -> Path: + install_called.append(module_id) + return tmp_path / "backlog" + + with patch("specfact_cli.modules.module_registry.src.commands.install_module", side_effect=_fake_install): + _run_marketplace_upgrades(["nold-ai/specfact-backlog"], by_id, {}) + + assert not install_called, "install_module must NOT be called when module is already at latest version" + + +def test_run_marketplace_upgrades_all_at_latest_prints_up_to_date( + tmp_path: Path, capsys: pytest.CaptureFixture[str] +) -> None: + """When all modules are at latest, output must say 'All modules are up to date' and no X->X lines.""" + by_id: dict[str, dict[str, Any]] = { + "nold-ai/specfact-backlog": {"version": "0.41.16", "source": "marketplace", "latest_version": "0.41.16"}, + "nold-ai/specfact-codebase": {"version": "0.44.0", "source": "marketplace", "latest_version": "0.44.0"}, + } + + with 
patch("specfact_cli.modules.module_registry.src.commands.install_module") as mock_install: + from io import StringIO + + output_buf = StringIO() + from rich.console import Console + + test_console = Console(file=output_buf, highlight=False, markup=True) + with patch("specfact_cli.modules.module_registry.src.commands.console", test_console): + _run_marketplace_upgrades(["nold-ai/specfact-backlog", "nold-ai/specfact-codebase"], by_id, {}) + + output = output_buf.getvalue() + + mock_install.assert_not_called() + assert "0.41.16 -> 0.41.16" not in output, "Must not show X->X lines when nothing changed" + assert "0.44.0 -> 0.44.0" not in output, "Must not show X->X lines when nothing changed" + + +def test_run_marketplace_upgrades_mixed_result_shows_sections(tmp_path: Path) -> None: + """With mixed results, output has 'Upgraded:' and 'Already up to date:' sections.""" + by_id: dict[str, dict[str, Any]] = { + "nold-ai/specfact-backlog": {"version": "0.41.16", "source": "marketplace", "latest_version": "0.42.0"}, + "nold-ai/specfact-codebase": {"version": "0.44.0", "source": "marketplace", "latest_version": "0.44.0"}, + } + + def _fake_install(module_id: str, reinstall: bool = False, **kwargs: object) -> Path: + if "backlog" in module_id: + return tmp_path / "backlog" + raise AssertionError(f"Should not install {module_id}") + + def _fake_read_version(module_dir: Path) -> str: + if "backlog" in str(module_dir): + return "0.42.0" + return "0.44.0" + + from io import StringIO + + from rich.console import Console + + output_buf = StringIO() + test_console = Console(file=output_buf, highlight=False, markup=True) + + with ( + patch("specfact_cli.modules.module_registry.src.commands.install_module", side_effect=_fake_install), + patch( + "specfact_cli.modules.module_registry.src.commands._read_installed_module_version", + side_effect=_fake_read_version, + ), + patch("specfact_cli.modules.module_registry.src.commands.console", test_console), + ): + 
_run_marketplace_upgrades(["nold-ai/specfact-backlog", "nold-ai/specfact-codebase"], by_id, {}) + + output = output_buf.getvalue() + assert "Upgraded" in output, "Must have Upgraded section" + assert "up to date" in output.lower(), "Must have 'Already up to date' section" + assert "0.41.16 -> 0.42.0" in output or "backlog" in output + + +# ── Scenario: Upgrade multiple named modules selectively ────────────────────── + + +def test_upgrade_command_accepts_multiple_module_names(tmp_path: Path) -> None: + """upgrade command must accept multiple positional module names.""" + with ( + patch( + "specfact_cli.modules.module_registry.src.commands.get_modules_with_state", + return_value=[ + { + "id": "nold-ai/specfact-backlog", + "version": "0.41.16", + "source": "marketplace", + "latest_version": "0.42.0", + }, + { + "id": "nold-ai/specfact-codebase", + "version": "0.44.0", + "source": "marketplace", + "latest_version": "0.44.0", + }, + ], + ), + patch( + "specfact_cli.modules.module_registry.src.commands._run_marketplace_upgrades", + ), + patch("specfact_cli.modules.module_registry.src.commands._resolve_upgrade_target_ids") as mock_resolve, + ): + mock_resolve.return_value = ["nold-ai/specfact-backlog", "nold-ai/specfact-codebase"] + result = runner.invoke(module_app, ["upgrade", "backlog", "codebase"]) + + # Should not show "No such argument" error + assert "No such argument" not in _unstyled(result.output), result.output + # May succeed (exit 0) or fail for other reasons, but not because of wrong arg count + assert result.exit_code != 2, f"Exit code 2 suggests wrong args: {result.output}" + + +# ── Scenario: Breaking major version upgrade requires confirmation ───────────── + + +def test_run_marketplace_upgrades_prompts_for_major_bump(tmp_path: Path) -> None: + """_run_marketplace_upgrades must prompt before upgrading when major version increases.""" + by_id: dict[str, dict[str, Any]] = { + "nold-ai/specfact-backlog": {"version": "0.41.16", "source": "marketplace", 
"latest_version": "1.0.0"}, + } + + from io import StringIO + + from rich.console import Console + + output_buf = StringIO() + test_console = Console(file=output_buf, highlight=False, markup=True) + + prompt_shown = [] + + def _fake_confirm(message: str, **kwargs: object) -> bool: + prompt_shown.append(message) + return False # User declines + + with ( + patch("specfact_cli.modules.module_registry.src.commands.console", test_console), + patch("specfact_cli.modules.module_registry.src.commands.typer.confirm", side_effect=_fake_confirm), + patch("specfact_cli.modules.module_registry.src.commands.install_module") as mock_install, + ): + _run_marketplace_upgrades(["nold-ai/specfact-backlog"], by_id, {}) + + output = output_buf.getvalue() + # Must show major bump warning + assert "major" in output.lower() or prompt_shown, "Must warn about major version bump" + mock_install.assert_not_called() # User declined β†’ must not install + + +def test_run_marketplace_upgrades_skips_major_in_ci_mode(tmp_path: Path) -> None: + """In CI/CD (non-interactive), major bumps are skipped with a warning; install is not called.""" + by_id: dict[str, dict[str, Any]] = { + "nold-ai/specfact-backlog": {"version": "0.41.16", "source": "marketplace", "latest_version": "1.0.0"}, + } + + with ( + patch("specfact_cli.modules.module_registry.src.commands.is_non_interactive", return_value=True), + patch("specfact_cli.modules.module_registry.src.commands.install_module") as mock_install, + ): + _run_marketplace_upgrades(["nold-ai/specfact-backlog"], by_id, {}, yes=False) + + mock_install.assert_not_called() + + +def test_run_marketplace_upgrades_yes_flag_skips_major_bump_prompt(tmp_path: Path) -> None: + """With yes=True, major version bumps proceed without prompt.""" + by_id: dict[str, dict[str, Any]] = { + "nold-ai/specfact-backlog": {"version": "0.41.16", "source": "marketplace", "latest_version": "1.0.0"}, + } + + def _fake_install(module_id: str, **kwargs: object) -> Path: + return tmp_path / 
"backlog" + + def _fake_read_version(p: Path) -> str: + return "1.0.0" + + with ( + patch("specfact_cli.modules.module_registry.src.commands.install_module", side_effect=_fake_install), + patch( + "specfact_cli.modules.module_registry.src.commands._read_installed_module_version", + side_effect=_fake_read_version, + ), + patch("specfact_cli.modules.module_registry.src.commands.typer.confirm") as mock_confirm, + ): + _run_marketplace_upgrades(["nold-ai/specfact-backlog"], by_id, {}, yes=True) + + mock_confirm.assert_not_called() # --yes flag skips prompt + + +def test_upgrade_command_warns_when_registry_unavailable(monkeypatch: pytest.MonkeyPatch, tmp_path: Path) -> None: + """When the registry index cannot be fetched, upgrade prints a warning before continuing.""" + monkeypatch.setattr( + "specfact_cli.modules.module_registry.src.commands.fetch_registry_index", + lambda **_: None, + ) + monkeypatch.setattr( + "specfact_cli.modules.module_registry.src.commands.get_modules_with_state", + lambda: [{"id": "backlog", "version": "0.2.0", "enabled": True, "source": "marketplace"}], + ) + + def _install(module_id: str, **kwargs: object) -> Path: + return tmp_path / "backlog" + + monkeypatch.setattr( + "specfact_cli.modules.module_registry.src.commands.install_module", + _install, + ) + monkeypatch.setattr( + "specfact_cli.modules.module_registry.src.commands._read_installed_module_version", + lambda _p: "0.2.0", + ) + result = runner.invoke(module_app, ["upgrade", "backlog"]) + assert result.exit_code == 0 + out = (result.stdout or "") + (result.stderr or "") + assert "unavailable" in out.lower() or "network error" in out.lower() + + +def test_run_marketplace_upgrades_calls_console_status_when_spinner_enabled( + monkeypatch: pytest.MonkeyPatch, tmp_path: Path +) -> None: + """When not in pytest/test mode, install path uses ``console.status`` for feedback.""" + monkeypatch.delenv("PYTEST_CURRENT_TEST", raising=False) + monkeypatch.delenv("TEST_MODE", raising=False) + + 
mock_console = MagicMock() + mock_ctx = MagicMock() + mock_ctx.__enter__ = MagicMock(return_value=None) + mock_ctx.__exit__ = MagicMock(return_value=None) + mock_console.status = MagicMock(return_value=mock_ctx) + + by_id: dict[str, dict[str, Any]] = { + "nold-ai/specfact-backlog": {"version": "0.1.0", "source": "marketplace"}, + } + + def _fake_install(module_id: str, **kwargs: object) -> Path: + return tmp_path / "m" + + with ( + patch("specfact_cli.modules.module_registry.src.commands.console", mock_console), + patch("specfact_cli.modules.module_registry.src.commands.install_module", side_effect=_fake_install), + patch( + "specfact_cli.modules.module_registry.src.commands._read_installed_module_version", + return_value="0.2.0", + ), + ): + _run_marketplace_upgrades(["nold-ai/specfact-backlog"], by_id, {}) + + mock_console.status.assert_called() + + +def test_resolve_one_upgrade_name_accepts_namespaced_id_when_installed_key_is_short() -> None: + """`specfact module upgrade nold-ai/specfact-backlog` must resolve when list uses short id keys.""" + by_id: dict[str, dict[str, Any]] = { + "specfact-backlog": { + "version": "0.41.16", + "source": "marketplace", + "latest_version": "0.41.16", + } + } + assert _resolve_one_upgrade_name("nold-ai/specfact-backlog", by_id) == "specfact-backlog" diff --git a/tests/unit/specfact_cli/modules/test_multi_module_install_uninstall.py b/tests/unit/specfact_cli/modules/test_multi_module_install_uninstall.py new file mode 100644 index 00000000..23c3eb51 --- /dev/null +++ b/tests/unit/specfact_cli/modules/test_multi_module_install_uninstall.py @@ -0,0 +1,289 @@ +"""Tests for multi-module install and uninstall. 
+ +Spec: openspec/changes/docs-new-user-onboarding/specs/module-installation/spec.md +Tasks: 7c.1 - 7c.9 +""" + +from __future__ import annotations + +from collections.abc import Generator +from dataclasses import dataclass +from pathlib import Path +from typing import Any +from unittest.mock import patch + +import click +import pytest +from typer.testing import CliRunner + +from specfact_cli.cli import app, rebuild_root_app_from_registry +from specfact_cli.registry import CommandRegistry +from specfact_cli.registry.bootstrap import register_builtin_commands + + +@pytest.fixture(autouse=True) +def _reset_registry_and_root_app() -> Generator[None, None, None]: + """Other tests clear ``CommandRegistry`` without re-registering; rebuild root ``app`` for Typer.""" + CommandRegistry._clear_for_testing() + register_builtin_commands() + rebuild_root_app_from_registry() + yield + CommandRegistry._clear_for_testing() + register_builtin_commands() + rebuild_root_app_from_registry() + + +runner = CliRunner() + + +@dataclass +class MockMetadata: + name: str + + +@dataclass +class MockEntry: + metadata: MockMetadata + source: str + + +def _unstyled(text: str) -> str: + return click.unstyle(text) + + +# ── Scenario: Multi-install ──────────────────────────────────────────────────── + + +def test_module_install_accepts_multiple_ids() -> None: + """specfact module install A B must accept two positional arguments.""" + installed: list[str] = [] + + def _fake_install(module_id: str, **kwargs: object) -> Path: + installed.append(module_id) + return Path(f"/tmp/{module_id.split('/')[1]}") + + with ( + patch( + "specfact_cli.modules.module_registry.src.commands.install_module", + side_effect=_fake_install, + ), + patch( + "specfact_cli.modules.module_registry.src.commands.discover_all_modules", + return_value=[], + ), + patch( + "specfact_cli.modules.module_registry.src.commands._install_skip_if_already_satisfied", + return_value=False, + ), + patch( + 
"specfact_cli.modules.module_registry.src.commands._try_install_bundled_module", + return_value=False, + ), + ): + result = runner.invoke(app, ["module", "install", "nold-ai/specfact-codebase", "nold-ai/specfact-code-review"]) + + output = _unstyled(result.output) + assert result.exit_code != 2, f"Exit code 2 = CLI arg error; got: {output}" + assert "nold-ai/specfact-codebase" in installed or "specfact-codebase" in str(installed), ( + f"Both modules should be installed; installed={installed}" + ) + assert "nold-ai/specfact-code-review" in installed or "specfact-code-review" in str(installed) + + +def test_module_install_rejects_version_with_multiple_module_ids() -> None: + """--version is only valid with a single module id.""" + result = runner.invoke( + app, + [ + "module", + "install", + "nold-ai/specfact-codebase", + "nold-ai/specfact-code-review", + "--version", + "1.0.0", + ], + ) + assert result.exit_code == 1 + out = _unstyled(result.output).lower() + assert "single" in out and "version" in out + + +def test_module_install_single_still_works() -> None: + """Single-module install must still work after multi-install change.""" + installed: list[str] = [] + + def _fake_install(module_id: str, **kwargs: object) -> Path: + installed.append(module_id) + return Path(f"/tmp/{module_id.split('/')[1]}") + + with ( + patch( + "specfact_cli.modules.module_registry.src.commands.install_module", + side_effect=_fake_install, + ), + patch( + "specfact_cli.modules.module_registry.src.commands.discover_all_modules", + return_value=[], + ), + patch( + "specfact_cli.modules.module_registry.src.commands._install_skip_if_already_satisfied", + return_value=False, + ), + patch( + "specfact_cli.modules.module_registry.src.commands._try_install_bundled_module", + return_value=False, + ), + ): + result = runner.invoke(app, ["module", "install", "nold-ai/specfact-codebase"]) + + assert result.exit_code != 2, f"Exit code 2 = CLI arg error: {_unstyled(result.output)}" + assert 
len(installed) == 1 + + +def test_module_install_multi_aborts_on_first_failure_without_installing_rest() -> None: + """Multi-install: if module A fails, do not attempt B (avoid partial surprise state).""" + installed: list[str] = [] + + def _fake_install(module_id: str, **kwargs: object) -> Path: + if "codebase" in module_id: + raise RuntimeError("mock install failure for first module") + installed.append(module_id) + return Path("/tmp/ok") + + with ( + patch( + "specfact_cli.modules.module_registry.src.commands.install_module", + side_effect=_fake_install, + ), + patch( + "specfact_cli.modules.module_registry.src.commands.discover_all_modules", + return_value=[], + ), + patch( + "specfact_cli.modules.module_registry.src.commands._install_skip_if_already_satisfied", + return_value=False, + ), + patch( + "specfact_cli.modules.module_registry.src.commands._try_install_bundled_module", + return_value=False, + ), + ): + result = runner.invoke( + app, + ["module", "install", "nold-ai/specfact-codebase", "nold-ai/specfact-code-review"], + ) + + assert result.exit_code == 1 + assert installed == [], "Second module must not install after first fails" + + +def test_module_install_multi_skips_already_installed_and_continues() -> None: + """Multi-install: if A is already installed, skip A but still install B; exit 0.""" + installed: list[str] = [] + + def _fake_skip(scope: str, name: str, root: Path, reinstall: bool, discovered: Any) -> bool: + return "codebase" in name # A is already installed + + def _fake_install(module_id: str, **kwargs: object) -> Path: + installed.append(module_id) + return Path(f"/tmp/{module_id.split('/')[1]}") + + with ( + patch( + "specfact_cli.modules.module_registry.src.commands.install_module", + side_effect=_fake_install, + ), + patch( + "specfact_cli.modules.module_registry.src.commands.discover_all_modules", + return_value=[], + ), + patch( + "specfact_cli.modules.module_registry.src.commands._install_skip_if_already_satisfied", + 
side_effect=_fake_skip, + ), + patch( + "specfact_cli.modules.module_registry.src.commands._try_install_bundled_module", + return_value=False, + ), + ): + result = runner.invoke(app, ["module", "install", "nold-ai/specfact-codebase", "nold-ai/specfact-code-review"]) + + assert result.exit_code == 0, f"Should exit 0 when only one is skipped: {_unstyled(result.output)}" + assert any("code-review" in mid for mid in installed), "B must still be installed even if A was skipped" + + +# ── Scenario: Multi-uninstall ───────────────────────────────────────────────── + + +def test_module_uninstall_accepts_multiple_names() -> None: + """specfact module uninstall A B must accept two positional arguments.""" + uninstalled: list[str] = [] + + def _fake_uninstall(module_name: str, **kwargs: object) -> None: + uninstalled.append(module_name) + + with ( + patch( + "specfact_cli.modules.module_registry.src.commands.uninstall_module", + side_effect=_fake_uninstall, + ), + patch( + "specfact_cli.modules.module_registry.src.commands.discover_all_modules", + return_value=[ + MockEntry(MockMetadata("specfact-codebase"), "marketplace"), + MockEntry(MockMetadata("specfact-code-review"), "marketplace"), + ], + ), + ): + result = runner.invoke(app, ["module", "uninstall", "specfact-codebase", "specfact-code-review"]) + + output = _unstyled(result.output) + assert result.exit_code != 2, f"Exit code 2 = CLI arg error: {output}" + + +def test_module_uninstall_single_still_works() -> None: + """Single-module uninstall must still work after multi-uninstall change.""" + with ( + patch("specfact_cli.modules.module_registry.src.commands.uninstall_module"), + patch( + "specfact_cli.modules.module_registry.src.commands.discover_all_modules", + return_value=[ + MockEntry(MockMetadata("specfact-codebase"), "marketplace"), + ], + ), + ): + result = runner.invoke(app, ["module", "uninstall", "specfact-codebase"]) + + assert result.exit_code != 2, f"Exit code 2 = CLI arg error: {_unstyled(result.output)}" 
+ + +def test_module_uninstall_multi_missing_first_reports_error_still_uninstalls_rest_exits_nonzero() -> None: + """7c.7: If A is not installed, report error, still uninstall B, exit non-zero.""" + uninstalled: list[str] = [] + + def _fake_uninstall(module_name: str, **kwargs: object) -> None: + uninstalled.append(module_name) + + discovered = [ + MockEntry(MockMetadata("specfact-code-review"), "marketplace"), + ] + + with ( + patch( + "specfact_cli.modules.module_registry.src.commands.uninstall_module", + side_effect=_fake_uninstall, + ), + patch( + "specfact_cli.modules.module_registry.src.commands.discover_all_modules", + return_value=discovered, + ), + ): + result = runner.invoke( + app, + ["module", "uninstall", "specfact-codebase", "specfact-code-review"], + ) + + assert uninstalled == ["specfact-code-review"], ( + "Missing module must not block uninstall of remaining names; got " + repr(uninstalled) + ) + assert result.exit_code == 1, "Overall exit must be non-zero when any name failed" diff --git a/tests/unit/specfact_cli/registry/test_command_registry.py b/tests/unit/specfact_cli/registry/test_command_registry.py index 4999610d..694a304f 100644 --- a/tests/unit/specfact_cli/registry/test_command_registry.py +++ b/tests/unit/specfact_cli/registry/test_command_registry.py @@ -7,6 +7,7 @@ from __future__ import annotations import os +from collections.abc import Generator from pathlib import Path import pytest @@ -30,7 +31,7 @@ def _subprocess_env() -> dict[str, str]: @pytest.fixture(autouse=True) -def _reset_registry(): +def _reset_registry() -> Generator[None, None, None]: """Reset registry before each test so tests are isolated.""" CommandRegistry._clear_for_testing() yield @@ -191,10 +192,13 @@ def test_cli_backlog_help_exits_zero(): ) if result.returncode == 0: return - merged = (result.stdout or "") + "\n" + (result.stderr or "") - assert "Command 'backlog' is not installed." 
in merged, (result.stdout, result.stderr) + assert result.returncode == 1, (result.stdout, result.stderr) + merged = " ".join(((result.stdout or "") + "\n" + (result.stderr or "")).split()) + assert "Module 'nold-ai/specfact-backlog' is not installed." in merged, (result.stdout, result.stderr) + assert "The backlog command group is provided by that module." in merged, (result.stdout, result.stderr) + assert "specfact module install nold-ai/specfact-backlog" in merged, (result.stdout, result.stderr) assert "specfact init --profile " in merged, (result.stdout, result.stderr) - assert "module install " in merged, (result.stdout, result.stderr) + assert "to install bundles." in merged, (result.stdout, result.stderr) def test_cli_module_help_exits_zero(): diff --git a/tests/unit/specfact_cli/registry/test_dependency_resolver_pip_free.py b/tests/unit/specfact_cli/registry/test_dependency_resolver_pip_free.py new file mode 100644 index 00000000..7d638586 --- /dev/null +++ b/tests/unit/specfact_cli/registry/test_dependency_resolver_pip_free.py @@ -0,0 +1,121 @@ +"""Tests for pip-free dependency resolver fallback. 
+ +Spec: openspec/changes/docs-new-user-onboarding/specs/first-run-selection/spec.md +Bug 2: module install fails under uvx with "No module named pip" +""" + +from __future__ import annotations + +import subprocess +from typing import cast +from unittest.mock import patch + +import pytest + +from specfact_cli.models.module_package import ModulePackageMetadata +from specfact_cli.registry.dependency_resolver import ( + PipDependencyValidationUnavailableError, + _run_basic_resolver, + resolve_dependencies, +) + + +def test_run_basic_resolver_returns_constraints_when_pip_unavailable() -> None: + """When pip is unavailable (uvx environment), basic resolver must not raise β€” return constraints.""" + constraints = ["requests>=2.28.0", "pyyaml>=6.0"] + + def _pip_not_available(*cmd_args: object, **kwargs: object) -> subprocess.CompletedProcess[str]: + return subprocess.CompletedProcess( + args=cast(list[str | bytes], [str(a) for a in cmd_args]), + returncode=1, + stdout="", + stderr="No module named pip", + ) + + with patch("specfact_cli.registry.dependency_resolver.subprocess.run", side_effect=_pip_not_available): + result = _run_basic_resolver(constraints, allow_unvalidated=True) + + # Must not raise; must return something (constraints or empty list) + assert isinstance(result, list), "Should return a list even when pip is unavailable" + + +def test_run_basic_resolver_raises_when_pip_unavailable_without_allow_unvalidated() -> None: + """Without allow_unvalidated, missing pip must not silently skip validation.""" + + def _pip_not_available(*cmd_args: object, **kwargs: object) -> subprocess.CompletedProcess[str]: + return subprocess.CompletedProcess( + args=cast(list[str | bytes], [str(a) for a in cmd_args]), + returncode=1, + stdout="", + stderr="No module named pip", + ) + + with ( + patch("specfact_cli.registry.dependency_resolver.subprocess.run", side_effect=_pip_not_available), + pytest.raises(PipDependencyValidationUnavailableError), + ): + 
_run_basic_resolver(["requests>=1"], allow_unvalidated=False) + + +def test_resolve_dependencies_does_not_raise_when_pip_unavailable() -> None: + """resolve_dependencies must complete without raising when pip and pip-compile are both unavailable.""" + module = ModulePackageMetadata( + name="test-module", + version="0.1.0", + commands=["test"], + pip_dependencies=["requests>=2.28.0"], + ) + + with ( + patch("specfact_cli.registry.dependency_resolver._pip_tools_available", return_value=False), + patch( + "specfact_cli.registry.dependency_resolver._run_basic_resolver", + return_value=["requests>=2.28.0"], + ) as mock_basic, + ): + result = resolve_dependencies([module]) + + mock_basic.assert_called_once() + assert mock_basic.call_args.kwargs.get("allow_unvalidated") is False + assert isinstance(result, list) + + +def test_resolve_dependencies_passes_allow_unvalidated_to_basic_resolver() -> None: + """Module install path requests unvalidated resolution when pip is missing (uvx).""" + module = ModulePackageMetadata( + name="test-module", + version="0.1.0", + commands=["test"], + pip_dependencies=["requests>=2.28.0"], + ) + + with ( + patch("specfact_cli.registry.dependency_resolver._pip_tools_available", return_value=False), + patch( + "specfact_cli.registry.dependency_resolver._run_basic_resolver", + return_value=["requests>=2.28.0"], + ) as mock_basic, + ): + resolve_dependencies([module], allow_unvalidated=True) + + assert mock_basic.call_args.kwargs.get("allow_unvalidated") is True + + +def test_resolve_dependencies_empty_modules_returns_empty() -> None: + """resolve_dependencies with no pip deps must return [] without calling pip.""" + module = ModulePackageMetadata( + name="no-pip-deps", + version="0.1.0", + commands=["cmd"], + pip_dependencies=[], + ) + with patch("specfact_cli.registry.dependency_resolver._pip_tools_available") as mock_check: + result = resolve_dependencies([module]) + + mock_check.assert_not_called() + assert result == [] + + +def 
test_basic_resolver_returns_empty_for_empty_constraints() -> None: + result = _run_basic_resolver([]) + assert result == [] diff --git a/tests/unit/specfact_cli/registry/test_help_cache.py b/tests/unit/specfact_cli/registry/test_help_cache.py index 242ca175..c39885dd 100644 --- a/tests/unit/specfact_cli/registry/test_help_cache.py +++ b/tests/unit/specfact_cli/registry/test_help_cache.py @@ -9,6 +9,7 @@ import os import subprocess import sys +from collections.abc import Generator from pathlib import Path import pytest @@ -51,7 +52,7 @@ def registry_dir(tmp_path: Path): @pytest.fixture(autouse=True) -def _reset_registry(): +def _reset_registry() -> Generator[None, None, None]: """Reset registry before each test.""" CommandRegistry._clear_for_testing() yield diff --git a/tests/unit/specfact_cli/registry/test_module_packages.py b/tests/unit/specfact_cli/registry/test_module_packages.py index 77135e45..ac936d0a 100644 --- a/tests/unit/specfact_cli/registry/test_module_packages.py +++ b/tests/unit/specfact_cli/registry/test_module_packages.py @@ -9,6 +9,7 @@ import logging import os +from collections.abc import Generator from pathlib import Path import pytest @@ -34,7 +35,7 @@ @pytest.fixture(autouse=True) -def _reset_registry(): +def _reset_registry() -> Generator[None, None, None]: CommandRegistry._clear_for_testing() yield CommandRegistry._clear_for_testing() diff --git a/tests/unit/specfact_cli/registry/test_profile_presets.py b/tests/unit/specfact_cli/registry/test_profile_presets.py new file mode 100644 index 00000000..91c8544d --- /dev/null +++ b/tests/unit/specfact_cli/registry/test_profile_presets.py @@ -0,0 +1,138 @@ +"""Tests for profile presets and init --profile module installation. 
+ +Spec: openspec/changes/docs-new-user-onboarding/specs/profile-presets/spec.md +Spec: openspec/changes/docs-new-user-onboarding/specs/first-run-selection/spec.md +""" + +from __future__ import annotations + +from pathlib import Path +from unittest.mock import patch + +import pytest + +from specfact_cli.modules.init.src.first_run_selection import ( + CANONICAL_BUNDLES, + PROFILE_PRESETS, + install_bundles_for_init, + resolve_profile_bundles, +) + + +# ── Scenario: Profile canonical bundle mapping is machine-verifiable ────────── + + +def test_solo_developer_includes_specfact_code_review() -> None: + """solo-developer profile MUST include specfact-code-review.""" + bundles = PROFILE_PRESETS["solo-developer"] + assert "specfact-code-review" in bundles, f"solo-developer must include specfact-code-review; got {bundles}" + + +def test_solo_developer_includes_specfact_codebase() -> None: + """solo-developer profile MUST include specfact-codebase.""" + bundles = PROFILE_PRESETS["solo-developer"] + assert "specfact-codebase" in bundles + + +def test_solo_developer_canonical_set() -> None: + """solo-developer canonical set is exactly [specfact-codebase, specfact-code-review].""" + expected = {"specfact-codebase", "specfact-code-review"} + actual = set(PROFILE_PRESETS["solo-developer"]) + assert actual == expected, f"Expected {expected}, got {actual}" + + +def test_specfact_code_review_in_canonical_bundles() -> None: + """specfact-code-review must be in CANONICAL_BUNDLES.""" + assert "specfact-code-review" in CANONICAL_BUNDLES + + +def test_backlog_team_canonical_set() -> None: + expected = {"specfact-project", "specfact-backlog", "specfact-codebase"} + assert set(PROFILE_PRESETS["backlog-team"]) == expected + + +def test_api_first_team_canonical_set() -> None: + expected = {"specfact-spec", "specfact-codebase"} + assert set(PROFILE_PRESETS["api-first-team"]) == expected + + +def test_enterprise_full_stack_canonical_set() -> None: + expected = { + "specfact-project", + 
"specfact-backlog", + "specfact-codebase", + "specfact-spec", + "specfact-govern", + } + assert set(PROFILE_PRESETS["enterprise-full-stack"]) == expected + + +def test_resolve_profile_bundles_solo_developer() -> None: + bundles = resolve_profile_bundles("solo-developer") + assert "specfact-codebase" in bundles + assert "specfact-code-review" in bundles + + +def test_resolve_profile_bundles_invalid_raises() -> None: + with pytest.raises(ValueError, match="Unknown profile"): + resolve_profile_bundles("unknown-profile") + + +# ── Scenario: install_bundles_for_init installs marketplace modules ──────────── + + +def test_install_bundles_for_init_calls_marketplace_for_code_review(tmp_path: Path) -> None: + """install_bundles_for_init must call the marketplace installer for specfact-code-review.""" + installed_marketplace_ids: list[str] = [] + + def _fake_install_module(module_id: str, **kwargs: object) -> Path: + installed_marketplace_ids.append(module_id) + return tmp_path / module_id.split("/")[1] + + with ( + patch( + "specfact_cli.registry.module_installer.install_bundled_module", + return_value=False, + ), + patch( + "specfact_cli.registry.module_installer.install_module", + side_effect=_fake_install_module, + ), + ): + install_bundles_for_init( + ["specfact-code-review"], + install_root=tmp_path, + non_interactive=True, + ) + + assert any("specfact-code-review" in mid for mid in installed_marketplace_ids), ( + f"install_module was not called with specfact-code-review; calls: {installed_marketplace_ids}" + ) + + +def test_install_bundles_for_init_solo_developer_installs_both(tmp_path: Path) -> None: + """Solo-developer bundles install via marketplace (slim CLI has no per-command bundled workflow dirs).""" + installed_marketplace_ids: list[str] = [] + + def _fake_marketplace(module_id: str, **kwargs: object) -> Path: + installed_marketplace_ids.append(module_id) + return tmp_path / module_id.split("/")[1] + + with ( + patch( + 
"specfact_cli.registry.module_installer.install_bundled_module", + return_value=False, + ), + patch( + "specfact_cli.registry.module_installer.install_module", + side_effect=_fake_marketplace, + ), + ): + install_bundles_for_init( + ["specfact-codebase", "specfact-code-review"], + install_root=tmp_path, + non_interactive=True, + ) + + assert "nold-ai/specfact-codebase" in installed_marketplace_ids + assert "nold-ai/specfact-code-review" in installed_marketplace_ids diff --git a/tests/unit/specfact_cli/registry/test_versioned_bundle_deps.py b/tests/unit/specfact_cli/registry/test_versioned_bundle_deps.py new file mode 100644 index 00000000..700c20ea --- /dev/null +++ b/tests/unit/specfact_cli/registry/test_versioned_bundle_deps.py @@ -0,0 +1,102 @@ +"""Tests for versioned bundle dependency resolution. + +Spec: openspec/changes/docs-new-user-onboarding/specs/dependency-resolution/spec.md +Tasks: 7d.1 - 7d.10 +""" + +from __future__ import annotations + +from typing import Any + +import pytest + +from specfact_cli.registry.module_installer import _extract_bundle_dependencies + + +# ── Scenario: Registry entry declares a versioned bundle dependency ─────────── + + +def test_extract_bundle_dependencies_handles_versioned_object() -> None: + """_extract_bundle_dependencies must handle {"id": "...", "version": ">=x.y.z"} form.""" + metadata: dict[str, Any] = {"bundle_dependencies": [{"id": "nold-ai/specfact-project", "version": ">=0.41.0"}]} + deps = _extract_bundle_dependencies(metadata) + assert "nold-ai/specfact-project" in deps, f"Versioned object form not handled; got {deps}" + + +def test_extract_bundle_dependencies_handles_plain_string() -> None: + """_extract_bundle_dependencies must still handle plain string entries (backward compat).""" + metadata: dict[str, Any] = {"bundle_dependencies": ["nold-ai/specfact-project"]} + deps = _extract_bundle_dependencies(metadata) + assert "nold-ai/specfact-project" in deps + + +def 
test_extract_bundle_dependencies_handles_mixed_list() -> None: + """_extract_bundle_dependencies must handle a mix of string and versioned object entries.""" + metadata: dict[str, Any] = { + "bundle_dependencies": [ + "nold-ai/specfact-project", + {"id": "nold-ai/specfact-codebase", "version": ">=0.40.0"}, + ] + } + deps = _extract_bundle_dependencies(metadata) + assert "nold-ai/specfact-project" in deps + assert "nold-ai/specfact-codebase" in deps + + +def test_extract_bundle_dependencies_empty_list() -> None: + metadata: dict[str, Any] = {"bundle_dependencies": []} + deps = _extract_bundle_dependencies(metadata) + assert deps == [] + + +def test_extract_bundle_dependencies_missing_key() -> None: + metadata: dict[str, Any] = {} + deps = _extract_bundle_dependencies(metadata) + assert deps == [] + + +def test_extract_bundle_dependencies_rejects_object_without_id() -> None: + """Malformed bundle_dependencies objects must fail manifest validation, not be skipped.""" + metadata: dict[str, Any] = {"bundle_dependencies": [{"version": ">=1.0.0"}]} + with pytest.raises(ValueError, match="non-empty 'id'"): + _extract_bundle_dependencies(metadata) + + +def test_extract_bundle_dependencies_rejects_empty_id_object() -> None: + metadata: dict[str, Any] = {"bundle_dependencies": [{"id": "", "version": ">=1.0.0"}]} + with pytest.raises(ValueError, match="non-empty 'id'"): + _extract_bundle_dependencies(metadata) + + +def test_extract_bundle_dependencies_rejects_empty_string_entry() -> None: + metadata: dict[str, Any] = {"bundle_dependencies": ["nold-ai/specfact-project", ""]} + with pytest.raises(ValueError, match="string entry must be non-empty"): + _extract_bundle_dependencies(metadata) + + +# ── core_compatibility actionable error ─────────────────────────────────────── + + +def test_validate_install_manifest_constraints_actionable_error() -> None: + """core_compatibility mismatch must produce actionable message, not bare ValueError.""" + from 
specfact_cli.registry.module_installer import _validate_install_manifest_constraints + + metadata: dict[str, Any] = { + "name": "specfact-code-review", + "version": "0.1.0", + "core_compatibility": ">=99.0.0,<100.0.0", # impossibly high β€” always fails + } + + with pytest.raises((ValueError, SystemExit)) as exc_info: + _validate_install_manifest_constraints( + metadata, + "specfact-code-review", + trust_non_official=True, + non_interactive=True, + ) + + exc_val = str(exc_info.value) + # Must include version info, not just "incompatible" + assert any( + phrase in exc_val.lower() for phrase in ["requires", "specfact cli", ">=", "run:", "upgrade", "99.0.0"] + ), f"Error message not actionable: {exc_val!r}" diff --git a/tests/unit/specfact_cli/test_module_not_found_error.py b/tests/unit/specfact_cli/test_module_not_found_error.py new file mode 100644 index 00000000..6b7bbc6e --- /dev/null +++ b/tests/unit/specfact_cli/test_module_not_found_error.py @@ -0,0 +1,64 @@ +"""Tests for module-not-found error including corrective command. 
+ +Spec: openspec/changes/docs-new-user-onboarding/specs/docs-vibecoder-entry-path/spec.md +Tasks: 6.1 - 6.3 +""" + +from __future__ import annotations + +import click +from typer.testing import CliRunner + +from specfact_cli.cli import app + + +runner = CliRunner() + + +def _unstyled(text: str) -> str: + return click.unstyle(text) + + +def test_module_not_found_error_includes_init_command() -> None: + """When a known command group is not installed, error must include the init command.""" + result = runner.invoke(app, ["code", "review", "run"]) + + output = _unstyled(result.output) + + # Must fail + assert result.exit_code != 0 + + # Must include the corrective init command + assert "init" in output, f"Error must mention 'init' command: {output!r}" + assert "--profile" in output or "profile" in output, f"Error must suggest --profile option: {output!r}" + + +def test_module_not_found_error_includes_uvx_command() -> None: + """Module-not-found error must include uvx-compatible init command for uvx users.""" + result = runner.invoke(app, ["code", "review", "run"]) + + output = _unstyled(result.output) + + assert result.exit_code != 0 + assert "specfact init" in output or "uvx" in output or "--profile" in output, ( + f"Error must include actionable init/profile guidance: {output!r}" + ) + + +def test_no_flat_import_in_missing_bundle_map() -> None: + """Flat `import` is not supported; hints use `code` / `project` groups only.""" + from specfact_cli.cli import _INVOKED_TO_MARKETPLACE_MODULE + + assert "import" not in _INVOKED_TO_MARKETPLACE_MODULE + assert _INVOKED_TO_MARKETPLACE_MODULE["project"] == "nold-ai/specfact-project" + + +def test_module_not_found_error_includes_init_profile_placeholder() -> None: + """Module-not-found error for 'code' command must include init --profile guidance.""" + result = runner.invoke(app, ["code"]) + + output = _unstyled(result.output) + + assert result.exit_code != 0 + assert "specfact init" in output, f"Error must mention specfact 
 init: {output!r}"
+    assert "--profile" in output, f"Error must suggest a profile: {output!r}"
diff --git a/tests/unit/tools/test_smart_test_coverage.py b/tests/unit/tools/test_smart_test_coverage.py
index 8971bdba..f10de220 100644
--- a/tests/unit/tools/test_smart_test_coverage.py
+++ b/tests/unit/tools/test_smart_test_coverage.py
@@ -473,7 +473,7 @@ def test_show_latest_log(self, caplog):
     @patch.object(SmartCoverageManager, "_run_changed_only")
     def test_run_smart_tests_with_changes(self, mock_changed_only):
         """Test running smart tests when changes are detected (changed-only mode)."""
-        mock_changed_only.return_value = True
+        mock_changed_only.return_value = (True, True)
 
         with (
             patch.object(self.manager, "_has_source_changes", return_value=True),
@@ -503,6 +503,7 @@ def test_run_smart_tests_no_changes(self, mock_run_tests):
             "test_count": 150,
             "coverage_percentage": 85.5,
             "success": True,
+            "last_run": "2025-01-01T12:00:00",
         }
 
         result = self.manager.run_smart_tests()
diff --git a/tests/unit/tools/test_smart_test_coverage_enhanced.py b/tests/unit/tools/test_smart_test_coverage_enhanced.py
index c59f85fb..9a4869ca 100644
--- a/tests/unit/tools/test_smart_test_coverage_enhanced.py
+++ b/tests/unit/tools/test_smart_test_coverage_enhanced.py
@@ -205,12 +205,34 @@ def test_run_smart_tests_auto_with_changes(self):
             patch.object(self.manager, "_has_config_changes", return_value=False),
             patch.object(self.manager, "_run_changed_only") as mock_changed_only,
         ):
-            mock_changed_only.return_value = True
+            mock_changed_only.return_value = (True, True)
 
             result = self.manager.run_smart_tests("auto")
 
             assert result is True
             mock_changed_only.assert_called_once()
 
+    def test_run_changed_only_without_baseline_runs_full(self):
+        """No last_full_run: incremental cannot compute diffs; must run full suite."""
+        self.manager.cache.pop("last_full_run", None)
+        with patch.object(self.manager, "_run_full_tests", return_value=True) as mock_full:
+            ok, ran_any = 
self.manager._run_changed_only() + assert ok is True + assert ran_any is True + mock_full.assert_called_once() + + def test_run_smart_tests_force_auto_with_no_incremental_runs_full(self): + """Force + auto with no mapped tests: run full suite instead of no-op skip.""" + with ( + patch.object(self.manager, "_has_source_changes", return_value=True), + patch.object(self.manager, "_has_test_changes", return_value=False), + patch.object(self.manager, "_has_config_changes", return_value=False), + patch.object(self.manager, "_run_changed_only", return_value=(True, False)), + patch.object(self.manager, "_run_full_tests", return_value=True) as mock_full, + ): + result = self.manager.run_smart_tests("auto", force=True) + assert result is True + mock_full.assert_called_once() + def test_run_smart_tests_auto_without_changes(self): """Test smart tests in auto mode without changes.""" with ( diff --git a/tests/unit/validators/test_bundle_dependency_install.py b/tests/unit/validators/test_bundle_dependency_install.py index 614780e8..2fa5109f 100644 --- a/tests/unit/validators/test_bundle_dependency_install.py +++ b/tests/unit/validators/test_bundle_dependency_install.py @@ -45,6 +45,9 @@ def _create_module_tarball( def _stub_integrity_and_deps(monkeypatch: pytest.MonkeyPatch) -> None: monkeypatch.setattr("specfact_cli.registry.module_installer.resolve_dependencies", lambda *_args, **_kwargs: None) + monkeypatch.setattr( + "specfact_cli.registry.module_installer.install_resolved_pip_requirements", lambda *_args, **_kwargs: None + ) monkeypatch.setattr("specfact_cli.registry.module_installer.verify_module_artifact", lambda *_args, **_kwargs: True) monkeypatch.setattr( "specfact_cli.registry.module_installer.ensure_publisher_trusted", lambda *_args, **_kwargs: None diff --git a/tools/contract_first_smart_test.py b/tools/contract_first_smart_test.py index a9cd65ba..2a5a4bbd 100644 --- a/tools/contract_first_smart_test.py +++ b/tools/contract_first_smart_test.py @@ -713,7 +713,7 @@ 
def _run_contract_exploration( return success, exploration_results - def _run_scenario_tests(self) -> tuple[bool, int, float]: + def _run_scenario_tests(self) -> tuple[bool, int, float | None]: """Run scenario tests (integration tests with contract references).""" logger.info("Running scenario tests...") @@ -739,7 +739,7 @@ def _run_scenario_tests(self) -> tuple[bool, int, float]: if not scenario_tests: logger.info("No scenario tests found (integration tests with contract references)") - return True, 0, 100.0 + return True, 0, None logger.info("Found %d scenario tests:", len(scenario_tests)) for test_file in scenario_tests: diff --git a/tools/semgrep/README.md b/tools/semgrep/README.md index 72b5cb78..3e7c9b9c 100644 --- a/tools/semgrep/README.md +++ b/tools/semgrep/README.md @@ -8,6 +8,16 @@ This directory contains Semgrep rules for: **Note**: These files (`tools/semgrep/*.yml`) are used for **development** (hatch scripts, local testing). For **runtime** use in the installed package, the files are bundled as `src/specfact_cli/resources/semgrep/*.yml` and will be automatically included in the package distribution. +### Running Semgrep (1.38+) + +Semgrep no longer auto-discovers `.semgrep.yml` in the project root; pass **`--config`** explicitly ([CLI reference](https://semgrep.dev/docs/cli-reference)). + +- **All rules in this directory**: `hatch run semgrep-full src` (uses `semgrep --config tools/semgrep`). +- **Single rule file** (e.g. async only): `hatch run scan src` (uses `tools/semgrep/async.yml`). +- **Raw CLI**: `semgrep --config tools/semgrep src` + +The Hatch env includes **`setuptools`** so Semgrep’s OpenTelemetry stack can import `pkg_resources` on Python 3.12+ minimal venvs. 
+ ## Rules ### `async.yml` - Python Async Anti-Patterns diff --git a/tools/smart_test_coverage.py b/tools/smart_test_coverage.py index 5beb777b..571dfba7 100755 --- a/tools/smart_test_coverage.py +++ b/tools/smart_test_coverage.py @@ -39,6 +39,7 @@ from pathlib import Path from typing import Any, TextIO, cast +from beartype import beartype from icontract import ensure, require @@ -681,6 +682,58 @@ def _parse_total_coverage_percent(output_lines: list[str]) -> float: pass return coverage_percentage + def _coverage_data_file_path(self) -> Path: + """Path to the coverage data file (matches ``[tool.coverage.run] data_file`` in pyproject.toml).""" + return self.project_root / "logs" / "tests" / "coverage" / ".coverage" + + def _run_coverage_report_subprocess(self) -> list[str]: + """Run ``coverage report -m`` and return output lines (for parsing TOTAL %%).""" + cmd = [sys.executable, "-m", "coverage", "report", "-m"] + try: + proc = subprocess.run( + cmd, + cwd=self.project_root, + capture_output=True, + text=True, + timeout=600, + check=False, + ) + except (OSError, subprocess.TimeoutExpired) as exc: + logger.warning("coverage report subprocess failed: %s", exc) + return [] + text = (proc.stdout or "") + (proc.stderr or "") + if proc.returncode not in (0, 2) and not text.strip(): + logger.warning("coverage report failed with rc=%s", proc.returncode) + return [] + lines_out: list[str] = [] + for line in text.splitlines(keepends=True): + if not line.endswith("\n"): + line += "\n" + lines_out.append(line) + return lines_out + + def _append_coverage_report_if_needed(self, output_lines: list[str], log_file: TextIO) -> None: + """Append ``coverage report`` output when ``coverage run`` left no parsable TOTAL line. + + Hatch ``run-cov`` uses ``coverage run -m pytest`` without printing a report; line coverage + only appears after ``coverage report``. Without this, smart-test always logged 0%% coverage. 
+ """ + if self._parse_total_coverage_percent(output_lines) > 0: + return + if not self._coverage_data_file_path().is_file(): + logger.debug( + "Skipping coverage report append: no data file at %s", + self._coverage_data_file_path(), + ) + return + report_lines = self._run_coverage_report_subprocess() + if not report_lines: + return + for line in report_lines: + sys.stdout.write(line) + log_file.write(line) + output_lines.append(line) + @staticmethod def _pytest_count_from_banner_line(line: str) -> int | None: """Parse count from ``======== N passed`` style summary lines.""" @@ -751,11 +804,14 @@ def _run_coverage_hatch_or_pytest(self, log_file: TextIO) -> tuple[int | None, l pytest_cmd = self._build_pytest_cmd(with_coverage=True, parallel=True) rc2, out2, _ = self._popen_stream_to_log(pytest_cmd, log_file, timeout=timeout_full) output_lines.extend(out2) + self._append_coverage_report_if_needed(output_lines, log_file) return rc2 if rc2 is not None else 1, output_lines + self._append_coverage_report_if_needed(output_lines, log_file) return rc, output_lines pytest_cmd = self._build_pytest_cmd(with_coverage=True, parallel=True) rc, out, _ = self._popen_stream_to_log(pytest_cmd, log_file, timeout=timeout_full) output_lines.extend(out) + self._append_coverage_report_if_needed(output_lines, log_file) return rc if rc is not None else 1, output_lines def _run_leveled_hatch_or_pytest( @@ -782,12 +838,18 @@ def _run_leveled_hatch_or_pytest( logger.debug("Executing fallback: %s", shlex.join(pytest_cmd)) rc2, out2, _ = self._popen_stream_to_log(pytest_cmd, log_file, timeout=timeout_seconds) output_lines.extend(out2) + if want_coverage: + self._append_coverage_report_if_needed(output_lines, log_file) return rc2 if rc2 is not None else 1, output_lines + if want_coverage: + self._append_coverage_report_if_needed(output_lines, log_file) return rc, output_lines pytest_cmd = self._build_pytest_cmd(with_coverage=want_coverage, extra_args=test_file_strings) logger.info("Hatch 
disabled; executing pytest directly: %s", shlex.join(pytest_cmd)) rc, out, _ = self._popen_stream_to_log(pytest_cmd, log_file, timeout=timeout_seconds) output_lines.extend(out) + if want_coverage: + self._append_coverage_report_if_needed(output_lines, log_file) return rc if rc is not None else 1, output_lines def _adjust_success_for_coverage_threshold( @@ -795,10 +857,12 @@ def _adjust_success_for_coverage_threshold( success: bool, test_level: str, test_count: int, - coverage_percentage: float, + coverage_percentage: float | None, output_lines: list[str], ) -> bool: """Treat threshold-only failures as success for unit/folder runs when appropriate.""" + if coverage_percentage is None: + return success if success or test_level not in ("unit", "folder") or test_count <= 0 or coverage_percentage <= 0: return success if not any(self._line_indicates_coverage_threshold_failure(line) for line in output_lines): @@ -816,15 +880,21 @@ def _log_completed_test_run( success: bool, test_level: str, test_count: int, - coverage_percentage: float, - tested_coverage_percentage: float, + coverage_percentage: float | None, + tested_coverage_percentage: float | None, test_log_file: Path, coverage_log_file: Path, return_code: int | None, ) -> None: """Emit summary log lines after a leveled test run.""" if success: - if test_level in ("unit", "folder") and tested_coverage_percentage > 0: + if coverage_percentage is None or tested_coverage_percentage is None: + logger.info( + "%s tests completed: %d tests; line coverage not measured for this level", + test_level.title(), + test_count, + ) + elif test_level in ("unit", "folder") and tested_coverage_percentage > 0: logger.info( "%s tests completed: %d tests, %.1f%% overall, %.1f%% tested code coverage", test_level.title(), @@ -846,7 +916,9 @@ def _log_completed_test_run( logger.info("Check %s test log for details: %s", test_level, test_log_file) logger.info("Check %s coverage log for details: %s", test_level, coverage_log_file) - def 
_log_tested_coverage_vs_threshold(self, test_level: str, tested_coverage_percentage: float) -> None: + def _log_tested_coverage_vs_threshold(self, test_level: str, tested_coverage_percentage: float | None) -> None: + if tested_coverage_percentage is None: + return if test_level not in ("unit", "folder") or tested_coverage_percentage <= 0: return if tested_coverage_percentage < self.coverage_threshold: @@ -957,6 +1029,20 @@ def _get_unit_tests_for_files(self, modified_files: list[Path]) -> list[Path]: return unit_tests + def _modified_sources_proven_by_unit_batch( + self, modified_sources: list[Path], unit_tests_run: list[Path] + ) -> list[Path]: + """Return modified sources whose full set of mapped unit tests was included in the batch run.""" + run = {str(p.resolve()) for p in unit_tests_run} + proven: list[Path] = [] + for src in modified_sources: + mapped = self._get_unit_tests_for_files([src]) + if not mapped: + continue + if all(str(t.resolve()) in run for t in mapped): + proven.append(src) + return proven + def _get_files_in_folders(self, modified_folders: set[Path]) -> list[Path]: """Get all source files in the modified folders.""" folder_files: list[Path] = [] @@ -1139,8 +1225,11 @@ def _run_coverage_tests(self) -> tuple[bool, int, float]: logger.error("Error running tests: %s", e) return False, 0, 0 - def _run_tests(self, test_files: list[Path], test_level: str) -> tuple[bool, int, float]: - """Run tests for specific files and return (success, test_count, coverage_percentage).""" + def _run_tests(self, test_files: list[Path], test_level: str) -> tuple[bool, int, float | None]: + """Run tests for specific files and return (success, test_count, coverage_percentage). + + ``coverage_percentage`` is None for levels without reliable line coverage (integration/e2e/scenarios). 
+ """ if not test_files: logger.info("No %s tests found to run", test_level) return True, 0, 100.0 @@ -1192,16 +1281,16 @@ def _run_tests(self, test_files: list[Path], test_level: str) -> tuple[bool, int test_count = self._parse_pytest_test_count(output_lines) success = return_code == 0 - if test_level in ("integration", "e2e"): - coverage_percentage = 100.0 - tested_coverage_percentage = 100.0 + # Integration, E2E, scenarios: line coverage is not a reliable metric for this runner. + if test_level in ("integration", "e2e", "scenarios"): + coverage_percentage = None + tested_coverage_percentage = None else: coverage_percentage = self._parse_total_coverage_percent(output_lines) - - if test_level in ("unit", "folder") and test_files: - tested_coverage_percentage = self._calculate_tested_coverage(test_files, output_lines) - else: - tested_coverage_percentage = coverage_percentage + if test_level in ("unit", "folder") and test_files: + tested_coverage_percentage = self._calculate_tested_coverage(test_files, output_lines) + else: + tested_coverage_percentage = coverage_percentage success = self._adjust_success_for_coverage_threshold( success, test_level, test_count, coverage_percentage, output_lines @@ -1284,8 +1373,10 @@ def _check_coverage_threshold(self, coverage_percentage: float): ) def _maybe_warn_subthreshold_non_full( - self, success: bool, enforce_threshold: bool, coverage_percentage: float + self, success: bool, enforce_threshold: bool, coverage_percentage: float | None ) -> None: + if coverage_percentage is None: + return if success and enforce_threshold: self._check_coverage_threshold(coverage_percentage) elif success and not enforce_threshold and coverage_percentage < self.coverage_threshold: @@ -1319,12 +1410,13 @@ def _update_cache( self, success: bool, test_count: int, - coverage_percentage: float, + coverage_percentage: float | None, enforce_threshold: bool = True, update_only: bool = False, updated_sources: list[Path] | None = None, updated_tests: 
list[Path] | None = None, updated_configs: list[Path] | None = None, + update_coverage_in_cache: bool = True, ) -> None: """Update cache and hashes. If update_only is True, only update hashes for provided file lists (when their tests passed). @@ -1354,10 +1446,15 @@ def update_map(paths: list[Path] | None, target: dict[str, str]): self._refresh_all_tracked_hashes(file_hashes, test_file_hashes, config_file_hashes) # Update cache; keep last_full_run as the last index time (not necessarily a full suite) + prior_cov = float(self.cache.get("coverage_percentage", 0.0)) + if coverage_percentage is None or not update_coverage_in_cache: + cov_for_cache = prior_cov + else: + cov_for_cache = coverage_percentage self.cache.update( { "last_full_run": datetime.now().isoformat(), - "coverage_percentage": coverage_percentage if success else self.cache.get("coverage_percentage", 0), + "coverage_percentage": cov_for_cache if success else self.cache.get("coverage_percentage", 0), "file_hashes": file_hashes, "test_file_hashes": test_file_hashes, "config_file_hashes": config_file_hashes, @@ -1474,7 +1571,8 @@ def show_latest_log(self) -> None: except Exception as e: logger.error("Error reading log file: %s", e) - @require(lambda test_level: test_level in {"unit", "folder", "integration", "e2e", "full", "auto"}) + @beartype + @require(lambda self, test_level: test_level in {"unit", "folder", "integration", "e2e", "full", "auto"}) @ensure(lambda result: isinstance(result, bool), "run_smart_tests must return bool") def run_smart_tests(self, test_level: str = "auto", force: bool = False) -> bool: """Run tests with smart change detection and specified level.""" @@ -1485,9 +1583,15 @@ def run_smart_tests(self, test_level: str = "auto", force: bool = False) -> bool config_changed = self._has_config_changes() if source_changed or test_changed or config_changed or force: - return self._run_changed_only() - # No changes - use cached data + ok, ran_any = self._run_changed_only() + if force and 
not ran_any: + return self._run_full_tests() + return ok + # No changes - use cached data only when a baseline run has been recorded status = self.get_status() + if not status.get("last_run"): + logger.info("No cached full-run baseline; running full test suite once…") + return self._run_full_tests() logger.info( "Using cached results: %d tests, %.1f%% coverage", status["test_count"], @@ -1499,10 +1603,13 @@ def run_smart_tests(self, test_level: str = "auto", force: bool = False) -> bool return self.run_tests_by_level(test_level) return self.run_tests_by_level(test_level) - @require(lambda test_level: test_level in {"unit", "folder", "integration", "e2e", "full", "auto"}) + @beartype + @require(lambda self, test_level: test_level in {"unit", "folder", "integration", "e2e", "full", "auto"}) @ensure(lambda result: isinstance(result, bool), "run_tests_by_level must return bool") def run_tests_by_level(self, test_level: str) -> bool: - """Run tests by specified level: unit, folder, integration, e2e, or full.""" + """Run tests by specified level: unit, folder, integration, e2e, full, or auto (smart detection).""" + if test_level == "auto": + return self.run_smart_tests("auto", force=False) if test_level == "unit": return self._run_unit_tests() if test_level == "folder": @@ -1652,8 +1759,15 @@ def _run_integration_tests(self) -> bool: enforce_threshold=False, update_only=True, updated_tests=integration_tests, + update_coverage_in_cache=False, ) - logger.info("Integration tests completed: %d tests, %.1f%% coverage", test_count, coverage_percentage) + if coverage_percentage is None: + logger.info( + "Integration tests completed: %d tests; line coverage not measured for this level", + test_count, + ) + else: + logger.info("Integration tests completed: %d tests, %.1f%% coverage", test_count, coverage_percentage) logger.info( "Note: Integration test coverage is not enforced - focus is on component interaction validation" ) @@ -1689,8 +1803,15 @@ def _run_e2e_tests(self) -> 
bool: enforce_threshold=False, update_only=True, updated_tests=e2e_tests, + update_coverage_in_cache=False, ) - logger.info("E2E tests completed: %d tests, %.1f%% coverage", test_count, coverage_percentage) + if coverage_percentage is None: + logger.info( + "E2E tests completed: %d tests; line coverage not measured for this level", + test_count, + ) + else: + logger.info("E2E tests completed: %d tests, %.1f%% coverage", test_count, coverage_percentage) logger.info("Note: E2E test coverage is not enforced - focus is on full workflow validation") else: logger.error("E2E tests failed") @@ -1706,15 +1827,25 @@ def _run_full_tests(self) -> bool: self._update_cache(True, test_count, coverage_percentage, enforce_threshold=False) return success - def _run_changed_only(self) -> bool: + def _run_changed_only(self) -> tuple[bool, bool]: """Run only tests impacted by changes since last cached hashes. - - Unit: tests mapped from modified source files + directly modified unit tests - - Integration/E2E: only directly modified tests - No full-suite fallback here; CI should catch broader regressions.""" + + Returns: + (success, ran_any): ``ran_any`` is False when no mapped tests ran (incremental no-op). + + When there is no ``last_full_run`` baseline and incremental work would run nothing, + runs a one-time full suite to establish coverage/hash baseline (avoids zero cached coverage). 
+ """ # Collect modified items modified_sources = self._get_modified_files() modified_tests = self._get_modified_test_files() + if modified_sources and self.cache.get("last_full_run"): + unmapped = [s for s in modified_sources if not self._get_unit_tests_for_files([s])] + if unmapped: + logger.info("Modified source(s) have no unit-mapped tests; running full suite to verify baseline.") + return self._run_full_tests(), True + # Map modified sources to unit tests unit_from_sources = self._get_unit_tests_for_files(modified_sources) # Split modified tests by level @@ -1742,14 +1873,14 @@ def dedupe(paths: list[Path]) -> list[Path]: ran_any = True ok, unit_count, unit_cov = self._run_tests(unit_tests, "unit") if ok: - # Update hashes only for modified sources we mapped and the unit test files themselves + proven_sources = self._modified_sources_proven_by_unit_batch(modified_sources, unit_tests) self._update_cache( True, unit_count, unit_cov, enforce_threshold=False, update_only=True, - updated_sources=modified_sources, + updated_sources=proven_sources, updated_tests=unit_tests, ) overall_success = overall_success and ok @@ -1758,7 +1889,13 @@ def dedupe(paths: list[Path]) -> list[Path]: ok, integ_count, integ_cov = self._run_tests(integ_tests, "integration") if ok: self._update_cache( - True, integ_count, integ_cov, enforce_threshold=False, update_only=True, updated_tests=integ_tests + True, + integ_count, + integ_cov, + enforce_threshold=False, + update_only=True, + updated_tests=integ_tests, + update_coverage_in_cache=False, ) overall_success = overall_success and ok if e2e_tests: @@ -1766,17 +1903,28 @@ def dedupe(paths: list[Path]) -> list[Path]: ok, e2e_count, e2e_cov = self._run_tests(e2e_tests, "e2e") if ok: self._update_cache( - True, e2e_count, e2e_cov, enforce_threshold=False, update_only=True, updated_tests=e2e_tests + True, + e2e_count, + e2e_cov, + enforce_threshold=False, + update_only=True, + updated_tests=e2e_tests, + update_coverage_in_cache=False, ) 
overall_success = overall_success and ok if not ran_any: + if not self.cache.get("last_full_run"): + logger.info("No incremental baseline; running full test suite once to establish cache…") + success = self._run_full_tests() + return success, True + if self._has_config_changes(): + logger.info("Configuration changed but no mapped tests to run; running full suite…") + return self._run_full_tests(), True logger.info("No changed files detected that map to tests - skipping test execution") - # Still keep cache timestamp to allow future git comparisons - self._update_cache(True, 0, self.cache.get("coverage_percentage", 0.0), enforce_threshold=False) - return True + return True, False - return overall_success + return overall_success, True @require(lambda test_level: test_level in {"unit", "folder", "integration", "e2e", "full", "auto"}) @ensure(lambda result: isinstance(result, bool), "force_full_run must return bool") @@ -1786,6 +1934,8 @@ def force_full_run(self, test_level: str = "full") -> bool: if test_level == "full": success, test_count, coverage_percentage = self._run_coverage_tests() self._update_cache(success, test_count, coverage_percentage, enforce_threshold=True) + elif test_level == "auto": + success = self.run_smart_tests("auto", force=True) else: success = self.run_tests_by_level(test_level) return success