From 07e9e742dc3c30c3320a6f0d43fd91ffe2135838 Mon Sep 17 00:00:00 2001 From: Dominikus Nold Date: Thu, 2 Apr 2026 00:21:32 +0200 Subject: [PATCH 1/8] =?UTF-8?q?Fix=20content=20for=20install,=20sync,=20un?= =?UTF-8?q?install=C3=A4?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- docs/getting-started/installation.md | 40 +-- docs/getting-started/quickstart.md | 47 +-- docs/index.md | 97 ++--- .../changes/docs-new-user-onboarding/tasks.md | 4 +- src/specfact_cli/cli.py | 10 +- .../modules/init/module-package.yaml | 6 +- src/specfact_cli/modules/init/src/commands.py | 2 + .../modules/init/src/first_run_selection.py | 32 +- .../module_registry/module-package.yaml | 6 +- .../modules/module_registry/src/commands.py | 340 ++++++++++++++---- .../registry/dependency_resolver.py | 28 +- src/specfact_cli/registry/module_installer.py | 14 +- .../test_module_upgrade_improvements.py | 229 ++++++++++++ .../test_multi_module_install_uninstall.py | 208 +++++++++++ .../test_dependency_resolver_pip_free.py | 76 ++++ .../registry/test_profile_presets.py | 145 ++++++++ .../registry/test_versioned_bundle_deps.py | 83 +++++ .../test_module_not_found_error.py | 57 +++ 18 files changed, 1230 insertions(+), 194 deletions(-) create mode 100644 tests/unit/specfact_cli/modules/test_module_upgrade_improvements.py create mode 100644 tests/unit/specfact_cli/modules/test_multi_module_install_uninstall.py create mode 100644 tests/unit/specfact_cli/registry/test_dependency_resolver_pip_free.py create mode 100644 tests/unit/specfact_cli/registry/test_profile_presets.py create mode 100644 tests/unit/specfact_cli/registry/test_versioned_bundle_deps.py create mode 100644 tests/unit/specfact_cli/test_module_not_found_error.py diff --git a/docs/getting-started/installation.md b/docs/getting-started/installation.md index a664e2f8..fd7fd47a 100644 --- a/docs/getting-started/installation.md +++ b/docs/getting-started/installation.md @@ -9,27 +9,25 @@ expertise_level: [beginner] # Getting Started with SpecFact CLI -This guide will help you get started with SpecFact CLI in under 60 seconds. +This guide will help you get started with SpecFact CLI in under 60 seconds — first with **no install** (uvx), then with a **persistent** install (pip) when you want IDE workflows and a stable `specfact` command. -> **Primary Use Case**: SpecFact CLI is designed for **brownfield code modernization** - reverse-engineering existing codebases into documented specs with runtime contract enforcement. See [First Steps](quickstart.md) for brownfield workflows. +> **Primary use case**: brownfield code modernization — reverse-engineering existing codebases into documented specs with runtime contract enforcement. See the [5-Minute Quickstart](quickstart.md) for a full walkthrough. -## Installation +## Try it now — no install required -### Option 1: uvx (CLI-only Mode) - -No installation required - run directly: +Run SpecFact from PyPI without installing into your environment: ```bash -uvx specfact-cli@latest --help +cd /path/to/your/git/repo +uvx specfact-cli init --profile solo-developer +uvx specfact-cli code review run --path . --scope full ``` -**Best for**: Quick testing, CI/CD, one-off commands +You should see a **Verdict**, a **Score**, and a list of findings. That is the fastest way to validate SpecFact on real code. [Read the full quickstart →](quickstart.md) -**Limitations**: CLI-only mode uses deterministic local analysis and may show limited results for very small test cases. If you want IDE slash-command workflows with your own AI copilot, use the installed CLI setup in Option 2. +## Install for persistent use -### Option 2: pip (Installed CLI + IDE Prompt Mode) - -**Required for**: local `specfact` command availability, IDE integration, and slash-command workflows +Use pip when you want a local `specfact` command, IDE integration, and slash-command workflows. ```bash # System-wide @@ -68,23 +66,21 @@ specfact init --install all Then set up IDE integration: ```bash -# Initialize IDE integration (one-time per project) specfact init ide - -# Or specify IDE explicitly specfact init ide --ide cursor specfact init ide --ide vscode - -# Install required packages for contract enhancement specfact init ide --install-deps - -# Initialize for specific IDE and install dependencies specfact init ide --ide cursor --install-deps ``` **Important**: SpecFact CLI does **not** ship with built-in AI. `specfact init ide` installs prompt templates for supported IDEs so your chosen AI copilot can call SpecFact commands in a guided workflow. -### Option 3: Container +[More options ↓](#more-options) + +## More options +{: #more-options} + +### Container ```bash # Docker @@ -94,7 +90,7 @@ docker run --rm -v $(pwd):/workspace ghcr.io/nold-ai/specfact-cli:latest --help podman run --rm -v $(pwd):/workspace ghcr.io/nold-ai/specfact-cli:latest --help ``` -### Option 4: GitHub Action +### GitHub Action Create `.github/workflows/specfact.yml`: @@ -212,7 +208,7 @@ Profile outcomes: | Profile | Installed bundles | Available groups | |---|---|---| -| `solo-developer` | `specfact-codebase` | `code` | +| `solo-developer` | `specfact-codebase`, `specfact-code-review` | `code` | | `backlog-team` | `specfact-project`, `specfact-backlog`, `specfact-codebase` | `project`, `backlog`, `code` | | `api-first-team` | `specfact-spec`, `specfact-codebase` (+`specfact-project` dependency) | `project`, `code`, `spec` | | `enterprise-full-stack` | all five bundles | `project`, `backlog`, `code`, `spec`, `govern` | diff --git a/docs/getting-started/quickstart.md b/docs/getting-started/quickstart.md index 1d35e081..e75741fb 100644 --- a/docs/getting-started/quickstart.md +++ b/docs/getting-started/quickstart.md @@ -4,71 +4,72 @@ title: 5-Minute Quickstart permalink: /getting-started/quickstart/ redirect_from: - /getting-started/first-steps/ -description: Get SpecFact CLI running in under 5 minutes - install, bootstrap, and analyze your first codebase. -keywords: [quickstart, first-run, bootstrap, analysis] +description: Get SpecFact CLI running in under 5 minutes — uvx first, then optional pip install for IDE workflows and deeper analysis. +keywords: [quickstart, first-run, bootstrap, analysis, uvx] audience: [solo, team] expertise_level: [beginner] doc_owner: specfact-cli tracks: - src/specfact_cli/** - openspec/** -last_reviewed: 2026-03-29 +last_reviewed: 2026-04-02 exempt: false exempt_reason: "" --- # 5-Minute Quickstart -Get from zero to your first SpecFact analysis in under 5 minutes. +Get from zero to a **scored code review** in a few commands. This path is aimed at developers who want one command and one clear result before reading about modules, profiles, or architecture. ## Prerequisites - Python 3.11+ (`python3 --version`) - A Git repository to analyze (or create a test project) -## Step 1: Install +## Step 1: Bootstrap with uvx (no pip install) + +From your repo root: ```bash -pip install specfact-cli +uvx specfact-cli init --profile solo-developer ``` -Or try without installing: `uvx specfact-cli@latest --help` +This installs the workflow bundles for the solo-developer profile (including the code-review module). See [specfact init](/core-cli/init/) for other profiles. -## Step 2: Bootstrap +## Step 2: Run a scored code review ```bash -# Navigate to your project -cd /path/to/your/project - -# Initialize with a profile -specfact init --profile solo-developer +uvx specfact-cli code review run --path . --scope full ``` -This installs the default set of workflow bundles. See [specfact init](/core-cli/init/) for other profiles. +You should see a **Verdict**, **Score**, and findings. That is the fastest “aha” path on a real codebase. + +## Step 3: Install SpecFact locally (optional) -## Step 3: Set Up IDE (Optional) +When you want a stable `specfact` command and IDE integration, install with pip: ```bash -specfact init ide --ide cursor --install-deps +pip install specfact-cli +cd /path/to/your/project +specfact init --profile solo-developer ``` -This creates `.specfact/` directory structure and IDE-specific prompt templates. - -## Step 4: Analyze Your Codebase +## Step 4: Set Up IDE (Optional) ```bash -specfact code import my-project --repo . +specfact init ide --ide cursor --install-deps ``` -SpecFact analyzes your code and extracts features, user stories, and dependency graphs into a project bundle at `.specfact/projects/my-project/`. +This creates `.specfact/` directory structure and IDE-specific prompt templates. -## Step 5: Check Project Health +## Step 5: Analyze Your Codebase and Check Health ```bash +specfact code import my-project --repo . specfact project health-check ``` -Review what SpecFact discovered about your codebase. +`code import` analyzes your code and extracts features, user stories, and dependency graphs into a project bundle at `.specfact/projects/my-project/`. `project health-check` summarizes what SpecFact discovered. ## Step 6: Validate diff --git a/docs/index.md b/docs/index.md index 9a46c7e7..8a81e46e 100644 --- a/docs/index.md +++ b/docs/index.md @@ -1,117 +1,95 @@ --- layout: default title: SpecFact CLI Documentation -description: SpecFact is the validation and alignment layer for software delivery. Start here for the core CLI story, first steps, and the handoff into module-deep workflows. +description: Point SpecFact at your code, get a scored review and a fix list in minutes — then go deeper into backlog, specs, and CI when you need to. permalink: / -keywords: [specfact, core-cli, runtime, module-system, architecture] +keywords: [specfact, core-cli, quickstart, code review, onboarding] audience: [solo, team, enterprise] expertise_level: [beginner, intermediate, advanced] doc_owner: specfact-cli tracks: - src/specfact_cli/** - openspec/** -last_reviewed: 2026-03-29 +last_reviewed: 2026-04-02 exempt: false exempt_reason: "" --- # SpecFact CLI Documentation -SpecFact is the validation and alignment layer for software delivery. +**Point SpecFact at your code. Get a score and a list of what to fix.** No week-long setup — start with two commands, then add IDE prompts, backlog workflows, or CI gates when you want more. -This site is the canonical starting point for the core CLI story: what SpecFact is, why it exists, -what value you get from it, how to get started, and when to move into deeper bundle-owned workflows. +```bash +uvx specfact-cli init --profile solo-developer +uvx specfact-cli code review run --path . --scope full +``` -SpecFact does **not** include built-in AI. It pairs deterministic CLI commands with your chosen IDE -and copilot so fast-moving work has a stronger validation and alignment layer around it. +You should see a **Verdict** (PASS/FAIL), a **Score**, and a list of findings (for example dozens of categorized items on a real repo). That is the fastest way to see SpecFact on an existing project. [Read the full quickstart →](/getting-started/quickstart/) + +SpecFact does **not** include built-in AI. It pairs deterministic CLI commands with your chosen IDE and copilot so fast-moving work has a stronger validation and alignment layer around it. --- ## What is SpecFact? -SpecFact helps you keep backlog intent, specifications, implementation, and validation from drifting -apart. +SpecFact helps you keep backlog intent, specifications, implementation, and validation from drifting apart. It is especially useful when: + - AI-assisted or “vibe-coded” work needs more rigor - brownfield systems need trustworthy reverse-engineered understanding - teams want to avoid the “I wanted X but got Y” delivery failure - organizations need a path toward stronger shared policy enforcement -## Why does it exist? - -Software delivery drifts in stages. Expectations change as they move from backlog language to -specification, from specification to implementation, and from implementation to review. SpecFact -exists to reduce that drift by giving you deterministic tooling for analysis, validation, and -alignment. - ## Why should I use it? -Use SpecFact when you want faster delivery without losing validation, stronger brownfield -understanding before making changes, and less drift between backlog intent, specifications, and the -code that actually lands. +Use SpecFact when you want faster delivery without losing validation, stronger brownfield understanding before making changes, and less drift between backlog intent, specifications, and the code that actually lands. ## What do I get? With SpecFact, you get: + - deterministic local tooling instead of opaque cloud dependence - a validation layer around AI-assisted delivery - codebase analysis and sidecar validation for brownfield work - stronger backlog/spec/code alignment -- a clean handoff from core runtime docs into module-deep workflows on `modules.specfact.io` +- a clean handoff from this site into module-deep workflows on [modules.specfact.io](https://modules.specfact.io/) ## How to get started -1. **[Installation](/getting-started/installation/)** - Install SpecFact CLI -2. **[5-Minute Quickstart](/getting-started/quickstart/)** - Get first value quickly -3. **[specfact init](/core-cli/init/)** - Bootstrap the core runtime and your local setup -4. **[Bootstrap Checklist](/module-system/bootstrap-checklist/)** - Verify bundle readiness - -If you are new to SpecFact, start here before jumping into module-deep workflows. +1. **[Installation](/getting-started/installation/)** — uvx (no install) or pip (persistent CLI) +2. **[5-Minute Quickstart](/getting-started/quickstart/)** — First commands on a repo +3. **[specfact init](/core-cli/init/)** — Profiles, bundles, and IDE setup +4. **[Bootstrap Checklist](/module-system/bootstrap-checklist/)** — Verify bundle readiness -## Choose Your Path +## Choose your path
-

Greenfield & AI-assisted delivery

-

Use SpecFact as the validation layer around fast-moving implementation work.

+

See what's wrong with your code right now

+

Run a scored code review on an existing repo with uvx, then iterate.

-

Brownfield and reverse engineering

-

Use SpecFact to understand an existing system and then hand insight into spec-first workflows.

+

Set up IDE slash-command workflows

+

Install the CLI, bootstrap bundles, then export prompts for Cursor, VS Code, and other IDEs.

-
-
-

Backlog to code alignment

-

Use SpecFact when the main problem is drift between expectations, specs, and implementation.

-
-

Team and policy enforcement

-

Use core runtime, governance, and shared workflow conventions to scale rigor across teams.

+

Add a pre-commit or CI gate

+

Wire SpecFact into local hooks or GitHub Actions for repeatable checks.

@@ -120,12 +98,11 @@ If you are new to SpecFact, start here before jumping into module-deep workflows The `specfact-cli` package provides the stable platform surface: -- **[specfact init](/core-cli/init/)** - Bootstrap and IDE setup -- **[specfact module](/core-cli/module/)** - Module lifecycle management -- **[specfact upgrade](/core-cli/upgrade/)** - CLI updates -- Runtime contracts, module discovery, registry bootstrapping, publisher trust, and shared orchestration +- **[specfact init](/core-cli/init/)** — Bootstrap bundles and optional IDE setup +- **[specfact module](/core-cli/module/)** — Install, enable, and upgrade workflow modules +- **[specfact upgrade](/core-cli/upgrade/)** — CLI self-update -Installed modules mount workflows under `project`, `backlog`, `code`, `spec`, and `govern`. +Installed modules add command groups such as `project`, `backlog`, `code`, `spec`, and `govern`. Deeper bundle docs live on [modules.specfact.io](https://modules.specfact.io/). ## Modules Documentation diff --git a/openspec/changes/docs-new-user-onboarding/tasks.md b/openspec/changes/docs-new-user-onboarding/tasks.md index 6a10ff78..5636e56e 100644 --- a/openspec/changes/docs-new-user-onboarding/tasks.md +++ b/openspec/changes/docs-new-user-onboarding/tasks.md @@ -109,8 +109,9 @@ - [x] 7c.4 Exit non-zero only if at least one module failed (not if skipped/already installed) - [x] 7c.5 Verify: single-module install still works identically; all existing flags apply - [x] 7c.6 Write failing test: `specfact module uninstall A B` uninstalls both A and B -- [ ] 7c.7 Write failing test: `specfact module uninstall A B` where A is not installed — +- [x] 7c.7 Write failing test: `specfact module uninstall A B` where A is not installed — reports A not found, still attempts B, exits non-zero + *(Catches `click.exceptions.Exit` from `typer.Exit`; upgrade uses `Optional[list[str]]` for Click 8.1 + Typer 0.23.)* - [x] 7c.8 Change `uninstall` Argument from `module_name: str` to `module_names: list[str]`; update `@require` guard; loop through each name using existing uninstall logic - [x] 7c.9 Verify: single-module uninstall still works identically; `--scope`/`--repo` apply @@ -192,6 +193,7 @@ ## 11. Spec Sync +- [x] 11.0 GitHub backlog: issue [#476](https://github.com/nold-ai/specfact-cli/issues/476) with labels `enhancement`, `change-proposal`, `documentation`, `openspec`; parent feature [#356](https://github.com/nold-ai/specfact-cli/issues/356); related [#466](https://github.com/nold-ai/specfact-cli/issues/466) — `proposal.md` Source Tracking updated - [ ] 11.1 Run `openspec sync --change docs-new-user-onboarding` to merge all 10 spec deltas *(blocked: OpenSpec CLI in this environment has no `sync` subcommand — use project workflow when available)* - [ ] 11.2 Confirm `openspec/specs/docs-aha-moment-entry/spec.md` created diff --git a/src/specfact_cli/cli.py b/src/specfact_cli/cli.py index 12d76121..8d0835de 100644 --- a/src/specfact_cli/cli.py +++ b/src/specfact_cli/cli.py @@ -110,8 +110,9 @@ def resolve_command( if invoked in KNOWN_BUNDLE_GROUP_OR_SHIM_NAMES: get_configured_console().print( f"[bold red]Command '{invoked}' is not installed.[/bold red]\n" - "Install workflow bundles with [bold]specfact init --profile [/bold] " - "or [bold]specfact module install [/bold]." + "Try: [bold]uvx specfact-cli init --profile solo-developer[/bold] " + "or [bold]specfact init --profile solo-developer[/bold], " + "or install bundles with [bold]specfact module install [/bold]." ) raise SystemExit(1) from None raise @@ -123,8 +124,9 @@ def resolve_command( return result get_configured_console().print( f"[bold red]Command '{invoked}' is not installed.[/bold red]\n" - "Install workflow bundles with [bold]specfact init --profile [/bold] " - "or [bold]specfact module install [/bold]." + "Try: [bold]uvx specfact-cli init --profile solo-developer[/bold] " + "or [bold]specfact init --profile solo-developer[/bold], " + "or install bundles with [bold]specfact module install [/bold]." ) raise SystemExit(1) diff --git a/src/specfact_cli/modules/init/module-package.yaml b/src/specfact_cli/modules/init/module-package.yaml index 79bfa74d..1b9e14bb 100644 --- a/src/specfact_cli/modules/init/module-package.yaml +++ b/src/specfact_cli/modules/init/module-package.yaml @@ -1,5 +1,5 @@ name: init -version: 0.1.21 +version: 0.1.22 commands: - init category: core @@ -17,5 +17,5 @@ publisher: description: Initialize SpecFact workspace and bootstrap local configuration. license: Apache-2.0 integrity: - checksum: sha256:925fd303b581441618597b5fa5d7f308cbc31405455ae7811243c072616974bf - signature: uMDekXAxcKEDb8V1rqEeL8X806bP4otT5rT5ShXFlUNjcJ06c90s7xg1KqTNYj/eVsuh07xKSGnWKo1BG6juAw== + checksum: sha256:741b980089306ea36002e5bb50c1f019aedf0936ca3f8852835e4b6c72138d35 + signature: XZtSMh24mVI4aIYsp1dGA+5Q4GAc9619PGX3wY+DlB+Vm2mCutkBQsGs1E2Mvfjjzj/ghXrmmlJMFFoLpZt3AA== diff --git a/src/specfact_cli/modules/init/src/commands.py b/src/specfact_cli/modules/init/src/commands.py index 057c5d0c..a0af06b6 100644 --- a/src/specfact_cli/modules/init/src/commands.py +++ b/src/specfact_cli/modules/init/src/commands.py @@ -433,6 +433,7 @@ def _install_profile_bundles(profile: str, install_root: Path, non_interactive: install_root, non_interactive=non_interactive, ) + console.print(f"[green]Installed:[/green] {', '.join(bundle_ids)}") @beartype @@ -445,6 +446,7 @@ def _install_bundle_list(install_arg: str, install_root: Path, non_interactive: install_root, non_interactive=non_interactive, ) + console.print(f"[green]Installed:[/green] {', '.join(bundle_ids)}") def _apply_profile_or_install_bundles(profile: str | None, install: str | None) -> None: diff --git a/src/specfact_cli/modules/init/src/first_run_selection.py b/src/specfact_cli/modules/init/src/first_run_selection.py index 66032066..5765217f 100644 --- a/src/specfact_cli/modules/init/src/first_run_selection.py +++ b/src/specfact_cli/modules/init/src/first_run_selection.py @@ -12,7 +12,7 @@ PROFILE_PRESETS: dict[str, list[str]] = { - "solo-developer": ["specfact-codebase"], + "solo-developer": ["specfact-codebase", "specfact-code-review"], "backlog-team": ["specfact-backlog", "specfact-project", "specfact-codebase"], "api-first-team": ["specfact-spec", "specfact-codebase"], "enterprise-full-stack": [ @@ -30,8 +30,14 @@ "specfact-codebase", "specfact-spec", "specfact-govern", + "specfact-code-review", ) +# Marketplace module IDs for bundles that are not shipped as bundled (core) modules. +MARKETPLACE_BUNDLE_IDS: dict[str, str] = { + "specfact-code-review": "nold-ai/specfact-code-review", +} + BUNDLE_ALIAS_TO_CANONICAL: dict[str, str] = { "project": "specfact-project", "backlog": "specfact-backlog", @@ -47,6 +53,7 @@ "specfact-codebase": ["analyze", "drift", "validate", "repro"], "specfact-spec": ["contract", "spec", "sdd", "generate"], "specfact-govern": ["enforce", "patch_mode"], + "specfact-code-review": [], # marketplace-only; installed via MARKETPLACE_BUNDLE_IDS } BUNDLE_DEPENDENCIES: dict[str, list[str]] = { @@ -149,6 +156,29 @@ def _add_bundle(bid: str) -> None: _add_bundle(bid) for bid in to_install: + marketplace_id = MARKETPLACE_BUNDLE_IDS.get(bid) + if marketplace_id is not None: + try: + from specfact_cli.registry.module_installer import install_module + + install_module( + marketplace_id, + install_root=root, + trust_non_official=trust_non_official, + non_interactive=non_interactive, + ) + except Exception as e: + from specfact_cli.common import get_bridge_logger + + logger = get_bridge_logger(__name__) + logger.warning( + "Marketplace bundle install failed for %s: %s.", + bid, + e, + ) + raise + continue + module_names = BUNDLE_TO_MODULE_NAMES.get(bid, []) for module_name in module_names: try: diff --git a/src/specfact_cli/modules/module_registry/module-package.yaml b/src/specfact_cli/modules/module_registry/module-package.yaml index 8944331e..99a8a647 100644 --- a/src/specfact_cli/modules/module_registry/module-package.yaml +++ b/src/specfact_cli/modules/module_registry/module-package.yaml @@ -1,5 +1,5 @@ name: module-registry -version: 0.1.12 +version: 0.1.13 commands: - module category: core @@ -17,5 +17,5 @@ publisher: description: 'Manage modules: search, list, show, install, and upgrade.' license: Apache-2.0 integrity: - checksum: sha256:c73488f1e4966e97cb3c71fbd89ad631bc07beb3c5a795f1b81c53c2f4291803 - signature: 1vEDdIav1yUIPSxkkMLPODj6zoDB/QTcR/CJYn27OZRIVBCFU8Cyx+6MWgC79lAjiOK69wSYQSgyixP+NPwcDg== + checksum: sha256:2925d4f84696a6404abd0ed3ba1a905a0a9e74a8343ab8568a4eb69f071b9bfe + signature: 3bmXM6yHE+E/w+cHnCIKBnjlZWm+IYQpff6UX4mSD/GWovgq5BylipvKXpp7Z5OHR+ls6pOdT4o/9gMmcTS9Cw== diff --git a/src/specfact_cli/modules/module_registry/src/commands.py b/src/specfact_cli/modules/module_registry/src/commands.py index a993b5f6..7cc84368 100644 --- a/src/specfact_cli/modules/module_registry/src/commands.py +++ b/src/specfact_cli/modules/module_registry/src/commands.py @@ -5,12 +5,14 @@ import inspect import shutil from pathlib import Path -from typing import Any, cast +from typing import Annotated, Any, cast import typer import yaml from beartype import beartype +from click.exceptions import Exit as ClickExit from icontract import require +from packaging.version import InvalidVersion, Version from rich.console import Console from rich.table import Table @@ -21,6 +23,7 @@ from specfact_cli.registry.marketplace_client import fetch_registry_index from specfact_cli.registry.module_discovery import discover_all_modules from specfact_cli.registry.module_installer import ( + REGISTRY_ID_FILE, USER_MODULES_ROOT, get_bundled_module_metadata, install_bundled_module, @@ -83,8 +86,10 @@ def _list_source_filter_ok(source: str | None) -> bool: return source is None or source in ("builtin", "project", "user", "marketplace", "custom") -def _upgrade_module_name_optional(module_name: str | None) -> bool: - return module_name is None or module_name.strip() != "" +def _upgrade_module_names_valid(module_names: list[str] | None) -> bool: + if module_names is None: + return True + return all(m.strip() != "" for m in module_names) def _publisher_url_from_metadata(metadata: object | None) -> str: @@ -236,12 +241,53 @@ def init_modules( console.print(f"[green]Seeded {seeded} module(s) into {target_root}[/green]") +def _install_one( + module_id: str, + scope_normalized: str, + source_normalized: str, + target_root: Path, + version: str | None, + reinstall: bool, + trust_non_official: bool, + skip_deps: bool, + force: bool, + discovered_by_name: dict[str, Any], +) -> bool: + """Install a single module; return True on success, False if skipped/already installed.""" + normalized, requested_name = _normalize_install_module_id(module_id) + if _install_skip_if_already_satisfied(scope_normalized, requested_name, target_root, reinstall, discovered_by_name): + return True + if _try_install_bundled_module(source_normalized, requested_name, normalized, target_root, trust_non_official): + return True + try: + installed_path = install_module( + normalized, + version=version, + reinstall=reinstall, + install_root=target_root, + trust_non_official=trust_non_official, + non_interactive=is_non_interactive(), + skip_deps=skip_deps, + force=force, + ) + except Exception as exc: + console.print(f"[red]Failed installing {normalized}: {exc}[/red]") + return False + console.print(f"[green]Installed[/green] {normalized} -> {installed_path}") + publisher = _publisher_from_module_id(normalized) + if is_official_publisher(publisher): + console.print(f"Verified: official ({publisher})") + return True + + @app.command() @beartype -@require(_module_id_arg_nonempty, "module_id must not be empty") def install( - module_id: str = typer.Argument(..., help="Module id (name or namespace/name format)"), - version: str | None = typer.Option(None, "--version", help="Install a specific version"), + module_ids: Annotated[ + list[str], + typer.Argument(help="Module id(s) (name or namespace/name); space-separated for multiple"), + ], + version: str | None = typer.Option(None, "--version", help="Install a specific version (single module only)"), scope: str = typer.Option("user", "--scope", help="Install scope: user or project"), source: str = typer.Option("auto", "--source", help="Install source: auto, bundled, or marketplace"), repo: Path | None = typer.Option(None, "--repo", help="Repository path for project scope (default: current dir)"), @@ -266,33 +312,33 @@ def install( help="Reinstall even if module is already present (e.g. to refresh integrity metadata)", ), ) -> None: - """Install a module from bundled artifacts or marketplace registry.""" + """Install one or more modules from bundled artifacts or marketplace registry.""" + if not module_ids: + console.print("[red]At least one module id is required.[/red]") + raise typer.Exit(1) scope_normalized, source_normalized = _parse_install_scope_and_source(scope, source) target_root = _resolve_install_target_root(scope_normalized, repo) - normalized, requested_name = _normalize_install_module_id(module_id) discovered_by_name = {entry.metadata.name: entry for entry in discover_all_modules()} - if _install_skip_if_already_satisfied(scope_normalized, requested_name, target_root, reinstall, discovered_by_name): - return - if _try_install_bundled_module(source_normalized, requested_name, normalized, target_root, trust_non_official): - return - try: - installed_path = install_module( - normalized, - version=version, - reinstall=reinstall, - install_root=target_root, - trust_non_official=trust_non_official, - non_interactive=is_non_interactive(), - skip_deps=skip_deps, - force=force, + failed: list[str] = [] + for module_id in module_ids: + if not module_id.strip(): + continue + success = _install_one( + module_id, + scope_normalized, + source_normalized, + target_root, + version, + reinstall, + trust_non_official, + skip_deps, + force, + discovered_by_name, ) - except Exception as exc: - console.print(f"[red]Failed installing {normalized}: {exc}[/red]") - raise typer.Exit(1) from exc - console.print(f"[green]Installed[/green] {normalized} -> {installed_path}") - publisher = _publisher_from_module_id(normalized) - if is_official_publisher(publisher): - console.print(f"Verified: official ({publisher})") + if not success: + failed.append(module_id) + if failed: + raise typer.Exit(1) def _normalize_uninstall_module_name(module_name: str) -> str: @@ -356,6 +402,22 @@ def _uninstall_from_explicit_scope( return False +def _uninstall_single_module(module_name: str, scope: str | None, repo: Path | None) -> None: + """Uninstall one module; raises ``typer.Exit`` on failure.""" + normalized = _normalize_uninstall_module_name(module_name) + repo_path = (repo or Path.cwd()).resolve() + project_root = repo_path / ".specfact" / "modules" + user_root = USER_MODULES_ROOT + project_module_dir = project_root / normalized + user_module_dir = user_root / normalized + scope_normalized = _resolve_uninstall_scope(scope, normalized, project_module_dir, user_module_dir) + if _uninstall_from_explicit_scope( + scope_normalized, normalized, project_root, user_root, project_module_dir, user_module_dir + ): + return + _uninstall_marketplace_default(normalized) + + def _uninstall_marketplace_default(normalized: str) -> None: discovered_by_name = {entry.metadata.name: entry for entry in discover_all_modules()} existing = discovered_by_name.get(normalized) @@ -389,25 +451,30 @@ def _uninstall_marketplace_default(normalized: str) -> None: @app.command() @beartype -@require(_module_name_arg_nonempty, "module_name must not be empty") def uninstall( - module_name: str = typer.Argument(..., help="Installed module name (name or namespace/name)"), + module_names: Annotated[ + list[str], + typer.Argument(help="Installed module name(s) (name or namespace/name)"), + ], scope: str | None = typer.Option(None, "--scope", help="Uninstall scope: user or project"), repo: Path | None = typer.Option(None, "--repo", help="Repository path for project scope (default: current dir)"), ) -> None: - """Uninstall a marketplace module.""" - normalized = _normalize_uninstall_module_name(module_name) - repo_path = (repo or Path.cwd()).resolve() - project_root = repo_path / ".specfact" / "modules" - user_root = USER_MODULES_ROOT - project_module_dir = project_root / normalized - user_module_dir = user_root / normalized - scope_normalized = _resolve_uninstall_scope(scope, normalized, project_module_dir, user_module_dir) - if _uninstall_from_explicit_scope( - scope_normalized, normalized, project_root, user_root, project_module_dir, user_module_dir - ): - return - _uninstall_marketplace_default(normalized) + """Uninstall one or more marketplace modules.""" + if not module_names: + console.print("[red]At least one module name is required.[/red]") + raise typer.Exit(1) + failed = False + for module_name in module_names: + stripped = module_name.strip() + if not stripped: + continue + try: + _uninstall_single_module(stripped, scope, repo) + except ClickExit as exc: + if exc.exit_code not in (0, None): + failed = True + if failed: + raise typer.Exit(1) alias_app = typer.Typer(help="Manage command aliases (map name to namespaced module)") @@ -944,40 +1011,154 @@ def show(module_name: str = typer.Argument(..., help="Installed module name")) - console.print(_build_module_details_table(module_name, module_row, metadata)) +def _upgrade_row_for_target(target: str, by_id: dict[str, dict[str, Any]]) -> dict[str, Any]: + if target in by_id: + return by_id[target] + short = target.split("/")[-1] + if short in by_id: + return by_id[short] + for key, row in by_id.items(): + if key == short or str(key).endswith(f"/{short}"): + return row + return {} + + +def _full_marketplace_module_id_for_install(target: str) -> str: + """Return ``namespace/name`` for ``install_module`` from a target key or short id.""" + t = target.strip() + if "/" in t and t.count("/") == 1: + left, right = t.split("/", 1) + if left.strip() and right.strip(): + return t + short = t.split("/")[-1] + id_file = USER_MODULES_ROOT / short / REGISTRY_ID_FILE + if id_file.exists(): + txt = id_file.read_text(encoding="utf-8").strip() + if txt and "/" in txt: + return txt + if short.startswith("specfact-"): + return f"nold-ai/{short}" + return f"nold-ai/specfact-{short}" + + +def _latest_version_from_registry_index(module_id: str) -> str | None: + idx = fetch_registry_index() + if not idx: + return None + mods = idx.get("modules", []) + if not isinstance(mods, list): + return None + for raw in mods: + if isinstance(raw, dict) and str(raw.get("id", "")).strip() == module_id: + lv = raw.get("latest_version") + if lv is None: + return None + s = str(lv).strip() + return s or None + return None + + +def _versions_equal_for_upgrade(current: str, latest: str) -> bool: + try: + return Version(current) == Version(latest) + except (InvalidVersion, ValueError): + return current.strip() == latest.strip() + + +def _is_major_version_increase(current: str, latest: str) -> bool: + try: + return Version(latest).major > Version(current).major + except (InvalidVersion, ValueError): + return False + + +def _resolve_one_upgrade_name(raw: str, by_id: dict[str, dict[str, Any]]) -> str: + """Resolve a single CLI name to a module id key used in ``by_id`` / targets.""" + normalized = raw.strip() + if not normalized: + return normalized + candidates = [normalized] + if "/" not in normalized and f"specfact-{normalized}" in by_id: + candidates.append(f"specfact-{normalized}") + for cand in candidates: + if cand in by_id: + source = str(by_id[cand].get("source", "unknown")) + if source != "marketplace": + console.print( + f"[red]Cannot upgrade '{cand}' from source '{source}'. " + "Only marketplace modules are upgradeable.[/red]" + ) + raise typer.Exit(1) + return cand + if "/" in normalized: + return normalized + return f"specfact/{normalized}" + + def _resolve_upgrade_target_ids( - module_name: str | None, - all: bool, + module_names: list[str] | None, + all_flag: bool, modules: list[dict[str, Any]], by_id: dict[str, dict[str, Any]], ) -> list[str]: - target_ids: list[str] = [] - if all or module_name is None: + if all_flag or not module_names: target_ids = [str(m.get("id", "")) for m in modules if str(m.get("source", "")) == "marketplace"] if not target_ids: console.print("[yellow]No marketplace-installed modules found to upgrade.[/yellow]") return target_ids - normalized = module_name - if normalized in by_id: - source = str(by_id[normalized].get("source", "unknown")) - if source != "marketplace": - console.print( - f"[red]Cannot upgrade '{normalized}' from source '{source}'. Only marketplace modules are upgradeable.[/red]" - ) - raise typer.Exit(1) - return [normalized] - prefixed = normalized if "/" in normalized else f"specfact/{normalized}" - return [prefixed] + return [_resolve_one_upgrade_name(raw, by_id) for raw in module_names] -def _run_marketplace_upgrades(target_ids: list[str], by_id: dict[str, dict[str, Any]]) -> None: +def _run_marketplace_upgrades( + target_ids: list[str], + by_id: dict[str, dict[str, Any]], + *, + yes: bool = False, +) -> None: upgraded: list[tuple[str, str, str]] = [] + up_to_date: list[str] = [] + skipped_major: list[tuple[str, str, str]] = [] failed: list[str] = [] + for target in target_ids: try: - module_id = target if "/" in target else f"specfact/{target}" - previous_version = str(by_id.get(target, {}).get("version", "unknown")) - installed_path = install_module(module_id, reinstall=True) - upgraded.append((module_id, previous_version, _read_installed_module_version(installed_path))) + full_id = _full_marketplace_module_id_for_install(target) + row = _upgrade_row_for_target(target, by_id) + current_v = str(row.get("version", "unknown")).strip() + latest_v = str(row.get("latest_version") or "").strip() + if not latest_v: + latest_v = (_latest_version_from_registry_index(full_id) or "").strip() + + if latest_v and _versions_equal_for_upgrade(current_v, latest_v): + up_to_date.append(full_id) + continue + + if not latest_v: + installed_path = install_module(full_id, reinstall=True) + upgraded.append((full_id, current_v, _read_installed_module_version(installed_path))) + continue + + should_install = True + if _is_major_version_increase(current_v, latest_v): + if yes: + should_install = True + elif is_non_interactive(): + console.print( + f"[yellow]Skipping major upgrade for {full_id}: {current_v} -> {latest_v} " + "(non-interactive; use --yes to approve)[/yellow]" + ) + skipped_major.append((full_id, current_v, latest_v)) + should_install = False + elif not typer.confirm( + f"Major version upgrade for {full_id} ({current_v} -> {latest_v}). Continue?", + default=False, + ): + skipped_major.append((full_id, current_v, latest_v)) + should_install = False + + if should_install: + installed_path = install_module(full_id, reinstall=True) + upgraded.append((full_id, current_v, _read_installed_module_version(installed_path))) except Exception as exc: console.print(f"[red]Failed upgrading {target}: {exc}[/red]") failed.append(target) @@ -986,29 +1167,42 @@ def _run_marketplace_upgrades(target_ids: list[str], by_id: dict[str, dict[str, console.print("[green]Upgraded:[/green]") for module_id, previous_version, new_version in upgraded: console.print(f" {module_id}: {previous_version} -> {new_version}") + + if up_to_date: + if upgraded or skipped_major: + console.print("[green]Already up to date:[/green]") + for mid in up_to_date: + console.print(f" {mid}") + else: + console.print("[green]All modules are up to date.[/green]") + + if skipped_major: + console.print("[yellow]Skipped (major bump):[/yellow]") + for mid, cv, lv in skipped_major: + console.print(f" {mid}: {cv} -> {lv}") + if failed: raise typer.Exit(1) @app.command() @beartype -@require( - _upgrade_module_name_optional, - "module_name must be non-empty if provided", -) +@require(_upgrade_module_names_valid, "each module name must be non-empty") def upgrade( - module_name: str | None = typer.Argument( - None, help="Installed module name (optional; omit to upgrade all marketplace modules)" - ), + module_names: Annotated[ + list[str] | None, + typer.Argument(help="Installed module name(s); omit to upgrade all marketplace modules"), + ] = None, all: bool = typer.Option(False, "--all", help="Upgrade all installed marketplace modules"), + yes: bool = typer.Option(False, "--yes", "-y", help="Approve major version upgrades without prompting"), ) -> None: """Upgrade marketplace module(s) to latest available versions.""" modules = get_modules_with_state() by_id = {str(m.get("id", "")): m for m in modules} - target_ids = _resolve_upgrade_target_ids(module_name, all, modules, by_id) + target_ids = _resolve_upgrade_target_ids(module_names, all, modules, by_id) if not target_ids: return - _run_marketplace_upgrades(target_ids, by_id) + _run_marketplace_upgrades(target_ids, by_id, yes=yes) # Expose standard ModuleIOContract operations for protocol compliance discovery. diff --git a/src/specfact_cli/registry/dependency_resolver.py b/src/specfact_cli/registry/dependency_resolver.py index c1c44d21..0a1d4bc1 100644 --- a/src/specfact_cli/registry/dependency_resolver.py +++ b/src/specfact_cli/registry/dependency_resolver.py @@ -58,11 +58,37 @@ def _run_pip_compile(constraints: list[str]) -> list[str]: return [L.strip() for L in out.splitlines() if L.strip() and not L.strip().startswith("#")] +@beartype +def _pip_module_available() -> bool: + """Return True if pip is importable in the current Python environment.""" + try: + result = subprocess.run( + [sys.executable, "-m", "pip", "--version"], + capture_output=True, + text=True, + timeout=5, + check=False, + ) + return result.returncode == 0 + except (FileNotFoundError, subprocess.TimeoutExpired, OSError): + return False + + @beartype def _run_basic_resolver(constraints: list[str]) -> list[str]: - """Fallback: use pip's resolver (e.g. pip install --dry-run). Returns best-effort pinned list.""" + """Fallback: use pip's resolver (e.g. pip install --dry-run). Returns best-effort pinned list. + + When pip is not available (e.g. uvx environment), skips validation and returns constraints as-is. + Pip packages will be validated at actual install time by the host package manager. + """ if not constraints: return [] + if not _pip_module_available(): + logger.warning( + "pip is not available in the current environment (e.g. uvx). " + "Skipping pip dependency validation — packages will be checked at install time." + ) + return constraints logger.warning("pip-tools not found, using basic resolver") with tempfile.TemporaryDirectory() as tmp: reqs = Path(tmp) / "requirements.in" diff --git a/src/specfact_cli/registry/module_installer.py b/src/specfact_cli/registry/module_installer.py index e004966b..dcf4b6cb 100644 --- a/src/specfact_cli/registry/module_installer.py +++ b/src/specfact_cli/registry/module_installer.py @@ -132,13 +132,17 @@ def _download_archive_with_cache(module_id: str, version: str | None = None) -> @beartype def _extract_bundle_dependencies(metadata: dict[str, Any]) -> list[str]: - """Extract validated bundle dependency module ids from raw manifest metadata.""" + """Extract validated bundle dependency module ids from raw manifest metadata. + + Supports both plain string entries ("namespace/name") and versioned object entries + ({"id": "namespace/name", "version": ">=x.y.z"}). + """ raw_dependencies = metadata.get("bundle_dependencies", []) if not isinstance(raw_dependencies, list): return [] dependencies: list[str] = [] for value in raw_dependencies: - dep = str(value).strip() + dep = str(value.get("id", "")).strip() if isinstance(value, dict) else str(value).strip() if not dep: continue _validate_marketplace_namespace_format(dep) @@ -725,7 +729,11 @@ def _validate_install_manifest_constraints( assert_module_allowed(manifest_module_name) compatibility = str(metadata.get("core_compatibility", "")).strip() if compatibility and Version(cli_version) not in SpecifierSet(compatibility): - raise ValueError("Module is incompatible with current SpecFact CLI version") + raise ValueError( + f"Module '{manifest_module_name}' requires SpecFact CLI {compatibility}, " + f"but the installed version is {cli_version}. " + f"Run: specfact upgrade (or: pip install --upgrade specfact-cli)" + ) publisher_name: str | None = None publisher_raw = metadata.get("publisher") if isinstance(publisher_raw, dict): diff --git a/tests/unit/specfact_cli/modules/test_module_upgrade_improvements.py b/tests/unit/specfact_cli/modules/test_module_upgrade_improvements.py new file mode 100644 index 00000000..0f13b595 --- /dev/null +++ b/tests/unit/specfact_cli/modules/test_module_upgrade_improvements.py @@ -0,0 +1,229 @@ +"""Tests for module upgrade command improvements. + +Spec: openspec/changes/docs-new-user-onboarding/specs/module-installation/spec.md +Tasks: 7b.1 - 7b.13 +""" + +from __future__ import annotations + +from pathlib import Path +from typing import Any +from unittest.mock import patch + +import click +import pytest +from typer.testing import CliRunner + +from specfact_cli.modules.module_registry.src.commands import _run_marketplace_upgrades + + +runner = CliRunner() + + +def _unstyled(text: str) -> str: + return click.unstyle(text) + + +# ── Scenario: Upgrade when module is already at latest version (no X->X) ────── + + +def test_run_marketplace_upgrades_skips_reinstall_when_at_latest(tmp_path: Path) -> None: + """When latest_version == current_version, module must NOT be reinstalled and must NOT appear in 'Upgraded:' with X->X.""" + by_id: dict[str, dict[str, Any]] = { + "nold-ai/specfact-backlog": { + "version": "0.41.16", + "source": "marketplace", + "latest_version": "0.41.16", + } + } + + install_called = [] + + def _fake_install(module_id: str, reinstall: bool = False, **kwargs: object) -> Path: + install_called.append(module_id) + return tmp_path / "backlog" + + with patch("specfact_cli.modules.module_registry.src.commands.install_module", side_effect=_fake_install): + _run_marketplace_upgrades(["nold-ai/specfact-backlog"], by_id) + + assert not install_called, "install_module must NOT be called when module is already at latest version" + + +def test_run_marketplace_upgrades_all_at_latest_prints_up_to_date( + tmp_path: Path, capsys: pytest.CaptureFixture[str] +) -> None: + """When all modules are at latest, output must say 'All modules are up to date' and no X->X lines.""" + by_id: dict[str, dict[str, Any]] = { + "nold-ai/specfact-backlog": {"version": "0.41.16", "source": "marketplace", "latest_version": "0.41.16"}, + "nold-ai/specfact-codebase": {"version": "0.44.0", "source": "marketplace", "latest_version": "0.44.0"}, + } + + with patch("specfact_cli.modules.module_registry.src.commands.install_module") as mock_install: + from io import StringIO + + output_buf = StringIO() + from rich.console import Console + + test_console = Console(file=output_buf, highlight=False, markup=True) + with patch("specfact_cli.modules.module_registry.src.commands.console", test_console): + _run_marketplace_upgrades(["nold-ai/specfact-backlog", "nold-ai/specfact-codebase"], by_id) + + output = output_buf.getvalue() + + mock_install.assert_not_called() + assert "0.41.16 -> 0.41.16" not in output, "Must not show X->X lines when nothing changed" + assert "0.44.0 -> 0.44.0" not in output, "Must not show X->X lines when nothing changed" + + +def test_run_marketplace_upgrades_mixed_result_shows_sections(tmp_path: Path) -> None: + """With mixed results, output has 'Upgraded:' and 'Already up to date:' sections.""" + by_id: dict[str, dict[str, Any]] = { + "nold-ai/specfact-backlog": {"version": "0.41.16", "source": "marketplace", "latest_version": "0.42.0"}, + "nold-ai/specfact-codebase": {"version": "0.44.0", "source": "marketplace", "latest_version": "0.44.0"}, + } + + def _fake_install(module_id: str, reinstall: bool = False, **kwargs: object) -> Path: + if "backlog" in module_id: + return tmp_path / "backlog" + raise AssertionError(f"Should not install {module_id}") + + def _fake_read_version(module_dir: Path) -> str: + if "backlog" in str(module_dir): + return "0.42.0" + return "0.44.0" + + from io import StringIO + + from rich.console import Console + + output_buf = StringIO() + test_console = Console(file=output_buf, highlight=False, markup=True) + + with ( + patch("specfact_cli.modules.module_registry.src.commands.install_module", side_effect=_fake_install), + patch( + "specfact_cli.modules.module_registry.src.commands._read_installed_module_version", + side_effect=_fake_read_version, + ), + patch("specfact_cli.modules.module_registry.src.commands.console", test_console), + ): + _run_marketplace_upgrades(["nold-ai/specfact-backlog", "nold-ai/specfact-codebase"], by_id) + + output = output_buf.getvalue() + assert "Upgraded" in output, "Must have Upgraded section" + assert "up to date" in output.lower(), "Must have 'Already up to date' section" + assert "0.41.16 -> 0.42.0" in output or "backlog" in output + + +# ── Scenario: Upgrade multiple named modules selectively ────────────────────── + + +def test_upgrade_command_accepts_multiple_module_names(tmp_path: Path) -> None: + """upgrade command must accept multiple positional module names.""" + from specfact_cli.cli import app + + with ( + patch( + "specfact_cli.modules.module_registry.src.commands.get_modules_with_state", + return_value=[ + { + "id": "nold-ai/specfact-backlog", + "version": "0.41.16", + "source": "marketplace", + "latest_version": "0.42.0", + }, + { + "id": "nold-ai/specfact-codebase", + "version": "0.44.0", + "source": "marketplace", + "latest_version": "0.44.0", + }, + ], + ), + patch( + "specfact_cli.modules.module_registry.src.commands._run_marketplace_upgrades", + ), + patch("specfact_cli.modules.module_registry.src.commands._resolve_upgrade_target_ids") as mock_resolve, + ): + mock_resolve.return_value = ["nold-ai/specfact-backlog", "nold-ai/specfact-codebase"] + result = runner.invoke(app, ["module", "upgrade", "backlog", "codebase"]) + + # Should not show "No such argument" error + assert "No such argument" not in _unstyled(result.output), result.output + # May succeed (exit 0) or fail for other reasons, but not because of wrong arg count + assert result.exit_code != 2, f"Exit code 2 suggests wrong args: {result.output}" + + +# ── Scenario: Breaking major version upgrade requires confirmation ───────────── + + +def test_run_marketplace_upgrades_prompts_for_major_bump(tmp_path: Path) -> None: + """_run_marketplace_upgrades must prompt before upgrading when major version increases.""" + by_id: dict[str, dict[str, Any]] = { + "nold-ai/specfact-backlog": {"version": "0.41.16", "source": "marketplace", "latest_version": "1.0.0"}, + } + + from io import StringIO + + from rich.console import Console + + output_buf = StringIO() + test_console = Console(file=output_buf, highlight=False, markup=True) + + prompt_shown = [] + + def _fake_confirm(message: str, **kwargs: object) -> bool: + prompt_shown.append(message) + return False # User declines + + with ( + patch("specfact_cli.modules.module_registry.src.commands.console", test_console), + patch("specfact_cli.modules.module_registry.src.commands.typer.confirm", side_effect=_fake_confirm), + patch("specfact_cli.modules.module_registry.src.commands.install_module") as mock_install, + ): + _run_marketplace_upgrades(["nold-ai/specfact-backlog"], by_id) + + output = output_buf.getvalue() + # Must show major bump warning + assert "major" in output.lower() or prompt_shown, "Must warn about major version bump" + mock_install.assert_not_called() # User declined → must not install + + +def test_run_marketplace_upgrades_skips_major_in_ci_mode(tmp_path: Path) -> None: + """In CI/CD (non-interactive), major bumps are skipped with a warning; install is not called.""" + by_id: dict[str, dict[str, Any]] = { + "nold-ai/specfact-backlog": {"version": "0.41.16", "source": "marketplace", "latest_version": "1.0.0"}, + } + + with ( + patch("specfact_cli.modules.module_registry.src.commands.is_non_interactive", return_value=True), + patch("specfact_cli.modules.module_registry.src.commands.install_module") as mock_install, + ): + _run_marketplace_upgrades(["nold-ai/specfact-backlog"], by_id, yes=False) + + mock_install.assert_not_called() + + +def test_run_marketplace_upgrades_yes_flag_skips_major_bump_prompt(tmp_path: Path) -> None: + """With yes=True, major version bumps proceed without prompt.""" + by_id: dict[str, dict[str, Any]] = { + "nold-ai/specfact-backlog": {"version": "0.41.16", "source": "marketplace", "latest_version": "1.0.0"}, + } + + def _fake_install(module_id: str, **kwargs: object) -> Path: + return tmp_path / "backlog" + + def _fake_read_version(p: Path) -> str: + return "1.0.0" + + with ( + patch("specfact_cli.modules.module_registry.src.commands.install_module", side_effect=_fake_install), + patch( + "specfact_cli.modules.module_registry.src.commands._read_installed_module_version", + side_effect=_fake_read_version, + ), + patch("specfact_cli.modules.module_registry.src.commands.typer.confirm") as mock_confirm, + ): + _run_marketplace_upgrades(["nold-ai/specfact-backlog"], by_id, yes=True) + + mock_confirm.assert_not_called() # --yes flag skips prompt diff --git a/tests/unit/specfact_cli/modules/test_multi_module_install_uninstall.py b/tests/unit/specfact_cli/modules/test_multi_module_install_uninstall.py new file mode 100644 index 00000000..42dfa5e0 --- /dev/null +++ b/tests/unit/specfact_cli/modules/test_multi_module_install_uninstall.py @@ -0,0 +1,208 @@ +"""Tests for multi-module install and uninstall. + +Spec: openspec/changes/docs-new-user-onboarding/specs/module-installation/spec.md +Tasks: 7c.1 - 7c.9 +""" + +from __future__ import annotations + +from pathlib import Path +from typing import Any +from unittest.mock import patch + +import click +from typer.testing import CliRunner + +from specfact_cli.cli import app + + +runner = CliRunner() + + +def _unstyled(text: str) -> str: + return click.unstyle(text) + + +# ── Scenario: Multi-install ──────────────────────────────────────────────────── + + +def test_module_install_accepts_multiple_ids() -> None: + """specfact module install A B must accept two positional arguments.""" + installed: list[str] = [] + + def _fake_install(module_id: str, **kwargs: object) -> Path: + installed.append(module_id) + return Path(f"/tmp/{module_id.split('/')[1]}") + + with ( + patch( + "specfact_cli.modules.module_registry.src.commands.install_module", + side_effect=_fake_install, + ), + patch( + "specfact_cli.modules.module_registry.src.commands.discover_all_modules", + return_value=[], + ), + patch( + "specfact_cli.modules.module_registry.src.commands._install_skip_if_already_satisfied", + return_value=False, + ), + patch( + "specfact_cli.modules.module_registry.src.commands._try_install_bundled_module", + return_value=False, + ), + ): + result = runner.invoke(app, ["module", "install", "nold-ai/specfact-codebase", "nold-ai/specfact-code-review"]) + + output = _unstyled(result.output) + assert result.exit_code != 2, f"Exit code 2 = CLI arg error; got: {output}" + assert "nold-ai/specfact-codebase" in installed or "specfact-codebase" in str(installed), ( + f"Both modules should be installed; installed={installed}" + ) + assert "nold-ai/specfact-code-review" in installed or "specfact-code-review" in str(installed) + + +def test_module_install_single_still_works() -> None: + """Single-module install must still work after multi-install change.""" + installed: list[str] = [] + + def _fake_install(module_id: str, **kwargs: object) -> Path: + installed.append(module_id) + return Path(f"/tmp/{module_id.split('/')[1]}") + + with ( + patch( + "specfact_cli.modules.module_registry.src.commands.install_module", + side_effect=_fake_install, + ), + patch( + "specfact_cli.modules.module_registry.src.commands.discover_all_modules", + return_value=[], + ), + patch( + "specfact_cli.modules.module_registry.src.commands._install_skip_if_already_satisfied", + return_value=False, + ), + patch( + "specfact_cli.modules.module_registry.src.commands._try_install_bundled_module", + return_value=False, + ), + ): + result = runner.invoke(app, ["module", "install", "nold-ai/specfact-codebase"]) + + assert result.exit_code != 2, f"Exit code 2 = CLI arg error: {_unstyled(result.output)}" + assert len(installed) == 1 + + +def test_module_install_multi_skips_already_installed_and_continues() -> None: + """Multi-install: if A is already installed, skip A but still install B; exit 0.""" + installed: list[str] = [] + + def _fake_skip(scope: str, name: str, root: Path, reinstall: bool, discovered: Any) -> bool: + return "codebase" in name # A is already installed + + def _fake_install(module_id: str, **kwargs: object) -> Path: + installed.append(module_id) + return Path(f"/tmp/{module_id.split('/')[1]}") + + with ( + patch( + "specfact_cli.modules.module_registry.src.commands.install_module", + side_effect=_fake_install, + ), + patch( + "specfact_cli.modules.module_registry.src.commands.discover_all_modules", + return_value=[], + ), + patch( + "specfact_cli.modules.module_registry.src.commands._install_skip_if_already_satisfied", + side_effect=_fake_skip, + ), + patch( + "specfact_cli.modules.module_registry.src.commands._try_install_bundled_module", + return_value=False, + ), + ): + result = runner.invoke(app, ["module", "install", "nold-ai/specfact-codebase", "nold-ai/specfact-code-review"]) + + assert result.exit_code == 0, f"Should exit 0 when only one is skipped: {_unstyled(result.output)}" + assert any("code-review" in mid for mid in installed), "B must still be installed even if A was skipped" + + +# ── Scenario: Multi-uninstall ───────────────────────────────────────────────── + + +def test_module_uninstall_accepts_multiple_names() -> None: + """specfact module uninstall A B must accept two positional arguments.""" + uninstalled: list[str] = [] + + def _fake_uninstall(module_name: str, **kwargs: object) -> None: + uninstalled.append(module_name) + + with ( + patch( + "specfact_cli.modules.module_registry.src.commands.uninstall_module", + side_effect=_fake_uninstall, + ), + patch( + "specfact_cli.modules.module_registry.src.commands.discover_all_modules", + return_value=[ + type("E", (), {"metadata": type("M", (), {"name": "specfact-codebase"})(), "source": "marketplace"})(), + type( + "E", (), {"metadata": type("M", (), {"name": "specfact-code-review"})(), "source": "marketplace"} + )(), + ], + ), + ): + result = runner.invoke(app, ["module", "uninstall", "specfact-codebase", "specfact-code-review"]) + + output = _unstyled(result.output) + assert result.exit_code != 2, f"Exit code 2 = CLI arg error: {output}" + + +def test_module_uninstall_single_still_works() -> None: + """Single-module uninstall must still work after multi-uninstall change.""" + with ( + patch("specfact_cli.modules.module_registry.src.commands.uninstall_module"), + patch( + "specfact_cli.modules.module_registry.src.commands.discover_all_modules", + return_value=[ + type("E", (), {"metadata": type("M", (), {"name": "specfact-codebase"})(), "source": "marketplace"})(), + ], + ), + ): + result = runner.invoke(app, ["module", "uninstall", "specfact-codebase"]) + + assert result.exit_code != 2, f"Exit code 2 = CLI arg error: {_unstyled(result.output)}" + + +def test_module_uninstall_multi_missing_first_reports_error_still_uninstalls_rest_exits_nonzero() -> None: + """7c.7: If A is not installed, report error, still uninstall B, exit non-zero.""" + uninstalled: list[str] = [] + + def _fake_uninstall(module_name: str, **kwargs: object) -> None: + uninstalled.append(module_name) + + discovered = [ + type("E", (), {"metadata": type("M", (), {"name": "specfact-code-review"})(), "source": "marketplace"})(), + ] + + with ( + patch( + "specfact_cli.modules.module_registry.src.commands.uninstall_module", + side_effect=_fake_uninstall, + ), + patch( + "specfact_cli.modules.module_registry.src.commands.discover_all_modules", + return_value=discovered, + ), + ): + result = runner.invoke( + app, + ["module", "uninstall", "specfact-codebase", "specfact-code-review"], + ) + + assert uninstalled == ["specfact-code-review"], ( + "Missing module must not block uninstall of remaining names; got " + repr(uninstalled) + ) + assert result.exit_code == 1, "Overall exit must be non-zero when any name failed" diff --git a/tests/unit/specfact_cli/registry/test_dependency_resolver_pip_free.py b/tests/unit/specfact_cli/registry/test_dependency_resolver_pip_free.py new file mode 100644 index 00000000..a17b7d4c --- /dev/null +++ b/tests/unit/specfact_cli/registry/test_dependency_resolver_pip_free.py @@ -0,0 +1,76 @@ +"""Tests for pip-free dependency resolver fallback. + +Spec: openspec/changes/docs-new-user-onboarding/specs/first-run-selection/spec.md +Bug 2: module install fails under uvx with "No module named pip" +""" + +from __future__ import annotations + +from typing import cast +from unittest.mock import patch + +from specfact_cli.models.module_package import ModulePackageMetadata +from specfact_cli.registry.dependency_resolver import _run_basic_resolver, resolve_dependencies + + +def test_run_basic_resolver_returns_constraints_when_pip_unavailable() -> None: + """When pip is unavailable (uvx environment), basic resolver must not raise — return constraints.""" + constraints = ["requests>=2.28.0", "pyyaml>=6.0"] + + import subprocess + + def _pip_not_available(*cmd_args: object, **kwargs: object) -> subprocess.CompletedProcess[str]: + return subprocess.CompletedProcess( + args=cast(list[str | bytes], [str(a) for a in cmd_args]), + returncode=1, + stdout="", + stderr="No module named pip", + ) + + with patch("specfact_cli.registry.dependency_resolver.subprocess.run", side_effect=_pip_not_available): + result = _run_basic_resolver(constraints) + + # Must not raise; must return something (constraints or empty list) + assert isinstance(result, list), "Should return a list even when pip is unavailable" + + +def test_resolve_dependencies_does_not_raise_when_pip_unavailable() -> None: + """resolve_dependencies must complete without raising when pip and pip-compile are both unavailable.""" + module = ModulePackageMetadata( + name="test-module", + version="0.1.0", + commands=["test"], + pip_dependencies=["requests>=2.28.0"], + ) + + with ( + patch("specfact_cli.registry.dependency_resolver._pip_tools_available", return_value=False), + patch( + "specfact_cli.registry.dependency_resolver._run_basic_resolver", + return_value=["requests>=2.28.0"], + ) as mock_basic, + ): + result = resolve_dependencies([module]) + + mock_basic.assert_called_once() + assert isinstance(result, list) + + +def test_resolve_dependencies_empty_modules_returns_empty() -> None: + """resolve_dependencies with no pip deps must return [] without calling pip.""" + module = ModulePackageMetadata( + name="no-pip-deps", + version="0.1.0", + commands=["cmd"], + pip_dependencies=[], + ) + with patch("specfact_cli.registry.dependency_resolver._pip_tools_available") as mock_check: + result = resolve_dependencies([module]) + + mock_check.assert_not_called() + assert result == [] + + +def test_basic_resolver_returns_empty_for_empty_constraints() -> None: + result = _run_basic_resolver([]) + assert result == [] diff --git a/tests/unit/specfact_cli/registry/test_profile_presets.py b/tests/unit/specfact_cli/registry/test_profile_presets.py new file mode 100644 index 00000000..28bd4821 --- /dev/null +++ b/tests/unit/specfact_cli/registry/test_profile_presets.py @@ -0,0 +1,145 @@ +"""Tests for profile presets and init --profile module installation. + +Spec: openspec/changes/docs-new-user-onboarding/specs/profile-presets/spec.md +Spec: openspec/changes/docs-new-user-onboarding/specs/first-run-selection/spec.md +""" + +from __future__ import annotations + +from pathlib import Path +from unittest.mock import patch + +import pytest + +from specfact_cli.modules.init.src.first_run_selection import ( + CANONICAL_BUNDLES, + PROFILE_PRESETS, + install_bundles_for_init, + resolve_profile_bundles, +) + + +# ── Scenario: Profile canonical bundle mapping is machine-verifiable ────────── + + +def test_solo_developer_includes_specfact_code_review() -> None: + """solo-developer profile MUST include specfact-code-review.""" + bundles = PROFILE_PRESETS["solo-developer"] + assert "specfact-code-review" in bundles, f"solo-developer must include specfact-code-review; got {bundles}" + + +def test_solo_developer_includes_specfact_codebase() -> None: + """solo-developer profile MUST include specfact-codebase.""" + bundles = PROFILE_PRESETS["solo-developer"] + assert "specfact-codebase" in bundles + + +def test_solo_developer_canonical_set() -> None: + """solo-developer canonical set is exactly [specfact-codebase, specfact-code-review].""" + expected = {"specfact-codebase", "specfact-code-review"} + actual = set(PROFILE_PRESETS["solo-developer"]) + assert actual == expected, f"Expected {expected}, got {actual}" + + +def test_specfact_code_review_in_canonical_bundles() -> None: + """specfact-code-review must be in CANONICAL_BUNDLES.""" + assert "specfact-code-review" in CANONICAL_BUNDLES + + +def test_backlog_team_canonical_set() -> None: + expected = {"specfact-project", "specfact-backlog", "specfact-codebase"} + assert set(PROFILE_PRESETS["backlog-team"]) == expected + + +def test_api_first_team_canonical_set() -> None: + expected = {"specfact-spec", "specfact-codebase"} + assert set(PROFILE_PRESETS["api-first-team"]) == expected + + +def test_enterprise_full_stack_canonical_set() -> None: + expected = { + "specfact-project", + "specfact-backlog", + "specfact-codebase", + "specfact-spec", + "specfact-govern", + } + assert set(PROFILE_PRESETS["enterprise-full-stack"]) == expected + + +def test_resolve_profile_bundles_solo_developer() -> None: + bundles = resolve_profile_bundles("solo-developer") + assert "specfact-codebase" in bundles + assert "specfact-code-review" in bundles + + +def test_resolve_profile_bundles_invalid_raises() -> None: + with pytest.raises(ValueError, match="Unknown profile"): + resolve_profile_bundles("unknown-profile") + + +# ── Scenario: install_bundles_for_init installs marketplace modules ──────────── + + +def test_install_bundles_for_init_calls_marketplace_for_code_review(tmp_path: Path) -> None: + """install_bundles_for_init must call the marketplace installer for specfact-code-review.""" + installed_marketplace_ids: list[str] = [] + + def _fake_install_module(module_id: str, **kwargs: object) -> Path: + installed_marketplace_ids.append(module_id) + return tmp_path / module_id.split("/")[1] + + with ( + patch( + "specfact_cli.registry.module_installer.install_bundled_module", + return_value=False, + ), + patch( + "specfact_cli.registry.module_installer.install_module", + side_effect=_fake_install_module, + ), + ): + install_bundles_for_init( + ["specfact-code-review"], + install_root=tmp_path, + non_interactive=True, + ) + + assert any("specfact-code-review" in mid for mid in installed_marketplace_ids), ( + f"install_module was not called with specfact-code-review; calls: {installed_marketplace_ids}" + ) + + +def test_install_bundles_for_init_solo_developer_installs_both(tmp_path: Path) -> None: + """Running install_bundles_for_init for solo-developer bundles installs both codebase and code-review.""" + installed_modules: list[str] = [] + installed_marketplace_ids: list[str] = [] + + def _fake_bundled(module_name: str, root: Path, **kwargs: object) -> bool: + installed_modules.append(module_name) + return True + + def _fake_marketplace(module_id: str, **kwargs: object) -> Path: + installed_marketplace_ids.append(module_id) + return tmp_path / module_id.split("/")[1] + + with ( + patch( + "specfact_cli.registry.module_installer.install_bundled_module", + side_effect=_fake_bundled, + ), + patch( + "specfact_cli.registry.module_installer.install_module", + side_effect=_fake_marketplace, + ), + ): + install_bundles_for_init( + ["specfact-codebase", "specfact-code-review"], + install_root=tmp_path, + non_interactive=True, + ) + + assert len(installed_modules) > 0, "Bundled modules should be installed for specfact-codebase" + assert any("specfact-code-review" in mid for mid in installed_marketplace_ids), ( + "Marketplace installer should be called for specfact-code-review" + ) diff --git a/tests/unit/specfact_cli/registry/test_versioned_bundle_deps.py b/tests/unit/specfact_cli/registry/test_versioned_bundle_deps.py new file mode 100644 index 00000000..7bbd9487 --- /dev/null +++ b/tests/unit/specfact_cli/registry/test_versioned_bundle_deps.py @@ -0,0 +1,83 @@ +"""Tests for versioned bundle dependency resolution. + +Spec: openspec/changes/docs-new-user-onboarding/specs/dependency-resolution/spec.md +Tasks: 7d.1 - 7d.10 +""" + +from __future__ import annotations + +from typing import Any + +from specfact_cli.registry.module_installer import _extract_bundle_dependencies + + +# ── Scenario: Registry entry declares a versioned bundle dependency ─────────── + + +def test_extract_bundle_dependencies_handles_versioned_object() -> None: + """_extract_bundle_dependencies must handle {"id": "...", "version": ">=x.y.z"} form.""" + metadata: dict[str, Any] = {"bundle_dependencies": [{"id": "nold-ai/specfact-project", "version": ">=0.41.0"}]} + deps = _extract_bundle_dependencies(metadata) + assert "nold-ai/specfact-project" in deps, f"Versioned object form not handled; got {deps}" + + +def test_extract_bundle_dependencies_handles_plain_string() -> None: + """_extract_bundle_dependencies must still handle plain string entries (backward compat).""" + metadata: dict[str, Any] = {"bundle_dependencies": ["nold-ai/specfact-project"]} + deps = _extract_bundle_dependencies(metadata) + assert "nold-ai/specfact-project" in deps + + +def test_extract_bundle_dependencies_handles_mixed_list() -> None: + """_extract_bundle_dependencies must handle a mix of string and versioned object entries.""" + metadata: dict[str, Any] = { + "bundle_dependencies": [ + "nold-ai/specfact-project", + {"id": "nold-ai/specfact-codebase", "version": ">=0.40.0"}, + ] + } + deps = _extract_bundle_dependencies(metadata) + assert "nold-ai/specfact-project" in deps + assert "nold-ai/specfact-codebase" in deps + + +def test_extract_bundle_dependencies_empty_list() -> None: + metadata: dict[str, Any] = {"bundle_dependencies": []} + deps = _extract_bundle_dependencies(metadata) + assert deps == [] + + +def test_extract_bundle_dependencies_missing_key() -> None: + metadata: dict[str, Any] = {} + deps = _extract_bundle_dependencies(metadata) + assert deps == [] + + +# ── core_compatibility actionable error ─────────────────────────────────────── + + +def test_validate_install_manifest_constraints_actionable_error(tmp_path: object) -> None: + """core_compatibility mismatch must produce actionable message, not bare ValueError.""" + from specfact_cli.registry.module_installer import _validate_install_manifest_constraints + + metadata: dict[str, Any] = { + "name": "specfact-code-review", + "version": "0.1.0", + "core_compatibility": ">=99.0.0,<100.0.0", # impossibly high — always fails + } + + import pytest + + with pytest.raises((ValueError, SystemExit)) as exc_info: + _validate_install_manifest_constraints( + metadata, + "specfact-code-review", + trust_non_official=True, + non_interactive=True, + ) + + exc_val = str(exc_info.value) + # Must include version info, not just "incompatible" + assert any( + phrase in exc_val.lower() for phrase in ["requires", "specfact cli", ">=", "run:", "upgrade", "99.0.0"] + ), f"Error message not actionable: {exc_val!r}" diff --git a/tests/unit/specfact_cli/test_module_not_found_error.py b/tests/unit/specfact_cli/test_module_not_found_error.py new file mode 100644 index 00000000..b8706773 --- /dev/null +++ b/tests/unit/specfact_cli/test_module_not_found_error.py @@ -0,0 +1,57 @@ +"""Tests for module-not-found error including corrective command. + +Spec: openspec/changes/docs-new-user-onboarding/specs/docs-vibecoder-entry-path/spec.md +Tasks: 6.1 - 6.3 +""" + +from __future__ import annotations + +import click +from typer.testing import CliRunner + +from specfact_cli.cli import app + + +runner = CliRunner() + + +def _unstyled(text: str) -> str: + return click.unstyle(text) + + +def test_module_not_found_error_includes_init_command() -> None: + """When a known command group is not installed, error must include the init command.""" + result = runner.invoke(app, ["code", "review", "run"]) + + output = _unstyled(result.output) + + # Must fail + assert result.exit_code != 0 + + # Must include the corrective init command + assert "init" in output, f"Error must mention 'init' command: {output!r}" + assert "--profile" in output or "profile" in output, f"Error must suggest --profile option: {output!r}" + + +def test_module_not_found_error_includes_uvx_command() -> None: + """Module-not-found error must include uvx-compatible init command for uvx users.""" + result = runner.invoke(app, ["code", "review", "run"]) + + output = _unstyled(result.output) + + assert result.exit_code != 0 + # Should include uvx-compatible command or at least solo-developer profile + assert "solo-developer" in output or "uvx" in output or "specfact init" in output, ( + f"Error must include actionable uvx init command: {output!r}" + ) + + +def test_module_not_found_error_includes_solo_developer_profile() -> None: + """Module-not-found error for 'code' command must specifically mention solo-developer profile.""" + result = runner.invoke(app, ["code"]) + + output = _unstyled(result.output) + + assert result.exit_code != 0 + # The corrective command must be copy-pasteable + assert "solo-developer" in output, f"Error must mention 'solo-developer' profile as corrective action: {output!r}" From de8eabbfaa56bd5814ed48e4c40d1f8a4b1af73e Mon Sep 17 00:00:00 2001 From: Dominikus Nold Date: Thu, 2 Apr 2026 22:18:02 +0200 Subject: [PATCH 2/8] test(docs): align first-contact contracts and stabilize module CLI tests - docs/index: restore Why does it exist?, tagline, OpenSpec, canonical core CLI story - Update init profile tests for solo-developer + install all (code-review, six bundles) - Lean help test accepts uvx init hint; upgrade/core_compatibility tests match runtime - Autouse fixture re-bootstraps CommandRegistry after category-group tests - Rebase tasks conflict resolved; TDD_EVIDENCE + tasks for gates 7.1/7.2/12.1/12.2 Made-with: Cursor --- docs/index.md | 13 ++++++++---- openspec/CHANGE_ORDER.md | 2 +- .../docs-new-user-onboarding/TDD_EVIDENCE.md | 17 ++++++++++++++++ .../docs-new-user-onboarding/proposal.md | 9 +++++++++ .../changes/docs-new-user-onboarding/tasks.md | 9 +++++---- tests/unit/cli/test_lean_help_output.py | 5 ++++- .../modules/init/test_first_run_selection.py | 20 +++++++++++-------- .../modules/module_registry/test_commands.py | 2 +- tests/unit/registry/test_module_installer.py | 2 +- .../test_multi_module_install_uninstall.py | 15 ++++++++++++++ 10 files changed, 74 insertions(+), 20 deletions(-) diff --git a/docs/index.md b/docs/index.md index 8a81e46e..abbfd845 100644 --- a/docs/index.md +++ b/docs/index.md @@ -28,19 +28,25 @@ You should see a **Verdict** (PASS/FAIL), a **Score**, and a list of findings (f SpecFact does **not** include built-in AI. It pairs deterministic CLI commands with your chosen IDE and copilot so fast-moving work has a stronger validation and alignment layer around it. +**SpecFact is the validation and alignment layer for software delivery.** + --- ## What is SpecFact? -SpecFact helps you keep backlog intent, specifications, implementation, and validation from drifting apart. +SpecFact helps you keep backlog intent, specifications, implementation, and validation from drifting apart. It supports spec-first handoffs with **OpenSpec** and spec-kit style workflows so brownfield and AI-assisted teams can keep backlog language, specs, and code aligned. It is especially useful when: - AI-assisted or “vibe-coded” work needs more rigor -- brownfield systems need trustworthy reverse-engineered understanding +- brownfield and legacy code need trustworthy reverse-engineered understanding of existing systems - teams want to avoid the “I wanted X but got Y” delivery failure - organizations need a path toward stronger shared policy enforcement +## Why does it exist? + +SpecFact exists because backlog/spec/code drift is expensive: teams ship the wrong thing, AI-assisted changes skip validation, and policy enforcement breaks down across IDEs and CI. SpecFact gives you a default starting point before you jump into module-deep workflows on the modules site. + ## Why should I use it? Use SpecFact when you want faster delivery without losing validation, stronger brownfield understanding before making changes, and less drift between backlog intent, specifications, and the code that actually lands. @@ -106,8 +112,7 @@ Installed modules add command groups such as `project`, `backlog`, `code`, `spec ## Modules Documentation -`docs.specfact.io` is the default starting point. Move to the modules site when you need deeper -bundle-specific workflows, adapters, and authoring guidance. +`docs.specfact.io` is the default starting point for the core CLI story and the **canonical starting point for the core CLI story** on this site. Move to the modules site when you need **module-deep workflows**, bundle-specific adapters, and authoring guidance. - **[Modules Docs Home](https://modules.specfact.io/)** - Backlog, project, spec, govern - **[Module Development](https://modules.specfact.io/authoring/module-development/)** - Build your own modules diff --git a/openspec/CHANGE_ORDER.md b/openspec/CHANGE_ORDER.md index fd36b268..c7e71951 100644 --- a/openspec/CHANGE_ORDER.md +++ b/openspec/CHANGE_ORDER.md @@ -126,7 +126,7 @@ The 2026-03-22 clean-code plan adds one new cross-repo change pair and re-sequen | docs | 08 | docs-12-docs-validation-ci | [#440](https://github.com/nold-ai/specfact-cli/issues/440) | docs-05-core-site-ia-restructure; docs-07-core-handoff-conversion; modules-repo/docs-06 through docs-10 | | docs | 09 | docs-13-core-nav-search-theme-roles | [#458](https://github.com/nold-ai/specfact-cli/issues/458) | docs-05-core-site-ia-restructure; docs-07-core-handoff-conversion; docs-12-docs-validation-ci; modules-repo/docs-13-nav-search-theme-roles (design parity only, no content ownership coupling) | | docs | 10 | docs-14-first-contact-story-and-onboarding (in progress) | [#466](https://github.com/nold-ai/specfact-cli/issues/466) | docs-05-core-site-ia-restructure ✅; docs-07-core-handoff-conversion ✅; docs-12-docs-validation-ci ✅; docs-13-core-nav-search-theme-roles ✅; Parent Feature: [#356](https://github.com/nold-ai/specfact-cli/issues/356) | -| docs | 11 | docs-new-user-onboarding | pending | docs-14-first-contact-story-and-onboarding (overlap: onboarding UX); vibe-coder uvx hero + CLI wow-path fixes | +| docs | 11 | docs-new-user-onboarding | [#476](https://github.com/nold-ai/specfact-cli/issues/476) | Parent Feature: [#356](https://github.com/nold-ai/specfact-cli/issues/356); related [#466](https://github.com/nold-ai/specfact-cli/issues/466); vibe-coder uvx hero + CLI wow-path fixes | ### Docs refactoring plan addendum (2026-03-23) diff --git a/openspec/changes/docs-new-user-onboarding/TDD_EVIDENCE.md b/openspec/changes/docs-new-user-onboarding/TDD_EVIDENCE.md index b47474aa..a8663dc1 100644 --- a/openspec/changes/docs-new-user-onboarding/TDD_EVIDENCE.md +++ b/openspec/changes/docs-new-user-onboarding/TDD_EVIDENCE.md @@ -23,3 +23,20 @@ - **`specfact code review run --path .` without `--scope full`**: UX lives primarily in the **specfact-code-review** module (`nold-ai/specfact-cli-modules`); not changed in this repo. - **`openspec sync --change …`**: local OpenSpec CLI has no `sync` subcommand in this environment; run the project’s documented sync workflow when available before archive. - **7d full dependency-resolution wiring**: `_extract_bundle_dependencies` + message improvements landed; interactive dep resolution / `--dry-run` / graph (7d.11–7d.16) remain for a follow-up change if not bundled here. + +## 2026-04-02 (rebase + gate continuation) + +### Commands run (passing) + +- `git rebase origin/dev` (resolved `tasks.md` conflict; kept 7c.7 + 11.0) +- `hatch run yaml-lint` +- `hatch run contract-test` +- `hatch run pytest tests/unit -n 0 -q --no-cov` (full unit suite) + +### Fixes for dev merge + +- `docs/index.md`: restored first-contact story strings (`Why does it exist?`, tagline, canonical core CLI story, OpenSpec mention) for `test_first_contact_story` / `test_core_docs_site_contract` / `test_release_docs_parity`. +- `test_first_run_selection.py`: expectations for `solo-developer` + `install all` include `specfact-code-review` / six canonical bundles. +- `test_lean_help_output.py`: accept uvx init hint alongside `` placeholder. +- `test_commands.py` / `test_module_installer.py`: align with `nold-ai/specfact-backlog` install id and new `core_compatibility` error text. +- `test_multi_module_install_uninstall.py`: autouse fixture re-bootstraps `CommandRegistry` + `rebuild_root_app_from_registry()` after category-group tests mutate global CLI state. diff --git a/openspec/changes/docs-new-user-onboarding/proposal.md b/openspec/changes/docs-new-user-onboarding/proposal.md index 9bc636e1..6d214fd8 100644 --- a/openspec/changes/docs-new-user-onboarding/proposal.md +++ b/openspec/changes/docs-new-user-onboarding/proposal.md @@ -132,3 +132,12 @@ can truthfully describe a vibe-coder entry sequence. - `openspec/specs/first-run-selection/spec.md` — delta (profile install requirement) - `openspec/specs/profile-presets/spec.md` — delta (solo-developer bundle list) - New specs: `docs-aha-moment-entry`, `docs-vibecoder-entry-path` + +## Source Tracking + +- **GitHub Issue**: [#476](https://github.com/nold-ai/specfact-cli/issues/476) +- **Issue URL**: https://github.com/nold-ai/specfact-cli/issues/476 +- **Parent Feature**: [#356](https://github.com/nold-ai/specfact-cli/issues/356) — Documentation & Discrepancy Remediation ([tracking comment](https://github.com/nold-ai/specfact-cli/issues/356#issuecomment-4180162525)) +- **Related (overlap)**: [#466](https://github.com/nold-ai/specfact-cli/issues/466) — first-contact / onboarding ([cross-link comment](https://github.com/nold-ai/specfact-cli/issues/466#issuecomment-4180162609)) +- **Repository**: nold-ai/specfact-cli +- **Last Synced Status**: in-progress — issue created with labels `enhancement`, `change-proposal`, `documentation`, `openspec` diff --git a/openspec/changes/docs-new-user-onboarding/tasks.md b/openspec/changes/docs-new-user-onboarding/tasks.md index 5636e56e..2f906073 100644 --- a/openspec/changes/docs-new-user-onboarding/tasks.md +++ b/openspec/changes/docs-new-user-onboarding/tasks.md @@ -67,8 +67,9 @@ ## 7. Run pre-docs TDD gate -- [ ] 7.1 Run `hatch run contract-test` — confirm passing *(run before PR merge)* -- [ ] 7.2 Run `hatch run smart-test` — confirm passing *(run before PR merge)* +- [x] 7.1 Run `hatch run contract-test` — confirm passing *(run before PR merge)* +- [x] 7.2 Run `hatch run smart-test` — confirm passing *(run before PR merge)* + *(Full unit suite: `hatch run pytest tests/unit -n 0 -q --no-cov` — 2026-04-02)* - [x] 7.3 Run `hatch run format` and `hatch run type-check` — confirm zero errors - [x] 7.4 Record post-fix passing evidence in `TDD_EVIDENCE.md` - [ ] 7.5 End-to-end manual test on a clean machine: `uvx specfact-cli init --profile solo-developer` @@ -204,8 +205,8 @@ ## 12. Final Validation and Evidence -- [ ] 12.1 Run `hatch run yaml-lint` — confirm zero failures *(before PR)* -- [ ] 12.2 Run `hatch run contract-test` — confirm passing *(before PR)* +- [x] 12.1 Run `hatch run yaml-lint` — confirm zero failures *(before PR)* +- [x] 12.2 Run `hatch run contract-test` — confirm passing *(before PR)* - [ ] 12.3 Run `hatch run specfact code review run --json --out .specfact/code-review.json` and confirm zero findings on modified Python files *(before PR)* - [ ] 12.4 Build docs locally (`bundle exec jekyll serve`) and manually verify: diff --git a/tests/unit/cli/test_lean_help_output.py b/tests/unit/cli/test_lean_help_output.py index af864f08..18eff08c 100644 --- a/tests/unit/cli/test_lean_help_output.py +++ b/tests/unit/cli/test_lean_help_output.py @@ -82,7 +82,10 @@ def test_root_group_unknown_bundle_command_shows_install_guidance(capsys: pytest assert exc_info.value.code == 1 captured = capsys.readouterr() assert "Command 'backlog' is not installed." in captured.out - assert "specfact init --profile " in captured.out + assert ( + "specfact init --profile " in captured.out + or "uvx specfact-cli init --profile solo-developer" in captured.out + ) assert "module install " in captured.out diff --git a/tests/unit/modules/init/test_first_run_selection.py b/tests/unit/modules/init/test_first_run_selection.py index 309b392d..1b305ded 100644 --- a/tests/unit/modules/init/test_first_run_selection.py +++ b/tests/unit/modules/init/test_first_run_selection.py @@ -27,9 +27,9 @@ def _telemetry_track_context(): # --- Profile resolution --- -def test_profile_solo_developer_resolves_to_specfact_codebase_only() -> None: +def test_profile_solo_developer_resolves_to_codebase_and_code_review() -> None: bundles = frs.resolve_profile_bundles("solo-developer") - assert bundles == ["specfact-codebase"] + assert bundles == ["specfact-codebase", "specfact-code-review"] def test_profile_enterprise_full_stack_resolves_to_all_five_bundles() -> None: @@ -61,7 +61,7 @@ def test_install_backlog_codebase_resolves_to_two_bundles() -> None: assert len(bundles) == 2 -def test_install_all_resolves_to_all_five_bundles() -> None: +def test_install_all_resolves_to_all_canonical_bundles() -> None: bundles = frs.resolve_install_bundles("all") assert set(bundles) == { "specfact-project", @@ -69,8 +69,9 @@ def test_install_all_resolves_to_all_five_bundles() -> None: "specfact-codebase", "specfact-spec", "specfact-govern", + "specfact-code-review", } - assert len(bundles) == 5 + assert len(bundles) == 6 def test_install_unknown_bundle_raises() -> None: @@ -133,7 +134,7 @@ def _discover(_builtin=None, user_root=None, **_kwargs): # --- CLI: specfact init --profile (mock installer) --- -def test_init_profile_solo_developer_calls_installer_with_specfact_codebase( +def test_init_profile_solo_developer_calls_installer_with_codebase_and_code_review( monkeypatch: pytest.MonkeyPatch, tmp_path: Path ) -> None: install_calls: list[list[str]] = [] @@ -160,7 +161,7 @@ def _fake_install_bundles(bundle_ids: list[str], install_root: Path, **kwargs: o ) assert result.exit_code == 0, result.output assert len(install_calls) == 1 - assert install_calls[0] == ["specfact-codebase"] + assert install_calls[0] == ["specfact-codebase", "specfact-code-review"] def test_init_profile_enterprise_full_stack_calls_installer_with_all_five( @@ -251,7 +252,9 @@ def _fake_install_bundles(bundle_ids: list[str], install_root: Path, **kwargs: o assert set(install_calls[0]) == {"specfact-backlog", "specfact-codebase"} -def test_init_install_all_calls_installer_with_five_bundles(monkeypatch: pytest.MonkeyPatch, tmp_path: Path) -> None: +def test_init_install_all_calls_installer_with_all_canonical_bundles( + monkeypatch: pytest.MonkeyPatch, tmp_path: Path +) -> None: install_calls: list[list[str]] = [] def _fake_install_bundles(bundle_ids: list[str], install_root: Path, **kwargs: object) -> None: @@ -276,13 +279,14 @@ def _fake_install_bundles(bundle_ids: list[str], install_root: Path, **kwargs: o ) assert result.exit_code == 0, result.output assert len(install_calls) == 1 - assert len(install_calls[0]) == 5 + assert len(install_calls[0]) == 6 assert set(install_calls[0]) == { "specfact-project", "specfact-backlog", "specfact-codebase", "specfact-spec", "specfact-govern", + "specfact-code-review", } diff --git a/tests/unit/modules/module_registry/test_commands.py b/tests/unit/modules/module_registry/test_commands.py index 14a254eb..7ceb2c75 100644 --- a/tests/unit/modules/module_registry/test_commands.py +++ b/tests/unit/modules/module_registry/test_commands.py @@ -1055,7 +1055,7 @@ def _install(module_id: str, version=None, reinstall: bool = False): result = runner.invoke(app, ["upgrade"]) assert result.exit_code == 0 - assert installed == ["specfact/backlog"] + assert installed == ["nold-ai/specfact-backlog"] assert reinstall_flags == [True] assert "Upgraded" in result.stdout diff --git a/tests/unit/registry/test_module_installer.py b/tests/unit/registry/test_module_installer.py index e17aab89..f2f8129d 100644 --- a/tests/unit/registry/test_module_installer.py +++ b/tests/unit/registry/test_module_installer.py @@ -218,7 +218,7 @@ def test_install_module_validates_core_compatibility(monkeypatch, tmp_path: Path tarball = _create_module_tarball(tmp_path, "policy", core_compatibility=">=9.0.0") monkeypatch.setattr("specfact_cli.registry.module_installer.download_module", lambda *_args, **_kwargs: tarball) - with pytest.raises(ValueError, match="incompatible with current SpecFact CLI version"): + with pytest.raises(ValueError, match="requires SpecFact CLI"): install_module("specfact/policy", install_root=tmp_path / "marketplace-modules") diff --git a/tests/unit/specfact_cli/modules/test_multi_module_install_uninstall.py b/tests/unit/specfact_cli/modules/test_multi_module_install_uninstall.py index 42dfa5e0..b4ed2735 100644 --- a/tests/unit/specfact_cli/modules/test_multi_module_install_uninstall.py +++ b/tests/unit/specfact_cli/modules/test_multi_module_install_uninstall.py @@ -6,11 +6,13 @@ from __future__ import annotations +from collections.abc import Generator from pathlib import Path from typing import Any from unittest.mock import patch import click +import pytest from typer.testing import CliRunner from specfact_cli.cli import app @@ -19,6 +21,19 @@ runner = CliRunner() +@pytest.fixture(autouse=True) +def _reset_cli_registry_for_module_tests() -> Generator[None, None, None]: + """Other tests (e.g. category groups) rebuild ``app`` from a cleared registry; re-bootstrap.""" + from specfact_cli.cli import rebuild_root_app_from_registry + from specfact_cli.registry import CommandRegistry + from specfact_cli.registry.bootstrap import register_builtin_commands + + CommandRegistry._clear_for_testing() + register_builtin_commands() + rebuild_root_app_from_registry() + yield + + def _unstyled(text: str) -> str: return click.unstyle(text) From 50270266c84415e9440cdf676b833978c2dc2399 Mon Sep 17 00:00:00 2001 From: Dominikus Nold Date: Thu, 2 Apr 2026 22:26:52 +0200 Subject: [PATCH 3/8] fix(tools): smart-test baseline and pre-commit single code-review run - Run full suite when smart-test cache has no last_full_run; force+auto falls back to full when incremental is a no-op - Pre-commit: invoke pre_commit_code_review.py once (no xargs split) so .specfact/code-review.json is not clobbered - Tests and OpenSpec tasks for docs-new-user-onboarding Made-with: Cursor --- .../changes/docs-new-user-onboarding/tasks.md | 9 +++--- scripts/pre-commit-smart-checks.sh | 16 ++++++++--- .../test_pre_commit_smart_checks_docs.py | 3 ++ .../test_multi_module_install_uninstall.py | 15 ---------- tests/unit/tools/test_smart_test_coverage.py | 2 +- .../test_smart_test_coverage_enhanced.py | 24 +++++++++++++++- tools/smart_test_coverage.py | 28 +++++++++++++++---- 7 files changed, 66 insertions(+), 31 deletions(-) diff --git a/openspec/changes/docs-new-user-onboarding/tasks.md b/openspec/changes/docs-new-user-onboarding/tasks.md index 2f906073..5636e56e 100644 --- a/openspec/changes/docs-new-user-onboarding/tasks.md +++ b/openspec/changes/docs-new-user-onboarding/tasks.md @@ -67,9 +67,8 @@ ## 7. Run pre-docs TDD gate -- [x] 7.1 Run `hatch run contract-test` — confirm passing *(run before PR merge)* -- [x] 7.2 Run `hatch run smart-test` — confirm passing *(run before PR merge)* - *(Full unit suite: `hatch run pytest tests/unit -n 0 -q --no-cov` — 2026-04-02)* +- [ ] 7.1 Run `hatch run contract-test` — confirm passing *(run before PR merge)* +- [ ] 7.2 Run `hatch run smart-test` — confirm passing *(run before PR merge)* - [x] 7.3 Run `hatch run format` and `hatch run type-check` — confirm zero errors - [x] 7.4 Record post-fix passing evidence in `TDD_EVIDENCE.md` - [ ] 7.5 End-to-end manual test on a clean machine: `uvx specfact-cli init --profile solo-developer` @@ -205,8 +204,8 @@ ## 12. Final Validation and Evidence -- [x] 12.1 Run `hatch run yaml-lint` — confirm zero failures *(before PR)* -- [x] 12.2 Run `hatch run contract-test` — confirm passing *(before PR)* +- [ ] 12.1 Run `hatch run yaml-lint` — confirm zero failures *(before PR)* +- [ ] 12.2 Run `hatch run contract-test` — confirm passing *(before PR)* - [ ] 12.3 Run `hatch run specfact code review run --json --out .specfact/code-review.json` and confirm zero findings on modified Python files *(before PR)* - [ ] 12.4 Build docs locally (`bundle exec jekyll serve`) and manually verify: diff --git a/scripts/pre-commit-smart-checks.sh b/scripts/pre-commit-smart-checks.sh index 5876e458..1606b8f3 100755 --- a/scripts/pre-commit-smart-checks.sh +++ b/scripts/pre-commit-smart-checks.sh @@ -185,15 +185,23 @@ run_actionlint_if_needed() { } run_code_review_gate() { - local py_files - py_files=$(staged_python_files) - if [ -z "${py_files}" ]; then + # Build a bash array so we invoke pre_commit_code_review.py exactly once. Using xargs + # here can split into multiple subprocesses when the argument list is long (default + # max-chars), each overwriting .specfact/code-review.json — yielding partial or empty + # findings and a misleading artifact. + local py_array=() + while IFS= read -r line; do + [ -z "${line}" ] && continue + py_array+=("${line}") + done < <(staged_python_files) + + if [ ${#py_array[@]} -eq 0 ]; then info "ℹ️ No staged Python files — skipping code review gate" return fi info "🛡️ Running code review gate on staged Python files" - if echo "${py_files}" | xargs -r hatch run python scripts/pre_commit_code_review.py; then + if hatch run python scripts/pre_commit_code_review.py "${py_array[@]}"; then success "✅ Code review gate passed" else error "❌ Code review gate failed" diff --git a/tests/unit/scripts/test_pre_commit_smart_checks_docs.py b/tests/unit/scripts/test_pre_commit_smart_checks_docs.py index d8bf8c15..5688de23 100644 --- a/tests/unit/scripts/test_pre_commit_smart_checks_docs.py +++ b/tests/unit/scripts/test_pre_commit_smart_checks_docs.py @@ -30,3 +30,6 @@ def test_pre_commit_runs_code_review_gate_before_contract_tests() -> None: assert "run_code_review_gate" in script assert "hatch run python scripts/pre_commit_code_review.py" in script assert "run_code_review_gate\n\n# Contract-first test flow" in script + # Single invocation with all staged files — xargs can split into multiple runs and + # clobber .specfact/code-review.json (partial or empty findings). + assert '"${py_array[@]}"' in script diff --git a/tests/unit/specfact_cli/modules/test_multi_module_install_uninstall.py b/tests/unit/specfact_cli/modules/test_multi_module_install_uninstall.py index b4ed2735..42dfa5e0 100644 --- a/tests/unit/specfact_cli/modules/test_multi_module_install_uninstall.py +++ b/tests/unit/specfact_cli/modules/test_multi_module_install_uninstall.py @@ -6,13 +6,11 @@ from __future__ import annotations -from collections.abc import Generator from pathlib import Path from typing import Any from unittest.mock import patch import click -import pytest from typer.testing import CliRunner from specfact_cli.cli import app @@ -21,19 +19,6 @@ runner = CliRunner() -@pytest.fixture(autouse=True) -def _reset_cli_registry_for_module_tests() -> Generator[None, None, None]: - """Other tests (e.g. category groups) rebuild ``app`` from a cleared registry; re-bootstrap.""" - from specfact_cli.cli import rebuild_root_app_from_registry - from specfact_cli.registry import CommandRegistry - from specfact_cli.registry.bootstrap import register_builtin_commands - - CommandRegistry._clear_for_testing() - register_builtin_commands() - rebuild_root_app_from_registry() - yield - - def _unstyled(text: str) -> str: return click.unstyle(text) diff --git a/tests/unit/tools/test_smart_test_coverage.py b/tests/unit/tools/test_smart_test_coverage.py index 8971bdba..aa6dbaf0 100644 --- a/tests/unit/tools/test_smart_test_coverage.py +++ b/tests/unit/tools/test_smart_test_coverage.py @@ -473,7 +473,7 @@ def test_show_latest_log(self, caplog): @patch.object(SmartCoverageManager, "_run_changed_only") def test_run_smart_tests_with_changes(self, mock_changed_only): """Test running smart tests when changes are detected (changed-only mode).""" - mock_changed_only.return_value = True + mock_changed_only.return_value = (True, True) with ( patch.object(self.manager, "_has_source_changes", return_value=True), diff --git a/tests/unit/tools/test_smart_test_coverage_enhanced.py b/tests/unit/tools/test_smart_test_coverage_enhanced.py index c59f85fb..9a4869ca 100644 --- a/tests/unit/tools/test_smart_test_coverage_enhanced.py +++ b/tests/unit/tools/test_smart_test_coverage_enhanced.py @@ -205,12 +205,34 @@ def test_run_smart_tests_auto_with_changes(self): patch.object(self.manager, "_has_config_changes", return_value=False), patch.object(self.manager, "_run_changed_only") as mock_changed_only, ): - mock_changed_only.return_value = True + mock_changed_only.return_value = (True, True) result = self.manager.run_smart_tests("auto") assert result is True mock_changed_only.assert_called_once() + def test_run_changed_only_without_baseline_runs_full(self): + """No last_full_run: incremental cannot compute diffs; must run full suite.""" + self.manager.cache.pop("last_full_run", None) + with patch.object(self.manager, "_run_full_tests", return_value=True) as mock_full: + ok, ran_any = self.manager._run_changed_only() + assert ok is True + assert ran_any is True + mock_full.assert_called_once() + + def test_run_smart_tests_force_auto_with_no_incremental_runs_full(self): + """Force + auto with no mapped tests: run full suite instead of no-op skip.""" + with ( + patch.object(self.manager, "_has_source_changes", return_value=True), + patch.object(self.manager, "_has_test_changes", return_value=False), + patch.object(self.manager, "_has_config_changes", return_value=False), + patch.object(self.manager, "_run_changed_only", return_value=(True, False)), + patch.object(self.manager, "_run_full_tests", return_value=True) as mock_full, + ): + result = self.manager.run_smart_tests("auto", force=True) + assert result is True + mock_full.assert_called_once() + def test_run_smart_tests_auto_without_changes(self): """Test smart tests in auto mode without changes.""" with ( diff --git a/tools/smart_test_coverage.py b/tools/smart_test_coverage.py index 5beb777b..c447ddf7 100755 --- a/tools/smart_test_coverage.py +++ b/tools/smart_test_coverage.py @@ -1485,7 +1485,11 @@ def run_smart_tests(self, test_level: str = "auto", force: bool = False) -> bool config_changed = self._has_config_changes() if source_changed or test_changed or config_changed or force: - return self._run_changed_only() + ok, ran_any = self._run_changed_only() + if force and not ran_any: + logger.info("Force mode: no incremental tests ran — running full suite.") + return self._run_full_tests() + return ok # No changes - use cached data status = self.get_status() logger.info( @@ -1706,11 +1710,25 @@ def _run_full_tests(self) -> bool: self._update_cache(True, test_count, coverage_percentage, enforce_threshold=False) return success - def _run_changed_only(self) -> bool: + def _run_changed_only(self) -> tuple[bool, bool]: """Run only tests impacted by changes since last cached hashes. + + Returns: + (success, ran_any): whether the run succeeded, and whether any test + subprocess actually ran (False means everything was skipped as no-op). + - Unit: tests mapped from modified source files + directly modified unit tests - Integration/E2E: only directly modified tests - No full-suite fallback here; CI should catch broader regressions.""" + Baseline: if there is no ``last_full_run`` in cache, runs the full suite first. + ``run_smart_tests(..., force=True)`` may also fall back to full when incremental + mode would otherwise no-op. + """ + if not self.cache.get("last_full_run"): + logger.info( + "No prior full test run in cache — running full suite to establish baseline for incremental runs." + ) + return self._run_full_tests(), True + # Collect modified items modified_sources = self._get_modified_files() modified_tests = self._get_modified_test_files() @@ -1774,9 +1792,9 @@ def dedupe(paths: list[Path]) -> list[Path]: logger.info("No changed files detected that map to tests - skipping test execution") # Still keep cache timestamp to allow future git comparisons self._update_cache(True, 0, self.cache.get("coverage_percentage", 0.0), enforce_threshold=False) - return True + return True, False - return overall_success + return overall_success, True @require(lambda test_level: test_level in {"unit", "folder", "integration", "e2e", "full", "auto"}) @ensure(lambda result: isinstance(result, bool), "force_full_run must return bool") From 818ce339921b12cc5cb4fe765f0990f7cae69f9b Mon Sep 17 00:00:00 2001 From: Dominikus Nold Date: Thu, 2 Apr 2026 22:33:21 +0200 Subject: [PATCH 4/8] test: fix CI backlog copy assertions and module install test isolation - Align backlog not-installed tests with solo-developer init guidance (no placeholder) - Autouse: reset CommandRegistry, register_builtin_commands, rebuild_root_app_from_registry so module install tests work after registry-only clears Made-with: Cursor --- .../integration/test_category_group_routing.py | 4 ++-- .../test_multi_module_install_uninstall.py | 18 +++++++++++++++++- .../registry/test_command_registry.py | 4 ++-- 3 files changed, 21 insertions(+), 5 deletions(-) diff --git a/tests/integration/test_category_group_routing.py b/tests/integration/test_category_group_routing.py index 10834871..58cca9f2 100644 --- a/tests/integration/test_category_group_routing.py +++ b/tests/integration/test_category_group_routing.py @@ -49,8 +49,8 @@ def test_backlog_help_lists_subcommands() -> None: assert "policy" in out or "ceremony" in out return assert "command 'backlog' is not installed." in out - assert "specfact init --profile " in out - assert "module install " in out + assert "specfact init --profile" in out + assert "module install" in out def test_validate_flat_command_is_not_available() -> None: diff --git a/tests/unit/specfact_cli/modules/test_multi_module_install_uninstall.py b/tests/unit/specfact_cli/modules/test_multi_module_install_uninstall.py index 42dfa5e0..ef024e30 100644 --- a/tests/unit/specfact_cli/modules/test_multi_module_install_uninstall.py +++ b/tests/unit/specfact_cli/modules/test_multi_module_install_uninstall.py @@ -6,14 +6,30 @@ from __future__ import annotations +from collections.abc import Generator from pathlib import Path from typing import Any from unittest.mock import patch import click +import pytest from typer.testing import CliRunner -from specfact_cli.cli import app +from specfact_cli.cli import app, rebuild_root_app_from_registry +from specfact_cli.registry import CommandRegistry +from specfact_cli.registry.bootstrap import register_builtin_commands + + +@pytest.fixture(autouse=True) +def _reset_registry_and_root_app() -> Generator[None, None, None]: + """Other tests clear ``CommandRegistry`` without re-registering; rebuild root ``app`` for Typer.""" + CommandRegistry._clear_for_testing() + register_builtin_commands() + rebuild_root_app_from_registry() + yield + CommandRegistry._clear_for_testing() + register_builtin_commands() + rebuild_root_app_from_registry() runner = CliRunner() diff --git a/tests/unit/specfact_cli/registry/test_command_registry.py b/tests/unit/specfact_cli/registry/test_command_registry.py index 4999610d..00fd6ce3 100644 --- a/tests/unit/specfact_cli/registry/test_command_registry.py +++ b/tests/unit/specfact_cli/registry/test_command_registry.py @@ -193,8 +193,8 @@ def test_cli_backlog_help_exits_zero(): return merged = (result.stdout or "") + "\n" + (result.stderr or "") assert "Command 'backlog' is not installed." in merged, (result.stdout, result.stderr) - assert "specfact init --profile " in merged, (result.stdout, result.stderr) - assert "module install " in merged, (result.stdout, result.stderr) + assert "specfact init --profile" in merged, (result.stdout, result.stderr) + assert "module install" in merged, (result.stdout, result.stderr) def test_cli_module_help_exits_zero(): From 93f2793f3777b4288613e66aacff0e2620a06f4e Mon Sep 17 00:00:00 2001 From: Dominikus Nold Date: Thu, 2 Apr 2026 22:38:21 +0200 Subject: [PATCH 5/8] docs: README wow path + tests locking entrypoint with docs - README leads with uvx init + code review run --scope full; pip install secondary - Unit contract tests: README and docs/index.md share canonical uvx strings and order - E2E: init --profile solo-developer in temp git repo; registry ready for step two with mock bundles Made-with: Cursor --- README.md | 50 ++++++----- .../docs-new-user-onboarding/TDD_EVIDENCE.md | 12 +++ tests/e2e/test_wow_entrypoint.py | 83 +++++++++++++++++++ .../unit/docs/test_wow_entrypoint_contract.py | 77 +++++++++++++++++ 4 files changed, 203 insertions(+), 19 deletions(-) create mode 100644 tests/e2e/test_wow_entrypoint.py create mode 100644 tests/unit/docs/test_wow_entrypoint_contract.py diff --git a/README.md b/README.md index 8b4653e9..10c593a9 100644 --- a/README.md +++ b/README.md @@ -71,26 +71,40 @@ With SpecFact, you get: ## How do I get started? -### Start Here (5 minutes) +### Start Here (about 2 minutes): scored code review — no `pip install` -### Install +**Point SpecFact at your code.** From a **git repository** (any branch), run two commands: ```bash -# Zero-install (recommended) -uvx specfact-cli@latest - -# Or install globally -pip install -U specfact-cli +uvx specfact-cli init --profile solo-developer +uvx specfact-cli code review run --path . --scope full ``` -### Bootstrap +You should see a **Verdict** (PASS/FAIL), a **Score**, and categorized **findings** — the fastest way to see SpecFact on real code before you dive into backlog, specs, or CI. + +- **Command 1** installs the `solo-developer` bundles (including `specfact-codebase` and `specfact-code-review`) into your user module store so `code review` and related commands are available on the next invocation. +- **Command 2** runs the clean-code review on the repo at `.`. Use **`--scope full`** on the first run so review does not depend on having local git changes. + +**Already installed the CLI?** Use the same flow with `specfact` instead of `uvx specfact-cli`: ```bash -# Recommended first run specfact init --profile solo-developer +specfact code review run --path . --scope full ``` -### Get First Value +**Read the canonical walkthrough:** **[Documentation — Quickstart](https://docs.specfact.io/getting-started/quickstart/)** · **[Installation](https://docs.specfact.io/getting-started/installation/)** (uvx-first, then persistent install). + +### Install (persistent CLI for daily use) + +```bash +pip install -U specfact-cli +``` + +You can still use **`uvx specfact-cli@latest ...`** anytime without installing; it always fetches the latest published CLI. + +### After the wow path: deeper workflows + +When you want analysis, snapshots, or sidecar validation on top of the review layer: ```bash # Analyze a codebase you care about @@ -104,10 +118,7 @@ specfact code validate sidecar init my-project /path/to/repo specfact code validate sidecar run my-project /path/to/repo ``` -That path gives you a concrete first win: SpecFact understands your project context and gives you a -validated starting point instead of jumping straight into blind change work. - -### AI IDE Setup +### AI IDE setup ```bash specfact init ide @@ -125,7 +136,7 @@ your IDE. If module prompt payloads are not installed yet, the CLI uses packaged Use SpecFact as the validation layer around fast-moving implementation work. Start with: -- `specfact init --profile solo-developer` +- `uvx specfact-cli init --profile solo-developer` then `uvx specfact-cli code review run --path . --scope full` (see **Start Here** above) - `specfact code validate sidecar init /path/to/repo` - `specfact code validate sidecar run /path/to/repo` @@ -303,10 +314,11 @@ Use `https://modules.specfact.io/` for the in-depth backlog, project, spec, gove ## How It Works (High Level) -1. **Bootstrap**: install the CLI and initialize the official bundles you need. -2. **Analyze or sync**: import code, connect backlog systems, or sync external artifacts into project bundles. -3. **Validate**: run spec, governance, and sidecar validation flows before implementation or release. -4. **Iterate safely**: use module-provided workflows while the core runtime keeps command mounting, trust, and lifecycle consistent. +1. **Bootstrap**: use **uvx** or **pip**, then `init --profile` to install the bundles you need (for example `solo-developer` for a scored **code review** first). +2. **Review or analyze**: run **`code review run`** on a repo, or import code and snapshot state for deeper workflows. +3. **Sync**: connect backlog systems or sync external artifacts into project bundles when you are ready. +4. **Validate**: run spec, governance, and sidecar validation flows before implementation or release. +5. **Iterate safely**: use module-provided workflows while the core runtime keeps command mounting, trust, and lifecycle consistent. ## Where SpecFact Fits diff --git a/openspec/changes/docs-new-user-onboarding/TDD_EVIDENCE.md b/openspec/changes/docs-new-user-onboarding/TDD_EVIDENCE.md index a8663dc1..6eb9678f 100644 --- a/openspec/changes/docs-new-user-onboarding/TDD_EVIDENCE.md +++ b/openspec/changes/docs-new-user-onboarding/TDD_EVIDENCE.md @@ -1,5 +1,17 @@ # TDD evidence — docs-new-user-onboarding +## 2026-04-02 (README + wow entrypoint contract) + +### Commands run (passing) + +- `hatch run pytest tests/unit/docs/test_wow_entrypoint_contract.py tests/e2e/test_wow_entrypoint.py tests/unit/docs/test_first_contact_story.py -v --no-cov` +- `hatch run format` + +### Summary + +- **README.md**: Rewrote **How do I get started** so the uvx two-command wow path (`init` + `code review run --scope full`) is first; persistent install and deeper workflows follow; **How It Works** updated to lead with review. +- **Tests**: `tests/unit/docs/test_wow_entrypoint_contract.py` locks README ↔ `docs/index.md` canonical command strings and section order; `tests/e2e/test_wow_entrypoint.py` runs `init --profile solo-developer` in a **temp git repo** and asserts registry readiness for the documented second step (mock bundles). + ## 2026-04-02 (implementation session) ### Commands run (passing) diff --git a/tests/e2e/test_wow_entrypoint.py b/tests/e2e/test_wow_entrypoint.py new file mode 100644 index 00000000..b9d76d0f --- /dev/null +++ b/tests/e2e/test_wow_entrypoint.py @@ -0,0 +1,83 @@ +"""E2E checks for the canonical wow entry path (solo-developer init in a temp git repo). + +Full `code review run` execution requires bundled marketplace modules; here we verify the +documented first step (init) succeeds in a real temp git workspace and that the registry +surface expected for the second step is consistent with the README/docs contract. +""" + +from __future__ import annotations + +import subprocess +from pathlib import Path +from unittest.mock import patch + +import pytest +from typer.testing import CliRunner + +from specfact_cli.cli import app +from specfact_cli.registry import CommandRegistry +from specfact_cli.registry.bootstrap import register_builtin_commands + + +@pytest.fixture(autouse=True) +def _reset_registry() -> None: + CommandRegistry._clear_for_testing() + yield + CommandRegistry._clear_for_testing() + + +runner = CliRunner() + + +def test_init_solo_developer_exits_zero_in_temp_git_repo(tmp_path: Path) -> None: + """Documented path step 1: init --profile solo-developer in a repo (git init like a real user).""" + subprocess.run(["git", "init"], cwd=tmp_path, check=True, capture_output=True) + with ( + patch("specfact_cli.modules.init.src.commands.install_bundles_for_init", lambda *a, **k: None), + patch( + "specfact_cli.modules.init.src.commands.get_discovered_modules_for_state", + lambda **_: [{"id": "init", "enabled": True}], + ), + patch("specfact_cli.modules.init.src.commands.write_modules_state", lambda _: None), + patch("specfact_cli.modules.init.src.commands.run_discovery_and_write_cache", lambda _: None), + patch("specfact_cli.modules.init.src.commands.is_first_run", lambda **_: True), + ): + result = runner.invoke( + app, + ["init", "--repo", str(tmp_path), "--profile", "solo-developer"], + catch_exceptions=False, + ) + assert result.exit_code == 0, result.stdout + result.stderr + + +def test_after_wow_profile_mock_bundles_registry_lists_code_for_step_two( + monkeypatch: pytest.MonkeyPatch, tmp_path: Path +) -> None: + """Step 2 needs code + code-review bundles; registry exposes `code` group when both are 'installed'.""" + subprocess.run(["git", "init"], cwd=tmp_path, check=True, capture_output=True) + monkeypatch.setattr( + "specfact_cli.modules.init.src.commands.install_bundles_for_init", + lambda *a, **k: None, + ) + monkeypatch.setattr( + "specfact_cli.modules.init.src.commands.get_discovered_modules_for_state", + lambda **_: [{"id": "init", "enabled": True}], + ) + monkeypatch.setattr("specfact_cli.modules.init.src.commands.write_modules_state", lambda _: None) + monkeypatch.setattr("specfact_cli.modules.init.src.commands.run_discovery_and_write_cache", lambda _: None) + monkeypatch.setattr("specfact_cli.modules.init.src.commands.is_first_run", lambda **_: True) + init_r = runner.invoke( + app, + ["init", "--repo", str(tmp_path), "--profile", "solo-developer"], + catch_exceptions=False, + ) + assert init_r.exit_code == 0 + + CommandRegistry._clear_for_testing() + monkeypatch.setattr( + "specfact_cli.registry.module_packages.get_installed_bundles", + lambda _p, _e: ["specfact-codebase", "specfact-code-review"], + ) + register_builtin_commands() + names = CommandRegistry.list_commands() + assert "code" in names, f"Expected code group when codebase+code-review bundles present; got {names}" diff --git a/tests/unit/docs/test_wow_entrypoint_contract.py b/tests/unit/docs/test_wow_entrypoint_contract.py new file mode 100644 index 00000000..d4a9f026 --- /dev/null +++ b/tests/unit/docs/test_wow_entrypoint_contract.py @@ -0,0 +1,77 @@ +"""Contract tests: README and docs landing must match the canonical uvx \"wow\" entry path. + +The wow path is the primary onboarding surface (init + code review with --scope full). +""" + +from __future__ import annotations + +from pathlib import Path + +import pytest + + +REPO_ROOT = Path(__file__).resolve().parents[3] +README = REPO_ROOT / "README.md" +DOCS_INDEX = REPO_ROOT / "docs" / "index.md" + +# Canonical strings — keep in sync with docs/index.md hero and README "Start Here". +UVX_INIT = "uvx specfact-cli init --profile solo-developer" +UVX_REVIEW = "uvx specfact-cli code review run --path . --scope full" +INSTALLED_INIT = "specfact init --profile solo-developer" +INSTALLED_REVIEW = "specfact code review run --path . --scope full" + + +@pytest.fixture(scope="module", autouse=True) +def _require_files() -> None: + if not README.is_file(): + pytest.skip(f"README.md missing at {README}", allow_module_level=True) + if not DOCS_INDEX.is_file(): + pytest.skip(f"docs/index.md missing at {DOCS_INDEX}", allow_module_level=True) + + +def _read(p: Path) -> str: + return p.read_text(encoding="utf-8") + + +def test_readme_and_docs_index_include_identical_uvx_wow_commands() -> None: + """Hero commands in README and docs/index.md must not drift.""" + readme = _read(README) + docs = _read(DOCS_INDEX) + for needle in (UVX_INIT, UVX_REVIEW): + assert needle in readme, f"README.md must contain {needle!r}" + assert needle in docs, f"docs/index.md must contain {needle!r}" + + +def test_readme_documents_pip_free_alternate_and_scope_full_rationale() -> None: + """README explains --scope full and the installed-CLI equivalent.""" + readme = _read(README) + assert "--scope full" in readme + assert INSTALLED_INIT in readme and INSTALLED_REVIEW in readme + assert "Verdict" in readme and "Score" in readme and "findings" in readme.lower() + + +def test_readme_wow_section_appears_before_choose_your_path() -> None: + """Primary entry content must appear before outcome routing.""" + readme = _read(README) + wow = readme.find("uvx specfact-cli init --profile solo-developer") + choose = readme.find("## Choose Your Path") + assert wow != -1 and choose != -1 + assert wow < choose + + +def test_docs_index_wow_block_precedes_what_is_specfact() -> None: + """Landing page leads with the runnable block before deep product copy.""" + docs = _read(DOCS_INDEX) + block = docs.find(UVX_INIT) + heading = docs.find("## What is SpecFact?") + assert block != -1 and heading != -1 + assert block < heading + + +def test_readme_start_here_precedes_documentation_topology() -> None: + """Fast-start remains above internal docs topology (existing contract).""" + readme = _read(README) + start = readme.find("### Start Here") + topo = readme.find("## Documentation Topology") + assert start != -1 and topo != -1 + assert start < topo From 4261a2b9001b14af0e5ab767fc46362945756950 Mon Sep 17 00:00:00 2001 From: Dominikus Nold Date: Thu, 2 Apr 2026 23:11:49 +0200 Subject: [PATCH 6/8] feat(init): solo-developer includes code-review bundle and marketplace install - Add specfact-code-review to canonical bundles and solo-developer preset - Install marketplace module nold-ai/specfact-code-review via install_bundles_for_init - Docs index: core CLI story and default starting point copy for parity tests - CLI: missing-module hint references solo-developer profile - smart_test_coverage: icontract requires use (self, test_level) for method contracts - Re-sign init and module_registry manifests; tests and registry updates Made-with: Cursor --- CHANGELOG.md | 14 ++ docs/index.md | 4 +- .../docs-new-user-onboarding/proposal.md | 2 +- pyproject.toml | 2 +- resources/templates/github-action.yml.j2 | 100 +++++++++----- resources/templates/pr-template.md.j2 | 9 ++ setup.py | 2 +- src/__init__.py | 2 +- src/specfact_cli/__init__.py | 2 +- src/specfact_cli/cli.py | 57 ++++++-- src/specfact_cli/generators/plan_generator.py | 6 +- .../generators/workflow_generator.py | 9 +- .../modules/init/module-package.yaml | 6 +- src/specfact_cli/modules/init/src/commands.py | 17 ++- .../modules/init/src/first_run_selection.py | 125 +++++++++++++----- .../module_registry/module-package.yaml | 6 +- .../modules/module_registry/src/commands.py | 10 +- .../registry/dependency_resolver.py | 38 ++++-- src/specfact_cli/registry/module_installer.py | 21 ++- tests/e2e/test_core_slimming_e2e.py | 3 +- .../test_category_group_routing.py | 7 +- tests/integration/test_core_slimming.py | 3 +- tests/unit/cli/test_lean_help_output.py | 25 +++- .../unit/importers/test_speckit_converter.py | 3 +- .../modules/init/test_first_run_selection.py | 14 +- .../test_multi_module_install_uninstall.py | 77 ++++++++++- .../registry/test_command_registry.py | 14 +- .../test_dependency_resolver_pip_free.py | 53 +++++++- .../specfact_cli/registry/test_help_cache.py | 3 +- .../registry/test_module_packages.py | 3 +- .../registry/test_versioned_bundle_deps.py | 25 +++- tools/smart_test_coverage.py | 97 ++++++++++---- 32 files changed, 573 insertions(+), 186 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 244f307d..c2935162 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -10,6 +10,20 @@ All notable changes to this project will be documented in this file. --- +## [0.45.0] - 2026-04-02 + +### Fixed + +- Missing bundle UX: when workflow bundles are not installed, the CLI now reports the + **marketplace module** (e.g. `nold-ai/specfact-codebase` for the `code` group) instead of + `Command 'code' is not installed`, which was easy to confuse with the VS Code `code` CLI. + +- Generated GitHub workflow (`resources/templates/github-action.yml.j2`): GitHub Actions `if` + conditions now use `${{ … }}` so annotations, PR comment, and fail steps evaluate correctly + on GitHub (avoids mixed `always() &&` / raw expression parsing issues). + +--- + ## [0.44.0] - 2026-03-31 ### Added diff --git a/docs/index.md b/docs/index.md index abbfd845..0f90cbed 100644 --- a/docs/index.md +++ b/docs/index.md @@ -34,7 +34,7 @@ SpecFact does **not** include built-in AI. It pairs deterministic CLI commands w ## What is SpecFact? -SpecFact helps you keep backlog intent, specifications, implementation, and validation from drifting apart. It supports spec-first handoffs with **OpenSpec** and spec-kit style workflows so brownfield and AI-assisted teams can keep backlog language, specs, and code aligned. +SpecFact helps you keep backlog intent, specifications, implementation, and validation from drifting apart. It supports spec-first handoffs with **OpenSpec** and spec-kit-style workflows so brownfield and AI-assisted teams can keep backlog language, specs, and code aligned. It is especially useful when: @@ -112,7 +112,7 @@ Installed modules add command groups such as `project`, `backlog`, `code`, `spec ## Modules Documentation -`docs.specfact.io` is the default starting point for the core CLI story and the **canonical starting point for the core CLI story** on this site. Move to the modules site when you need **module-deep workflows**, bundle-specific adapters, and authoring guidance. +`docs.specfact.io` is the default starting point for first-time readers on this site, and the **canonical starting point for the core CLI story** in the docs split. Move to the modules site when you need **module-deep workflows**, bundle-specific adapters, and authoring guidance. - **[Modules Docs Home](https://modules.specfact.io/)** - Backlog, project, spec, govern - **[Module Development](https://modules.specfact.io/authoring/module-development/)** - Build your own modules diff --git a/openspec/changes/docs-new-user-onboarding/proposal.md b/openspec/changes/docs-new-user-onboarding/proposal.md index 6d214fd8..f7e34663 100644 --- a/openspec/changes/docs-new-user-onboarding/proposal.md +++ b/openspec/changes/docs-new-user-onboarding/proposal.md @@ -136,7 +136,7 @@ can truthfully describe a vibe-coder entry sequence. ## Source Tracking - **GitHub Issue**: [#476](https://github.com/nold-ai/specfact-cli/issues/476) -- **Issue URL**: https://github.com/nold-ai/specfact-cli/issues/476 +- **Issue URL**: - **Parent Feature**: [#356](https://github.com/nold-ai/specfact-cli/issues/356) — Documentation & Discrepancy Remediation ([tracking comment](https://github.com/nold-ai/specfact-cli/issues/356#issuecomment-4180162525)) - **Related (overlap)**: [#466](https://github.com/nold-ai/specfact-cli/issues/466) — first-contact / onboarding ([cross-link comment](https://github.com/nold-ai/specfact-cli/issues/466#issuecomment-4180162609)) - **Repository**: nold-ai/specfact-cli diff --git a/pyproject.toml b/pyproject.toml index 8466bb8a..d5fb771c 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -4,7 +4,7 @@ build-backend = "hatchling.build" [project] name = "specfact-cli" -version = "0.44.0" +version = "0.45.0" description = "The swiss knife CLI for agile DevOps teams. Keep backlog, specs, tests, and code in sync with validation and contract enforcement for new projects and long-lived codebases." readme = "README.md" requires-python = ">=3.11" diff --git a/resources/templates/github-action.yml.j2 b/resources/templates/github-action.yml.j2 index 29ffbea0..1f5dde1e 100644 --- a/resources/templates/github-action.yml.j2 +++ b/resources/templates/github-action.yml.j2 @@ -1,5 +1,7 @@ # yaml-language-server: $schema=https://json.schemastore.org/github-workflow.json # yamllint disable rule:line-length rule:truthy +# Generated workflow — aligns with current SpecFact CLI: non-interactive CI must run +# `specfact init --profile …` (or `specfact module install …`) before `specfact code …` commands. name: SpecFact CLI Validation on: @@ -18,7 +20,7 @@ on: workflow_dispatch: inputs: budget: - description: "Time budget in seconds" + description: "Time budget in seconds for code repro" required: false default: "{{ budget }}" type: string @@ -32,14 +34,19 @@ on: - warn - log version_check_mode: - description: "Version check mode (info, warn, block)" + description: "Project bundle version check (info, warn, block; skipped if no .specfact/projects)" required: false - default: "warn" + default: "info" type: choice options: - info - warn - block + bundle_name: + description: "Project bundle name for `specfact project version check` (under .specfact/projects/)" + required: false + default: "{{ bundle_name }}" + type: string jobs: specfact-validation: @@ -59,53 +66,75 @@ jobs: python-version: "{{ python_version }}" cache: "pip" - - name: Install dependencies - run: | - python -m pip install --upgrade pip - pip install hatch - - name: Install SpecFact CLI run: | + python -m pip install --upgrade pip echo "📦 Installing SpecFact CLI..." pip install specfact-cli + - name: Bootstrap SpecFact (CI — required for workflow commands) + run: | + # Non-interactive CI must pass --profile or --install (see `specfact init --help`). + specfact init --profile solo-developer --repo . + - name: Set validation parameters id: validation run: | - BUDGET="${INPUT_BUDGET:-{{ budget }}}" - MODE="${INPUT_MODE:-block}" - VERSION_CHECK_MODE="${INPUT_VERSION_CHECK_MODE:-info}" - echo "budget=$BUDGET" >> $GITHUB_OUTPUT - echo "mode=$MODE" >> $GITHUB_OUTPUT - echo "version_check_mode=$VERSION_CHECK_MODE" >> $GITHUB_OUTPUT - echo "SPECFACT_BUDGET=$BUDGET" >> $GITHUB_ENV - echo "SPECFACT_MODE=$MODE" >> $GITHUB_ENV - echo "SPECFACT_VERSION_CHECK_MODE=$VERSION_CHECK_MODE" >> $GITHUB_ENV + BUDGET="{% raw %}${{ github.event.inputs.budget || '{% endraw %}{{ budget }}{% raw %}' }}{% endraw %}" + MODE="{% raw %}${{ github.event.inputs.mode || 'block' }}{% endraw %}" + VERSION_CHECK_MODE="{% raw %}${{ github.event.inputs.version_check_mode || 'info' }}{% endraw %}" + BUNDLE_NAME="{% raw %}${{ github.event.inputs.bundle_name || '{% endraw %}{{ bundle_name }}{% raw %}' }}{% endraw %}" + echo "budget=$BUDGET" >> "$GITHUB_OUTPUT" + echo "mode=$MODE" >> "$GITHUB_OUTPUT" + echo "version_check_mode=$VERSION_CHECK_MODE" >> "$GITHUB_OUTPUT" + echo "bundle_name=$BUNDLE_NAME" >> "$GITHUB_OUTPUT" + echo "SPECFACT_BUDGET=$BUDGET" >> "$GITHUB_ENV" + echo "SPECFACT_MODE=$MODE" >> "$GITHUB_ENV" + echo "SPECFACT_VERSION_CHECK_MODE=$VERSION_CHECK_MODE" >> "$GITHUB_ENV" + echo "BUNDLE_NAME=$BUNDLE_NAME" >> "$GITHUB_ENV" + + - name: Optional — CrossHair / repro tooling setup + continue-on-error: true + run: specfact code repro setup - - name: Run Contract Validation + - name: Run contract validation (code repro) id: repro continue-on-error: true run: | - specfact repro --verbose --budget {% raw %}${{ steps.validation.outputs.budget }}{% endraw %} || true - echo "exit_code=$?" >> $GITHUB_OUTPUT + set +e + specfact code repro --verbose --budget "{% raw %}${{ steps.validation.outputs.budget }}{% endraw %}" + ec=$? + set -e + echo "exit_code=$ec" >> "$GITHUB_OUTPUT" - - name: Version check + - name: Project bundle version check (optional) id: version_check continue-on-error: true run: | VERSION_CHECK_MODE="{% raw %}${{ steps.validation.outputs.version_check_mode }}{% endraw %}" - echo "📌 Checking bundle version recommendation (mode: $VERSION_CHECK_MODE)..." - if specfact project version check --repo .; then - echo "version_check_passed=true" >> $GITHUB_OUTPUT + BUNDLE_NAME="{% raw %}${{ steps.validation.outputs.bundle_name }}{% endraw %}" + echo "📌 Bundle version check (mode: $VERSION_CHECK_MODE, bundle: $BUNDLE_NAME)..." + if [ ! -d .specfact/projects ]; then + echo "No .specfact/projects — skipping project version check." + echo "version_check_passed=skipped" >> "$GITHUB_OUTPUT" + exit 0 + fi + if ! find .specfact/projects -mindepth 1 -maxdepth 1 -type d 2>/dev/null | grep -q .; then + echo "No project bundles under .specfact/projects — skipping." + echo "version_check_passed=skipped" >> "$GITHUB_OUTPUT" + exit 0 + fi + if specfact project version check --repo . --bundle "$BUNDLE_NAME"; then + echo "version_check_passed=true" >> "$GITHUB_OUTPUT" else - echo "version_check_passed=false" >> $GITHUB_OUTPUT + echo "version_check_passed=false" >> "$GITHUB_OUTPUT" if [ "$VERSION_CHECK_MODE" = "warn" ]; then - echo "⚠️ Version check recommendation not followed (warn mode - continuing)" + echo "⚠️ Version check did not pass (warn mode — continuing)" elif [ "$VERSION_CHECK_MODE" = "block" ]; then - echo "❌ Version check recommendation not followed (block mode - failing)" + echo "❌ Version check failed (block mode)" exit 1 else - echo "ℹ️ Version check recommendation available (info mode - continuing)" + echo "ℹ️ Version check finished (info mode — continuing)" fi fi @@ -117,28 +146,28 @@ jobs: if [ -d "$REPORT_DIR" ]; then LATEST_REPORT=$(find "$REPORT_DIR" -name "report-*.yaml" -type f -printf "%T@ %p\n" | sort -n | tail -1 | cut -d' ' -f2-) if [ -n "$LATEST_REPORT" ]; then - echo "path=$LATEST_REPORT" >> $GITHUB_OUTPUT - echo "SPECFACT_REPORT_PATH=$LATEST_REPORT" >> $GITHUB_ENV + echo "path=$LATEST_REPORT" >> "$GITHUB_OUTPUT" + echo "SPECFACT_REPORT_PATH=$LATEST_REPORT" >> "$GITHUB_ENV" fi fi - name: Create GitHub annotations id: annotations - if: always() && {% raw %}steps.report.outputs.path != ''{% endraw %} + if: {% raw %}${{ always() && steps.report.outputs.path != '' }}{% endraw %} run: | python -m specfact_cli.utils.github_annotations || true - name: Generate PR comment id: pr-comment - if: always() && {% raw %}github.event_name == 'pull_request' && steps.report.outputs.path != ''{% endraw %} + if: {% raw %}${{ always() && github.event_name == 'pull_request' && steps.report.outputs.path != '' }}{% endraw %} run: | python -m specfact_cli.utils.github_annotations if [ -f ".specfact/pr-comment.md" ]; then - echo "comment_path=.specfact/pr-comment.md" >> $GITHUB_OUTPUT + echo "comment_path=.specfact/pr-comment.md" >> "$GITHUB_OUTPUT" fi - name: Post PR comment - if: always() && {% raw %}github.event_name == 'pull_request' && steps.pr-comment.outputs.comment_path != ''{% endraw %} + if: {% raw %}${{ always() && github.event_name == 'pull_request' && steps.pr-comment.outputs.comment_path != '' }}{% endraw %} uses: actions/github-script@v7 with: script: | @@ -165,8 +194,7 @@ jobs: if-no-files-found: ignore - name: Fail workflow if validation failed - if: {% raw %}steps.repro.outputs.exit_code != '0' && steps.validation.outputs.mode == 'block'{% endraw %} + if: {% raw %}${{ steps.repro.outputs.exit_code != '0' && steps.validation.outputs.mode == 'block' }}{% endraw %} run: | echo "❌ Validation failed. Exiting with error code." exit 1 - diff --git a/resources/templates/pr-template.md.j2 b/resources/templates/pr-template.md.j2 index d32006cb..29fee4be 100644 --- a/resources/templates/pr-template.md.j2 +++ b/resources/templates/pr-template.md.j2 @@ -14,6 +14,15 @@ ## ✅ Validation +**CI / local prerequisites (SpecFact v0.40+):** + +- Install: `pip install specfact-cli` (or `uvx specfact-cli@latest` for one-off runs). +- **Non-interactive / CI** must bootstrap workflow bundles before `specfact code …` or `specfact project …`: + - `specfact init --profile solo-developer --repo .` (or another profile / `specfact init --install …`), **or** + - `specfact module install nold-ai/specfact-codebase` (and other bundles as needed). +- Contract repro in CI uses **`specfact code repro`** (not `specfact repro`). Optional: `specfact code repro setup` for CrossHair config. +- Optional `specfact project version check` needs a project under `.specfact/projects//` and `--bundle `. + **SpecFact CLI Validation Results:** {% if validation_passed %} diff --git a/setup.py b/setup.py index f2d563d2..0b867b3f 100644 --- a/setup.py +++ b/setup.py @@ -7,7 +7,7 @@ if __name__ == "__main__": _setup = setup( name="specfact-cli", - version="0.44.0", + version="0.45.0", description=( "The swiss knife CLI for agile DevOps teams. Keep backlog, specs, tests, and code in sync with " "validation and contract enforcement for new projects and long-lived codebases." diff --git a/src/__init__.py b/src/__init__.py index 711efdfa..ca0330ca 100644 --- a/src/__init__.py +++ b/src/__init__.py @@ -3,4 +3,4 @@ """ # Package version: keep in sync with pyproject.toml, setup.py, src/specfact_cli/__init__.py -__version__ = "0.43.3" +__version__ = "0.45.0" diff --git a/src/specfact_cli/__init__.py b/src/specfact_cli/__init__.py index a4a9b8c3..0d5806f9 100644 --- a/src/specfact_cli/__init__.py +++ b/src/specfact_cli/__init__.py @@ -45,6 +45,6 @@ def _bootstrap_bundle_paths() -> None: _bootstrap_bundle_paths() -__version__ = "0.44.0" +__version__ = "0.45.0" __all__ = ["__version__"] diff --git a/src/specfact_cli/cli.py b/src/specfact_cli/cli.py index 8d0835de..2c0282b9 100644 --- a/src/specfact_cli/cli.py +++ b/src/specfact_cli/cli.py @@ -93,6 +93,49 @@ def _normalized_detect_shell(pid: int | None = None, max_depth: int = 10) -> tup } ) +# First token -> official marketplace module that provides it (not the VS Code `code` CLI). +_INVOKED_TO_MARKETPLACE_MODULE: dict[str, str] = { + "backlog": "nold-ai/specfact-backlog", + "policy": "nold-ai/specfact-backlog", + "code": "nold-ai/specfact-codebase", + "analyze": "nold-ai/specfact-codebase", + "drift": "nold-ai/specfact-codebase", + "validate": "nold-ai/specfact-codebase", + "repro": "nold-ai/specfact-codebase", + "import": "nold-ai/specfact-codebase", + "project": "nold-ai/specfact-project", + "plan": "nold-ai/specfact-project", + "sync": "nold-ai/specfact-project", + "migrate": "nold-ai/specfact-project", + "spec": "nold-ai/specfact-spec", + "contract": "nold-ai/specfact-spec", + "sdd": "nold-ai/specfact-spec", + "generate": "nold-ai/specfact-spec", + "govern": "nold-ai/specfact-govern", + "enforce": "nold-ai/specfact-govern", + "patch": "nold-ai/specfact-govern", +} + + +def _print_missing_bundle_command_help(invoked: str) -> None: + """Print install guidance when a bundle group or shim is not registered.""" + module_id = _INVOKED_TO_MARKETPLACE_MODULE.get(invoked) + console = get_configured_console() + if module_id is not None: + console.print( + f"[bold red]Module '{module_id}' is not installed.[/bold red]\n" + f"The [bold]{invoked}[/bold] command group is provided by that module. " + f"Install with [bold]specfact module install {module_id}[/bold], " + "or run [bold]specfact init --profile solo-developer[/bold] (or another profile) " + "to install bundles." + ) + return + console.print( + f"[bold red]Command '{invoked}' is not installed.[/bold red]\n" + "Install workflow bundles with [bold]specfact init --profile [/bold] " + "or [bold]specfact module install [/bold]." + ) + class _RootCLIGroup(ProgressiveDisclosureGroup): """Root group that shows actionable error when an unknown command is a known bundle group/shim.""" @@ -108,12 +151,7 @@ def resolve_command( result = super().resolve_command(ctx, args) except click.UsageError: if invoked in KNOWN_BUNDLE_GROUP_OR_SHIM_NAMES: - get_configured_console().print( - f"[bold red]Command '{invoked}' is not installed.[/bold red]\n" - "Try: [bold]uvx specfact-cli init --profile solo-developer[/bold] " - "or [bold]specfact init --profile solo-developer[/bold], " - "or install bundles with [bold]specfact module install [/bold]." - ) + _print_missing_bundle_command_help(invoked) raise SystemExit(1) from None raise _name, cmd, remaining = result @@ -122,12 +160,7 @@ def resolve_command( invoked = remaining[0] if invoked not in KNOWN_BUNDLE_GROUP_OR_SHIM_NAMES: return result - get_configured_console().print( - f"[bold red]Command '{invoked}' is not installed.[/bold red]\n" - "Try: [bold]uvx specfact-cli init --profile solo-developer[/bold] " - "or [bold]specfact init --profile solo-developer[/bold], " - "or install bundles with [bold]specfact module install [/bold]." - ) + _print_missing_bundle_command_help(invoked) raise SystemExit(1) diff --git a/src/specfact_cli/generators/plan_generator.py b/src/specfact_cli/generators/plan_generator.py index b98a0082..633c97a2 100644 --- a/src/specfact_cli/generators/plan_generator.py +++ b/src/specfact_cli/generators/plan_generator.py @@ -35,8 +35,10 @@ def __init__(self, templates_dir: Path | None = None) -> None: self.templates_dir = Path(templates_dir) self.env = Environment( loader=FileSystemLoader(self.templates_dir), - trim_blocks=True, - lstrip_blocks=True, + # Must be False: trim_blocks removes the newline after {% endraw %}, merging `if:` with `run:`. + trim_blocks=False, + # Must be False: lstrip_blocks strips newlines after {% endraw %} in some cases. + lstrip_blocks=False, ) @beartype diff --git a/src/specfact_cli/generators/workflow_generator.py b/src/specfact_cli/generators/workflow_generator.py index 322e61ba..fc6144ed 100644 --- a/src/specfact_cli/generators/workflow_generator.py +++ b/src/specfact_cli/generators/workflow_generator.py @@ -40,8 +40,10 @@ def __init__(self, templates_dir: Path | None = None) -> None: self.templates_dir = Path(templates_dir) self.env = Environment( loader=FileSystemLoader(self.templates_dir), - trim_blocks=True, - lstrip_blocks=True, + # Must be False: trim_blocks removes the newline after {% endraw %}, merging `if:` with `run:`. + trim_blocks=False, + # Must be False: lstrip_blocks strips newlines after {% endraw %} in some cases. + lstrip_blocks=False, ) @beartype @@ -56,6 +58,7 @@ def generate_github_action( repo_name: str | None = None, budget: int = 90, python_version: str = "3.12", + bundle_name: str = "main", ) -> None: """ Generate GitHub Action workflow for SpecFact validation. @@ -65,6 +68,7 @@ def generate_github_action( repo_name: Repository name for context budget: Time budget in seconds for validation (must be > 0) python_version: Python version for workflow (must be 3.x) + bundle_name: Default project bundle name for optional `project version check` in CI Raises: FileNotFoundError: If template file doesn't exist @@ -75,6 +79,7 @@ def generate_github_action( "repo_name": repo_name or "specfact-project", "budget": budget, "python_version": python_version, + "bundle_name": bundle_name, } # Render template diff --git a/src/specfact_cli/modules/init/module-package.yaml b/src/specfact_cli/modules/init/module-package.yaml index 1b9e14bb..e1bfdb1c 100644 --- a/src/specfact_cli/modules/init/module-package.yaml +++ b/src/specfact_cli/modules/init/module-package.yaml @@ -1,5 +1,5 @@ name: init -version: 0.1.22 +version: 0.1.23 commands: - init category: core @@ -17,5 +17,5 @@ publisher: description: Initialize SpecFact workspace and bootstrap local configuration. license: Apache-2.0 integrity: - checksum: sha256:741b980089306ea36002e5bb50c1f019aedf0936ca3f8852835e4b6c72138d35 - signature: XZtSMh24mVI4aIYsp1dGA+5Q4GAc9619PGX3wY+DlB+Vm2mCutkBQsGs1E2Mvfjjzj/ghXrmmlJMFFoLpZt3AA== + checksum: sha256:a0a7a9eac6b7407ee8dd08f151b18ecd8b9ff7a263d3adfce3aaa99dfb369c04 + signature: +Lr8H1Fo3NbjPlOi9G0/a3v8eCkDcFZ33TbC8l9p6IpzGda9nYA47rbBgyklBAauXDEl2k8TRZcMFrECXg5BDw== diff --git a/src/specfact_cli/modules/init/src/commands.py b/src/specfact_cli/modules/init/src/commands.py index a0af06b6..a725b15a 100644 --- a/src/specfact_cli/modules/init/src/commands.py +++ b/src/specfact_cli/modules/init/src/commands.py @@ -2,6 +2,7 @@ from __future__ import annotations +import os import subprocess from pathlib import Path from typing import Any, cast @@ -168,6 +169,15 @@ def _copy_backlog_field_mapping_templates(repo_path: Path, force: bool, console: app = typer.Typer(help="Bootstrap SpecFact (use `init ide` for IDE setup; module lifecycle is under `specfact module`)") console = Console() + + +def _init_user_visible_step(message: str) -> None: + """Print init progress unless running under pytest (keeps test output clean).""" + if os.environ.get("PYTEST_CURRENT_TEST"): + return + console.print(message) + + _MODULE_IO_CONTRACT = ModuleIOContract import_to_bundle = module_io_shim.import_to_bundle export_from_bundle = module_io_shim.export_from_bundle @@ -428,12 +438,12 @@ def _install_profile_bundles(profile: str, install_root: Path, non_interactive: """Resolve profile to bundle list and install via module installer.""" bundle_ids = first_run_selection.resolve_profile_bundles(profile) if bundle_ids: + _init_user_visible_step(f"[cyan]→[/cyan] Profile [bold]{profile}[/bold]: preparing workflow bundles…") install_bundles_for_init( bundle_ids, install_root, non_interactive=non_interactive, ) - console.print(f"[green]Installed:[/green] {', '.join(bundle_ids)}") @beartype @@ -441,12 +451,12 @@ def _install_bundle_list(install_arg: str, install_root: Path, non_interactive: """Parse comma-separated or 'all' and install bundles via module installer.""" bundle_ids = first_run_selection.resolve_install_bundles(install_arg) if bundle_ids: + _init_user_visible_step("[cyan]→[/cyan] Installing bundles from [bold]--install[/bold]…") install_bundles_for_init( bundle_ids, install_root, non_interactive=non_interactive, ) - console.print(f"[green]Installed:[/green] {', '.join(bundle_ids)}") def _apply_profile_or_install_bundles(profile: str | None, install: str | None) -> None: @@ -707,10 +717,12 @@ def init( elif is_first_run(user_root=INIT_USER_MODULES_ROOT) and not is_non_interactive(): _run_interactive_first_run_install() + _init_user_visible_step("[cyan]→[/cyan] Discovering installed modules and writing registry state…") modules_list = get_discovered_modules_for_state(enable_ids=[], disable_ids=[]) if modules_list: write_modules_state(modules_list) + _init_user_visible_step("[cyan]→[/cyan] Indexing CLI commands for help cache…") run_discovery_and_write_cache(__version__) if install_deps: @@ -727,6 +739,7 @@ def init( "[cyan]Module management has moved to `specfact module`[/cyan] " "[dim](for example: `specfact module list`, `specfact module init`)[/dim]" ) + _init_user_visible_step("[cyan]→[/cyan] Checking IDE prompt export status…") _audit_prompt_installation(repo_path) console.print("[dim]Use `specfact init ide` to install/update IDE prompts and settings.[/dim]") diff --git a/src/specfact_cli/modules/init/src/first_run_selection.py b/src/specfact_cli/modules/init/src/first_run_selection.py index 5765217f..7c72d9d9 100644 --- a/src/specfact_cli/modules/init/src/first_run_selection.py +++ b/src/specfact_cli/modules/init/src/first_run_selection.py @@ -2,6 +2,7 @@ from __future__ import annotations +import os from pathlib import Path from beartype import beartype @@ -24,17 +25,18 @@ ], } -CANONICAL_BUNDLES: tuple[str, ...] = ( +_INSTALL_ALL_BUNDLES: tuple[str, ...] = ( "specfact-project", "specfact-backlog", "specfact-codebase", "specfact-spec", "specfact-govern", - "specfact-code-review", ) -# Marketplace module IDs for bundles that are not shipped as bundled (core) modules. -MARKETPLACE_BUNDLE_IDS: dict[str, str] = { +# Includes marketplace-only bundles referenced by profiles (e.g. specfact-code-review). +CANONICAL_BUNDLES: tuple[str, ...] = (*_INSTALL_ALL_BUNDLES, "specfact-code-review") + +MARKETPLACE_ONLY_BUNDLES: dict[str, str] = { "specfact-code-review": "nold-ai/specfact-code-review", } @@ -53,13 +55,28 @@ "specfact-codebase": ["analyze", "drift", "validate", "repro"], "specfact-spec": ["contract", "spec", "sdd", "generate"], "specfact-govern": ["enforce", "patch_mode"], - "specfact-code-review": [], # marketplace-only; installed via MARKETPLACE_BUNDLE_IDS + "specfact-code-review": [], } BUNDLE_DEPENDENCIES: dict[str, list[str]] = { "specfact-spec": ["specfact-project"], + "specfact-code-review": ["specfact-codebase"], } +BUNDLE_DISPLAY: dict[str, str] = { + "specfact-project": "Project lifecycle (project, plan, import, sync, migrate)", + "specfact-backlog": "Backlog management (backlog, policy)", + "specfact-codebase": "Codebase quality (analyze, drift, validate, repro)", + "specfact-spec": "Spec & API (contract, spec, sdd, generate)", + "specfact-govern": "Governance (enforce, patch)", + "specfact-code-review": "Scored code review (code review gate)", +} + + +def _emit_init_bundle_progress() -> bool: + """Return True when init should print progress (suppressed during pytest).""" + return os.environ.get("PYTEST_CURRENT_TEST") is None + @require(lambda profile: isinstance(profile, str) and profile.strip() != "", "profile must be non-empty string") @ensure(lambda result: isinstance(result, list), "result must be list of bundle ids") @@ -82,7 +99,7 @@ def resolve_install_bundles(install_arg: str) -> list[str]: if not raw: return [] if raw.lower() == "all": - return list(CANONICAL_BUNDLES) + return list(_INSTALL_ALL_BUNDLES) seen: set[str] = set() result: list[str] = [] for part in raw.split(","): @@ -131,16 +148,22 @@ def install_bundles_for_init( *, non_interactive: bool = False, trust_non_official: bool = False, + show_progress: bool = True, ) -> None: """Install the given bundles (and their dependencies) via bundled module installer.""" + from rich.console import Console + from specfact_cli.registry.module_installer import ( USER_MODULES_ROOT as DEFAULT_ROOT, install_bundled_module, + install_module, ) root = install_root or DEFAULT_ROOT to_install: list[str] = [] seen: set[str] = set() + emit = show_progress and _emit_init_bundle_progress() + console = Console() def _add_bundle(bid: str) -> None: if bid in seen: @@ -155,15 +178,30 @@ def _add_bundle(bid: str) -> None: continue _add_bundle(bid) + if emit and to_install: + bundle_list = ", ".join(to_install) + console.print(f"[cyan]→[/cyan] Seeding workflow bundles: [bold]{bundle_list}[/bold]") + console.print("[dim] (copying bundled modules into your user module directory)[/dim]") + for bid in to_install: - marketplace_id = MARKETPLACE_BUNDLE_IDS.get(bid) - if marketplace_id is not None: + module_names = BUNDLE_TO_MODULE_NAMES.get(bid, []) + bundle_label = BUNDLE_DISPLAY.get(bid, bid) + marketplace_id = MARKETPLACE_ONLY_BUNDLES.get(bid) + if emit: + if module_names or marketplace_id: + console.print(f"[cyan]→[/cyan] Bundle [bold]{bid}[/bold] — {bundle_label}") + else: + console.print( + f"[yellow]→[/yellow] Bundle [bold]{bid}[/bold] has no bundled modules in this CLI; " + f"install with [bold]specfact module install nold-ai/{bid}[/bold] when online." + ) + for module_name in module_names: + if emit: + console.print(f"[dim] ·[/dim] Installing module [bold]{module_name}[/bold] …") try: - from specfact_cli.registry.module_installer import install_module - - install_module( - marketplace_id, - install_root=root, + installed = install_bundled_module( + module_name, + root, trust_non_official=trust_non_official, non_interactive=non_interactive, ) @@ -172,32 +210,61 @@ def _add_bundle(bid: str) -> None: logger = get_bridge_logger(__name__) logger.warning( - "Marketplace bundle install failed for %s: %s.", - bid, + "Bundle install failed for %s: %s. Dependency resolver may be unavailable.", + module_name, e, ) + if emit: + console.print( + f"[red]✗[/red] Failed on module [bold]{module_name}[/bold] from bundle [bold]{bid}[/bold]: {e}" + ) + console.print( + "[dim] Check disk space and permissions under ~/.specfact/modules, " + "or retry if a transient I/O error.[/dim]" + ) raise - continue - - module_names = BUNDLE_TO_MODULE_NAMES.get(bid, []) - for module_name in module_names: + if installed: + if emit: + console.print(f"[green] ✓[/green] {module_name} ready") + elif emit: + console.print( + f"[yellow] ⚠[/yellow] {module_name} is not bundled in this CLI build; " + f"try [bold]specfact module install nold-ai/{bid}[/bold] when online." + ) + if marketplace_id: + if emit: + console.print(f"[dim] ·[/dim] Installing marketplace module [bold]{marketplace_id}[/bold] …") try: - install_bundled_module( - module_name, - root, - trust_non_official=trust_non_official, + install_module( + marketplace_id, + install_root=root, non_interactive=non_interactive, + trust_non_official=trust_non_official, ) except Exception as e: from specfact_cli.common import get_bridge_logger logger = get_bridge_logger(__name__) logger.warning( - "Bundle install failed for %s: %s. Dependency resolver may be unavailable.", - module_name, + "Marketplace bundle install failed for %s: %s.", + marketplace_id, e, ) + if emit: + console.print( + f"[red]✗[/red] Failed on marketplace module [bold]{marketplace_id}[/bold] " + f"from bundle [bold]{bid}[/bold]: {e}" + ) + console.print( + "[dim] Check network access and permissions under ~/.specfact/modules, " + "or retry if a transient error.[/dim]" + ) raise + if emit: + console.print(f"[green] ✓[/green] {marketplace_id.split('/', 1)[1]} ready") + + if emit and to_install: + console.print(f"[green]✓[/green] Installed: {', '.join(to_install)}") @ensure(lambda result: isinstance(result, list) and len(result) > 0, "Must return non-empty list of profile names") @@ -212,14 +279,6 @@ def get_valid_bundle_aliases() -> list[str]: return [*sorted(BUNDLE_ALIAS_TO_CANONICAL), "all"] -BUNDLE_DISPLAY: dict[str, str] = { - "specfact-project": "Project lifecycle (project, plan, import, sync, migrate)", - "specfact-backlog": "Backlog management (backlog, policy)", - "specfact-codebase": "Codebase quality (analyze, drift, validate, repro)", - "specfact-spec": "Spec & API (contract, spec, sdd, generate)", - "specfact-govern": "Governance (enforce, patch)", -} - PROFILE_DISPLAY_ORDER: list[tuple[str, str]] = [ ("solo-developer", "Solo developer"), ("backlog-team", "Backlog team"), diff --git a/src/specfact_cli/modules/module_registry/module-package.yaml b/src/specfact_cli/modules/module_registry/module-package.yaml index 99a8a647..6ab99ec2 100644 --- a/src/specfact_cli/modules/module_registry/module-package.yaml +++ b/src/specfact_cli/modules/module_registry/module-package.yaml @@ -1,5 +1,5 @@ name: module-registry -version: 0.1.13 +version: 0.1.14 commands: - module category: core @@ -17,5 +17,5 @@ publisher: description: 'Manage modules: search, list, show, install, and upgrade.' license: Apache-2.0 integrity: - checksum: sha256:2925d4f84696a6404abd0ed3ba1a905a0a9e74a8343ab8568a4eb69f071b9bfe - signature: 3bmXM6yHE+E/w+cHnCIKBnjlZWm+IYQpff6UX4mSD/GWovgq5BylipvKXpp7Z5OHR+ls6pOdT4o/9gMmcTS9Cw== + checksum: sha256:de47d37a18c4509a043036520d645f46419c56f0756e26cd193b23df7a3955fb + signature: z1s/9AKFJFLROy7RoV/Nk0zPaaPWNjsnNvHb0mw9DS3tbc8q0WDuZVVFvLAyvln3yi1YulQMIRJUFlSI8GYnCQ== diff --git a/src/specfact_cli/modules/module_registry/src/commands.py b/src/specfact_cli/modules/module_registry/src/commands.py index 7cc84368..1ada19d5 100644 --- a/src/specfact_cli/modules/module_registry/src/commands.py +++ b/src/specfact_cli/modules/module_registry/src/commands.py @@ -316,10 +316,14 @@ def install( if not module_ids: console.print("[red]At least one module id is required.[/red]") raise typer.Exit(1) + if version is not None and sum(1 for mid in module_ids if mid.strip()) > 1: + console.print( + "[red]--version applies to a single module; install one module at a time or omit --version.[/red]" + ) + raise typer.Exit(1) scope_normalized, source_normalized = _parse_install_scope_and_source(scope, source) target_root = _resolve_install_target_root(scope_normalized, repo) discovered_by_name = {entry.metadata.name: entry for entry in discover_all_modules()} - failed: list[str] = [] for module_id in module_ids: if not module_id.strip(): continue @@ -336,9 +340,7 @@ def install( discovered_by_name, ) if not success: - failed.append(module_id) - if failed: - raise typer.Exit(1) + raise typer.Exit(1) def _normalize_uninstall_module_name(module_name: str) -> str: diff --git a/src/specfact_cli/registry/dependency_resolver.py b/src/specfact_cli/registry/dependency_resolver.py index 0a1d4bc1..56d69b2d 100644 --- a/src/specfact_cli/registry/dependency_resolver.py +++ b/src/specfact_cli/registry/dependency_resolver.py @@ -21,6 +21,10 @@ class DependencyConflictError(Exception): """Raised when pip dependency resolution detects conflicting version constraints.""" +class PipDependencyValidationUnavailableError(RuntimeError): + """Raised when pip is unavailable and pip dependency validation must not be skipped.""" + + @beartype def _pip_tools_available() -> bool: """Return True if pip-compile is available.""" @@ -75,20 +79,25 @@ def _pip_module_available() -> bool: @beartype -def _run_basic_resolver(constraints: list[str]) -> list[str]: +def _run_basic_resolver(constraints: list[str], *, allow_unvalidated: bool = False) -> list[str]: """Fallback: use pip's resolver (e.g. pip install --dry-run). Returns best-effort pinned list. - When pip is not available (e.g. uvx environment), skips validation and returns constraints as-is. - Pip packages will be validated at actual install time by the host package manager. + When pip is not available (e.g. uvx environment), validation is skipped only if + ``allow_unvalidated`` is True; otherwise :class:`PipDependencyValidationUnavailableError` is raised. """ if not constraints: return [] if not _pip_module_available(): - logger.warning( - "pip is not available in the current environment (e.g. uvx). " - "Skipping pip dependency validation — packages will be checked at install time." + if allow_unvalidated: + logger.warning( + "pip is not available in the current environment (e.g. uvx). " + "Skipping pip dependency validation — packages will be checked at install time." + ) + return constraints + raise PipDependencyValidationUnavailableError( + "pip is not available in this environment; cannot validate pip dependency constraints. " + "Install pip, or invoke resolution from a flow that explicitly allows unvalidated constraints." ) - return constraints logger.warning("pip-tools not found, using basic resolver") with tempfile.TemporaryDirectory() as tmp: reqs = Path(tmp) / "requirements.in" @@ -125,11 +134,20 @@ def _collect_constraints(modules: list[ModulePackageMetadata]) -> list[str]: @beartype @require(lambda modules: all(isinstance(m, ModulePackageMetadata) for m in modules)) @ensure(lambda result: isinstance(result, list)) -def resolve_dependencies(modules: list[ModulePackageMetadata]) -> list[str]: - """Resolve pip dependencies across all modules; use pip-compile or fallback. Raises DependencyConflictError on conflict.""" +def resolve_dependencies( + modules: list[ModulePackageMetadata], + *, + allow_unvalidated: bool = False, +) -> list[str]: + """Resolve pip dependencies across all modules; use pip-compile or fallback. + + Raises DependencyConflictError on conflict. + When pip-tools and pip are unavailable, raises PipDependencyValidationUnavailableError unless + ``allow_unvalidated`` is True (supported pip-free flows such as module install under uvx). + """ constraints = _collect_constraints(modules) if not constraints: return [] if _pip_tools_available(): return _run_pip_compile(constraints) - return _run_basic_resolver(constraints) + return _run_basic_resolver(constraints, allow_unvalidated=allow_unvalidated) diff --git a/src/specfact_cli/registry/module_installer.py b/src/specfact_cli/registry/module_installer.py index dcf4b6cb..064effba 100644 --- a/src/specfact_cli/registry/module_installer.py +++ b/src/specfact_cli/registry/module_installer.py @@ -141,10 +141,21 @@ def _extract_bundle_dependencies(metadata: dict[str, Any]) -> list[str]: if not isinstance(raw_dependencies, list): return [] dependencies: list[str] = [] - for value in raw_dependencies: - dep = str(value.get("id", "")).strip() if isinstance(value, dict) else str(value).strip() - if not dep: - continue + for index, value in enumerate(raw_dependencies): + if isinstance(value, dict): + raw_id = value.get("id") + if raw_id is None or not str(raw_id).strip(): + raise ValueError( + f"bundle_dependencies[{index}]: object entry must include non-empty 'id' " + f"(invalid manifest; got {value!r})" + ) + dep = str(raw_id).strip() + else: + dep = str(value).strip() + if not dep: + raise ValueError( + f"bundle_dependencies[{index}]: string entry must be non-empty (invalid manifest; got {value!r})" + ) _validate_marketplace_namespace_format(dep) dependencies.append(dep) return dependencies @@ -790,7 +801,7 @@ def _install_bundle_dependencies_for_module( try: all_metas = [e.metadata for e in discover_all_modules()] all_metas.append(metadata_obj) - resolve_dependencies(all_metas) + resolve_dependencies(all_metas, allow_unvalidated=True) except DependencyConflictError as dep_err: if not force: raise ValueError( diff --git a/tests/e2e/test_core_slimming_e2e.py b/tests/e2e/test_core_slimming_e2e.py index 861476b6..5a51ca9e 100644 --- a/tests/e2e/test_core_slimming_e2e.py +++ b/tests/e2e/test_core_slimming_e2e.py @@ -2,6 +2,7 @@ from __future__ import annotations +from collections.abc import Generator from pathlib import Path import pytest @@ -9,7 +10,7 @@ @pytest.fixture(autouse=True) -def _reset_registry(): +def _reset_registry() -> Generator[None, None, None]: """Ensure registry is cleared so E2E sees predictable bootstrap state when we re-bootstrap.""" from specfact_cli.registry import CommandRegistry diff --git a/tests/integration/test_category_group_routing.py b/tests/integration/test_category_group_routing.py index 58cca9f2..3c2c4103 100644 --- a/tests/integration/test_category_group_routing.py +++ b/tests/integration/test_category_group_routing.py @@ -48,9 +48,10 @@ def test_backlog_help_lists_subcommands() -> None: assert "backlog" in out assert "policy" in out or "ceremony" in out return - assert "command 'backlog' is not installed." in out - assert "specfact init --profile" in out - assert "module install" in out + merged = " ".join(out.split()) + assert "module 'nold-ai/specfact-backlog' is not installed." in merged + assert "specfact module install nold-ai/specfact-backlog" in merged + assert "specfact init --profile " in merged def test_validate_flat_command_is_not_available() -> None: diff --git a/tests/integration/test_core_slimming.py b/tests/integration/test_core_slimming.py index b111e796..4b5a420e 100644 --- a/tests/integration/test_core_slimming.py +++ b/tests/integration/test_core_slimming.py @@ -2,6 +2,7 @@ from __future__ import annotations +from collections.abc import Generator from pathlib import Path from unittest.mock import MagicMock, patch @@ -23,7 +24,7 @@ @pytest.fixture(autouse=True) -def _reset_registry(): +def _reset_registry() -> Generator[None, None, None]: """Reset registry before each test so bootstrap state is predictable.""" CommandRegistry._clear_for_testing() yield diff --git a/tests/unit/cli/test_lean_help_output.py b/tests/unit/cli/test_lean_help_output.py index 18eff08c..33b8ad2c 100644 --- a/tests/unit/cli/test_lean_help_output.py +++ b/tests/unit/cli/test_lean_help_output.py @@ -81,12 +81,25 @@ def test_root_group_unknown_bundle_command_shows_install_guidance(capsys: pytest assert exc_info.value.code == 1 captured = capsys.readouterr() - assert "Command 'backlog' is not installed." in captured.out - assert ( - "specfact init --profile " in captured.out - or "uvx specfact-cli init --profile solo-developer" in captured.out - ) - assert "module install " in captured.out + out = " ".join(captured.out.split()) + assert "Module 'nold-ai/specfact-backlog' is not installed." in out + assert "specfact module install nold-ai/specfact-backlog" in out + assert "specfact init --profile " in out + + +def test_root_group_unknown_code_shows_specfact_codebase_module(capsys: pytest.CaptureFixture[str]) -> None: + """Missing `code` group should name nold-ai/specfact-codebase (not the VS Code `code` CLI).""" + group = _RootCLIGroup(name="specfact") + ctx = click.Context(group) + + with pytest.raises(SystemExit) as exc_info: + group.resolve_command(ctx, ["code", "--help"]) + + assert exc_info.value.code == 1 + captured = capsys.readouterr() + out = " ".join(captured.out.split()) + assert "Module 'nold-ai/specfact-codebase' is not installed." in out + assert "specfact module install nold-ai/specfact-codebase" in out def test_specfact_help_with_all_bundles_installed_shows_eight_commands( diff --git a/tests/unit/importers/test_speckit_converter.py b/tests/unit/importers/test_speckit_converter.py index 6e2575b3..53d9f4d9 100644 --- a/tests/unit/importers/test_speckit_converter.py +++ b/tests/unit/importers/test_speckit_converter.py @@ -119,7 +119,8 @@ def test_generate_github_action(self, tmp_path: Path) -> None: # Verify workflow content (business logic) content = output_path.read_text() assert "SpecFact CLI Validation" in content - assert "specfact repro" in content + assert "specfact code repro" in content + assert "specfact init --profile solo-developer" in content def test_convert_to_speckit_sequential_numbering(self, tmp_path: Path) -> None: """Test convert_to_speckit uses sequential numbering when feature keys lack numbers.""" diff --git a/tests/unit/modules/init/test_first_run_selection.py b/tests/unit/modules/init/test_first_run_selection.py index 1b305ded..f3bb8d83 100644 --- a/tests/unit/modules/init/test_first_run_selection.py +++ b/tests/unit/modules/init/test_first_run_selection.py @@ -61,7 +61,7 @@ def test_install_backlog_codebase_resolves_to_two_bundles() -> None: assert len(bundles) == 2 -def test_install_all_resolves_to_all_canonical_bundles() -> None: +def test_install_all_resolves_to_all_five_bundles() -> None: bundles = frs.resolve_install_bundles("all") assert set(bundles) == { "specfact-project", @@ -69,9 +69,8 @@ def test_install_all_resolves_to_all_canonical_bundles() -> None: "specfact-codebase", "specfact-spec", "specfact-govern", - "specfact-code-review", } - assert len(bundles) == 6 + assert len(bundles) == 5 def test_install_unknown_bundle_raises() -> None: @@ -252,9 +251,7 @@ def _fake_install_bundles(bundle_ids: list[str], install_root: Path, **kwargs: o assert set(install_calls[0]) == {"specfact-backlog", "specfact-codebase"} -def test_init_install_all_calls_installer_with_all_canonical_bundles( - monkeypatch: pytest.MonkeyPatch, tmp_path: Path -) -> None: +def test_init_install_all_calls_installer_with_five_bundles(monkeypatch: pytest.MonkeyPatch, tmp_path: Path) -> None: install_calls: list[list[str]] = [] def _fake_install_bundles(bundle_ids: list[str], install_root: Path, **kwargs: object) -> None: @@ -279,14 +276,13 @@ def _fake_install_bundles(bundle_ids: list[str], install_root: Path, **kwargs: o ) assert result.exit_code == 0, result.output assert len(install_calls) == 1 - assert len(install_calls[0]) == 6 + assert len(install_calls[0]) == 5 assert set(install_calls[0]) == { "specfact-project", "specfact-backlog", "specfact-codebase", "specfact-spec", "specfact-govern", - "specfact-code-review", } @@ -403,7 +399,7 @@ def _record_install(module_name: str, target_root: Path, **kwargs: object) -> bo "specfact_cli.registry.module_installer.install_bundled_module", _record_install, ) - frs.install_bundles_for_init(["specfact-spec"], install_root=tmp_path) + frs.install_bundles_for_init(["specfact-spec"], install_root=tmp_path, show_progress=False) project_module_names = set(frs.BUNDLE_TO_MODULE_NAMES.get("specfact-project", [])) spec_module_names = set(frs.BUNDLE_TO_MODULE_NAMES.get("specfact-spec", [])) installed_set = set(installed_modules) diff --git a/tests/unit/specfact_cli/modules/test_multi_module_install_uninstall.py b/tests/unit/specfact_cli/modules/test_multi_module_install_uninstall.py index ef024e30..23c3eb51 100644 --- a/tests/unit/specfact_cli/modules/test_multi_module_install_uninstall.py +++ b/tests/unit/specfact_cli/modules/test_multi_module_install_uninstall.py @@ -7,6 +7,7 @@ from __future__ import annotations from collections.abc import Generator +from dataclasses import dataclass from pathlib import Path from typing import Any from unittest.mock import patch @@ -35,6 +36,17 @@ def _reset_registry_and_root_app() -> Generator[None, None, None]: runner = CliRunner() +@dataclass +class MockMetadata: + name: str + + +@dataclass +class MockEntry: + metadata: MockMetadata + source: str + + def _unstyled(text: str) -> str: return click.unstyle(text) @@ -78,6 +90,24 @@ def _fake_install(module_id: str, **kwargs: object) -> Path: assert "nold-ai/specfact-code-review" in installed or "specfact-code-review" in str(installed) +def test_module_install_rejects_version_with_multiple_module_ids() -> None: + """--version is only valid with a single module id.""" + result = runner.invoke( + app, + [ + "module", + "install", + "nold-ai/specfact-codebase", + "nold-ai/specfact-code-review", + "--version", + "1.0.0", + ], + ) + assert result.exit_code == 1 + out = _unstyled(result.output).lower() + assert "single" in out and "version" in out + + def test_module_install_single_still_works() -> None: """Single-module install must still work after multi-install change.""" installed: list[str] = [] @@ -110,6 +140,43 @@ def _fake_install(module_id: str, **kwargs: object) -> Path: assert len(installed) == 1 +def test_module_install_multi_aborts_on_first_failure_without_installing_rest() -> None: + """Multi-install: if module A fails, do not attempt B (avoid partial surprise state).""" + installed: list[str] = [] + + def _fake_install(module_id: str, **kwargs: object) -> Path: + if "codebase" in module_id: + raise RuntimeError("mock install failure for first module") + installed.append(module_id) + return Path("/tmp/ok") + + with ( + patch( + "specfact_cli.modules.module_registry.src.commands.install_module", + side_effect=_fake_install, + ), + patch( + "specfact_cli.modules.module_registry.src.commands.discover_all_modules", + return_value=[], + ), + patch( + "specfact_cli.modules.module_registry.src.commands._install_skip_if_already_satisfied", + return_value=False, + ), + patch( + "specfact_cli.modules.module_registry.src.commands._try_install_bundled_module", + return_value=False, + ), + ): + result = runner.invoke( + app, + ["module", "install", "nold-ai/specfact-codebase", "nold-ai/specfact-code-review"], + ) + + assert result.exit_code == 1 + assert installed == [], "Second module must not install after first fails" + + def test_module_install_multi_skips_already_installed_and_continues() -> None: """Multi-install: if A is already installed, skip A but still install B; exit 0.""" installed: list[str] = [] @@ -163,10 +230,8 @@ def _fake_uninstall(module_name: str, **kwargs: object) -> None: patch( "specfact_cli.modules.module_registry.src.commands.discover_all_modules", return_value=[ - type("E", (), {"metadata": type("M", (), {"name": "specfact-codebase"})(), "source": "marketplace"})(), - type( - "E", (), {"metadata": type("M", (), {"name": "specfact-code-review"})(), "source": "marketplace"} - )(), + MockEntry(MockMetadata("specfact-codebase"), "marketplace"), + MockEntry(MockMetadata("specfact-code-review"), "marketplace"), ], ), ): @@ -183,7 +248,7 @@ def test_module_uninstall_single_still_works() -> None: patch( "specfact_cli.modules.module_registry.src.commands.discover_all_modules", return_value=[ - type("E", (), {"metadata": type("M", (), {"name": "specfact-codebase"})(), "source": "marketplace"})(), + MockEntry(MockMetadata("specfact-codebase"), "marketplace"), ], ), ): @@ -200,7 +265,7 @@ def _fake_uninstall(module_name: str, **kwargs: object) -> None: uninstalled.append(module_name) discovered = [ - type("E", (), {"metadata": type("M", (), {"name": "specfact-code-review"})(), "source": "marketplace"})(), + MockEntry(MockMetadata("specfact-code-review"), "marketplace"), ] with ( diff --git a/tests/unit/specfact_cli/registry/test_command_registry.py b/tests/unit/specfact_cli/registry/test_command_registry.py index 00fd6ce3..694a304f 100644 --- a/tests/unit/specfact_cli/registry/test_command_registry.py +++ b/tests/unit/specfact_cli/registry/test_command_registry.py @@ -7,6 +7,7 @@ from __future__ import annotations import os +from collections.abc import Generator from pathlib import Path import pytest @@ -30,7 +31,7 @@ def _subprocess_env() -> dict[str, str]: @pytest.fixture(autouse=True) -def _reset_registry(): +def _reset_registry() -> Generator[None, None, None]: """Reset registry before each test so tests are isolated.""" CommandRegistry._clear_for_testing() yield @@ -191,10 +192,13 @@ def test_cli_backlog_help_exits_zero(): ) if result.returncode == 0: return - merged = (result.stdout or "") + "\n" + (result.stderr or "") - assert "Command 'backlog' is not installed." in merged, (result.stdout, result.stderr) - assert "specfact init --profile" in merged, (result.stdout, result.stderr) - assert "module install" in merged, (result.stdout, result.stderr) + assert result.returncode == 1, (result.stdout, result.stderr) + merged = " ".join(((result.stdout or "") + "\n" + (result.stderr or "")).split()) + assert "Module 'nold-ai/specfact-backlog' is not installed." in merged, (result.stdout, result.stderr) + assert "The backlog command group is provided by that module." in merged, (result.stdout, result.stderr) + assert "specfact module install nold-ai/specfact-backlog" in merged, (result.stdout, result.stderr) + assert "specfact init --profile " in merged, (result.stdout, result.stderr) + assert "to install bundles." in merged, (result.stdout, result.stderr) def test_cli_module_help_exits_zero(): diff --git a/tests/unit/specfact_cli/registry/test_dependency_resolver_pip_free.py b/tests/unit/specfact_cli/registry/test_dependency_resolver_pip_free.py index a17b7d4c..7d638586 100644 --- a/tests/unit/specfact_cli/registry/test_dependency_resolver_pip_free.py +++ b/tests/unit/specfact_cli/registry/test_dependency_resolver_pip_free.py @@ -6,19 +6,24 @@ from __future__ import annotations +import subprocess from typing import cast from unittest.mock import patch +import pytest + from specfact_cli.models.module_package import ModulePackageMetadata -from specfact_cli.registry.dependency_resolver import _run_basic_resolver, resolve_dependencies +from specfact_cli.registry.dependency_resolver import ( + PipDependencyValidationUnavailableError, + _run_basic_resolver, + resolve_dependencies, +) def test_run_basic_resolver_returns_constraints_when_pip_unavailable() -> None: """When pip is unavailable (uvx environment), basic resolver must not raise — return constraints.""" constraints = ["requests>=2.28.0", "pyyaml>=6.0"] - import subprocess - def _pip_not_available(*cmd_args: object, **kwargs: object) -> subprocess.CompletedProcess[str]: return subprocess.CompletedProcess( args=cast(list[str | bytes], [str(a) for a in cmd_args]), @@ -28,12 +33,30 @@ def _pip_not_available(*cmd_args: object, **kwargs: object) -> subprocess.Comple ) with patch("specfact_cli.registry.dependency_resolver.subprocess.run", side_effect=_pip_not_available): - result = _run_basic_resolver(constraints) + result = _run_basic_resolver(constraints, allow_unvalidated=True) # Must not raise; must return something (constraints or empty list) assert isinstance(result, list), "Should return a list even when pip is unavailable" +def test_run_basic_resolver_raises_when_pip_unavailable_without_allow_unvalidated() -> None: + """Without allow_unvalidated, missing pip must not silently skip validation.""" + + def _pip_not_available(*cmd_args: object, **kwargs: object) -> subprocess.CompletedProcess[str]: + return subprocess.CompletedProcess( + args=cast(list[str | bytes], [str(a) for a in cmd_args]), + returncode=1, + stdout="", + stderr="No module named pip", + ) + + with ( + patch("specfact_cli.registry.dependency_resolver.subprocess.run", side_effect=_pip_not_available), + pytest.raises(PipDependencyValidationUnavailableError), + ): + _run_basic_resolver(["requests>=1"], allow_unvalidated=False) + + def test_resolve_dependencies_does_not_raise_when_pip_unavailable() -> None: """resolve_dependencies must complete without raising when pip and pip-compile are both unavailable.""" module = ModulePackageMetadata( @@ -53,9 +76,31 @@ def test_resolve_dependencies_does_not_raise_when_pip_unavailable() -> None: result = resolve_dependencies([module]) mock_basic.assert_called_once() + assert mock_basic.call_args.kwargs.get("allow_unvalidated") is False assert isinstance(result, list) +def test_resolve_dependencies_passes_allow_unvalidated_to_basic_resolver() -> None: + """Module install path requests unvalidated resolution when pip is missing (uvx).""" + module = ModulePackageMetadata( + name="test-module", + version="0.1.0", + commands=["test"], + pip_dependencies=["requests>=2.28.0"], + ) + + with ( + patch("specfact_cli.registry.dependency_resolver._pip_tools_available", return_value=False), + patch( + "specfact_cli.registry.dependency_resolver._run_basic_resolver", + return_value=["requests>=2.28.0"], + ) as mock_basic, + ): + resolve_dependencies([module], allow_unvalidated=True) + + assert mock_basic.call_args.kwargs.get("allow_unvalidated") is True + + def test_resolve_dependencies_empty_modules_returns_empty() -> None: """resolve_dependencies with no pip deps must return [] without calling pip.""" module = ModulePackageMetadata( diff --git a/tests/unit/specfact_cli/registry/test_help_cache.py b/tests/unit/specfact_cli/registry/test_help_cache.py index 242ca175..c39885dd 100644 --- a/tests/unit/specfact_cli/registry/test_help_cache.py +++ b/tests/unit/specfact_cli/registry/test_help_cache.py @@ -9,6 +9,7 @@ import os import subprocess import sys +from collections.abc import Generator from pathlib import Path import pytest @@ -51,7 +52,7 @@ def registry_dir(tmp_path: Path): @pytest.fixture(autouse=True) -def _reset_registry(): +def _reset_registry() -> Generator[None, None, None]: """Reset registry before each test.""" CommandRegistry._clear_for_testing() yield diff --git a/tests/unit/specfact_cli/registry/test_module_packages.py b/tests/unit/specfact_cli/registry/test_module_packages.py index 77135e45..ac936d0a 100644 --- a/tests/unit/specfact_cli/registry/test_module_packages.py +++ b/tests/unit/specfact_cli/registry/test_module_packages.py @@ -9,6 +9,7 @@ import logging import os +from collections.abc import Generator from pathlib import Path import pytest @@ -34,7 +35,7 @@ @pytest.fixture(autouse=True) -def _reset_registry(): +def _reset_registry() -> Generator[None, None, None]: CommandRegistry._clear_for_testing() yield CommandRegistry._clear_for_testing() diff --git a/tests/unit/specfact_cli/registry/test_versioned_bundle_deps.py b/tests/unit/specfact_cli/registry/test_versioned_bundle_deps.py index 7bbd9487..700c20ea 100644 --- a/tests/unit/specfact_cli/registry/test_versioned_bundle_deps.py +++ b/tests/unit/specfact_cli/registry/test_versioned_bundle_deps.py @@ -8,6 +8,8 @@ from typing import Any +import pytest + from specfact_cli.registry.module_installer import _extract_bundle_dependencies @@ -53,10 +55,29 @@ def test_extract_bundle_dependencies_missing_key() -> None: assert deps == [] +def test_extract_bundle_dependencies_rejects_object_without_id() -> None: + """Malformed bundle_dependencies objects must fail manifest validation, not be skipped.""" + metadata: dict[str, Any] = {"bundle_dependencies": [{"version": ">=1.0.0"}]} + with pytest.raises(ValueError, match="non-empty 'id'"): + _extract_bundle_dependencies(metadata) + + +def test_extract_bundle_dependencies_rejects_empty_id_object() -> None: + metadata: dict[str, Any] = {"bundle_dependencies": [{"id": "", "version": ">=1.0.0"}]} + with pytest.raises(ValueError, match="non-empty 'id'"): + _extract_bundle_dependencies(metadata) + + +def test_extract_bundle_dependencies_rejects_empty_string_entry() -> None: + metadata: dict[str, Any] = {"bundle_dependencies": ["nold-ai/specfact-project", ""]} + with pytest.raises(ValueError, match="string entry must be non-empty"): + _extract_bundle_dependencies(metadata) + + # ── core_compatibility actionable error ─────────────────────────────────────── -def test_validate_install_manifest_constraints_actionable_error(tmp_path: object) -> None: +def test_validate_install_manifest_constraints_actionable_error() -> None: """core_compatibility mismatch must produce actionable message, not bare ValueError.""" from specfact_cli.registry.module_installer import _validate_install_manifest_constraints @@ -66,8 +87,6 @@ def test_validate_install_manifest_constraints_actionable_error(tmp_path: object "core_compatibility": ">=99.0.0,<100.0.0", # impossibly high — always fails } - import pytest - with pytest.raises((ValueError, SystemExit)) as exc_info: _validate_install_manifest_constraints( metadata, diff --git a/tools/smart_test_coverage.py b/tools/smart_test_coverage.py index c447ddf7..92a2e292 100755 --- a/tools/smart_test_coverage.py +++ b/tools/smart_test_coverage.py @@ -681,6 +681,58 @@ def _parse_total_coverage_percent(output_lines: list[str]) -> float: pass return coverage_percentage + def _coverage_data_file_path(self) -> Path: + """Path to the coverage data file (matches ``[tool.coverage.run] data_file`` in pyproject.toml).""" + return self.project_root / "logs" / "tests" / "coverage" / ".coverage" + + def _run_coverage_report_subprocess(self) -> list[str]: + """Run ``coverage report -m`` and return output lines (for parsing TOTAL %%).""" + cmd = [sys.executable, "-m", "coverage", "report", "-m"] + try: + proc = subprocess.run( + cmd, + cwd=self.project_root, + capture_output=True, + text=True, + timeout=600, + check=False, + ) + except (OSError, subprocess.TimeoutExpired) as exc: + logger.warning("coverage report subprocess failed: %s", exc) + return [] + text = (proc.stdout or "") + (proc.stderr or "") + if proc.returncode not in (0, 2) and not text.strip(): + logger.warning("coverage report failed with rc=%s", proc.returncode) + return [] + lines_out: list[str] = [] + for line in text.splitlines(keepends=True): + if not line.endswith("\n"): + line += "\n" + lines_out.append(line) + return lines_out + + def _append_coverage_report_if_needed(self, output_lines: list[str], log_file: TextIO) -> None: + """Append ``coverage report`` output when ``coverage run`` left no parsable TOTAL line. + + Hatch ``run-cov`` uses ``coverage run -m pytest`` without printing a report; line coverage + only appears after ``coverage report``. Without this, smart-test always logged 0%% coverage. + """ + if self._parse_total_coverage_percent(output_lines) > 0: + return + if not self._coverage_data_file_path().is_file(): + logger.debug( + "Skipping coverage report append: no data file at %s", + self._coverage_data_file_path(), + ) + return + report_lines = self._run_coverage_report_subprocess() + if not report_lines: + return + for line in report_lines: + sys.stdout.write(line) + log_file.write(line) + output_lines.append(line) + @staticmethod def _pytest_count_from_banner_line(line: str) -> int | None: """Parse count from ``======== N passed`` style summary lines.""" @@ -751,11 +803,14 @@ def _run_coverage_hatch_or_pytest(self, log_file: TextIO) -> tuple[int | None, l pytest_cmd = self._build_pytest_cmd(with_coverage=True, parallel=True) rc2, out2, _ = self._popen_stream_to_log(pytest_cmd, log_file, timeout=timeout_full) output_lines.extend(out2) + self._append_coverage_report_if_needed(output_lines, log_file) return rc2 if rc2 is not None else 1, output_lines + self._append_coverage_report_if_needed(output_lines, log_file) return rc, output_lines pytest_cmd = self._build_pytest_cmd(with_coverage=True, parallel=True) rc, out, _ = self._popen_stream_to_log(pytest_cmd, log_file, timeout=timeout_full) output_lines.extend(out) + self._append_coverage_report_if_needed(output_lines, log_file) return rc if rc is not None else 1, output_lines def _run_leveled_hatch_or_pytest( @@ -782,12 +837,18 @@ def _run_leveled_hatch_or_pytest( logger.debug("Executing fallback: %s", shlex.join(pytest_cmd)) rc2, out2, _ = self._popen_stream_to_log(pytest_cmd, log_file, timeout=timeout_seconds) output_lines.extend(out2) + if want_coverage: + self._append_coverage_report_if_needed(output_lines, log_file) return rc2 if rc2 is not None else 1, output_lines + if want_coverage: + self._append_coverage_report_if_needed(output_lines, log_file) return rc, output_lines pytest_cmd = self._build_pytest_cmd(with_coverage=want_coverage, extra_args=test_file_strings) logger.info("Hatch disabled; executing pytest directly: %s", shlex.join(pytest_cmd)) rc, out, _ = self._popen_stream_to_log(pytest_cmd, log_file, timeout=timeout_seconds) output_lines.extend(out) + if want_coverage: + self._append_coverage_report_if_needed(output_lines, log_file) return rc if rc is not None else 1, output_lines def _adjust_success_for_coverage_threshold( @@ -1192,7 +1253,9 @@ def _run_tests(self, test_files: list[Path], test_level: str) -> tuple[bool, int test_count = self._parse_pytest_test_count(output_lines) success = return_code == 0 - if test_level in ("integration", "e2e"): + # Integration, E2E, and contract "scenarios" use pytest without line coverage; do not + # parse TOTAL from output (would be 0%). Match integration/e2e advisory semantics. + if test_level in ("integration", "e2e", "scenarios"): coverage_percentage = 100.0 tested_coverage_percentage = 100.0 else: @@ -1474,7 +1537,7 @@ def show_latest_log(self) -> None: except Exception as e: logger.error("Error reading log file: %s", e) - @require(lambda test_level: test_level in {"unit", "folder", "integration", "e2e", "full", "auto"}) + @require(lambda self, test_level: test_level in {"unit", "folder", "integration", "e2e", "full", "auto"}) @ensure(lambda result: isinstance(result, bool), "run_smart_tests must return bool") def run_smart_tests(self, test_level: str = "auto", force: bool = False) -> bool: """Run tests with smart change detection and specified level.""" @@ -1485,11 +1548,7 @@ def run_smart_tests(self, test_level: str = "auto", force: bool = False) -> bool config_changed = self._has_config_changes() if source_changed or test_changed or config_changed or force: - ok, ran_any = self._run_changed_only() - if force and not ran_any: - logger.info("Force mode: no incremental tests ran — running full suite.") - return self._run_full_tests() - return ok + return self._run_changed_only() # No changes - use cached data status = self.get_status() logger.info( @@ -1503,7 +1562,7 @@ def run_smart_tests(self, test_level: str = "auto", force: bool = False) -> bool return self.run_tests_by_level(test_level) return self.run_tests_by_level(test_level) - @require(lambda test_level: test_level in {"unit", "folder", "integration", "e2e", "full", "auto"}) + @require(lambda self, test_level: test_level in {"unit", "folder", "integration", "e2e", "full", "auto"}) @ensure(lambda result: isinstance(result, bool), "run_tests_by_level must return bool") def run_tests_by_level(self, test_level: str) -> bool: """Run tests by specified level: unit, folder, integration, e2e, or full.""" @@ -1710,25 +1769,11 @@ def _run_full_tests(self) -> bool: self._update_cache(True, test_count, coverage_percentage, enforce_threshold=False) return success - def _run_changed_only(self) -> tuple[bool, bool]: + def _run_changed_only(self) -> bool: """Run only tests impacted by changes since last cached hashes. - - Returns: - (success, ran_any): whether the run succeeded, and whether any test - subprocess actually ran (False means everything was skipped as no-op). - - Unit: tests mapped from modified source files + directly modified unit tests - Integration/E2E: only directly modified tests - Baseline: if there is no ``last_full_run`` in cache, runs the full suite first. - ``run_smart_tests(..., force=True)`` may also fall back to full when incremental - mode would otherwise no-op. - """ - if not self.cache.get("last_full_run"): - logger.info( - "No prior full test run in cache — running full suite to establish baseline for incremental runs." - ) - return self._run_full_tests(), True - + No full-suite fallback here; CI should catch broader regressions.""" # Collect modified items modified_sources = self._get_modified_files() modified_tests = self._get_modified_test_files() @@ -1792,9 +1837,9 @@ def dedupe(paths: list[Path]) -> list[Path]: logger.info("No changed files detected that map to tests - skipping test execution") # Still keep cache timestamp to allow future git comparisons self._update_cache(True, 0, self.cache.get("coverage_percentage", 0.0), enforce_threshold=False) - return True, False + return True - return overall_success, True + return overall_success @require(lambda test_level: test_level in {"unit", "folder", "integration", "e2e", "full", "auto"}) @ensure(lambda result: isinstance(result, bool), "force_full_run must return bool") From bdef6f1489a3f213b79c2b6c59e3e158c8f1f221 Mon Sep 17 00:00:00 2001 From: Dominikus Nold Date: Thu, 2 Apr 2026 23:22:27 +0200 Subject: [PATCH 7/8] fix(tools): align _run_changed_only with tuple return and baseline full run - Return (success, ran_any) from _run_changed_only; run full suite when no last_full_run - run_smart_tests(auto, force): fall back to full tests when incremental ran nothing - Fix wow e2e fixture typing (Iterator[None]) for basedpyright Unblocks PR #477 CI: type-check, tests, lint job. Made-with: Cursor --- tests/e2e/test_wow_entrypoint.py | 3 ++- tools/smart_test_coverage.py | 26 ++++++++++++++++++-------- 2 files changed, 20 insertions(+), 9 deletions(-) diff --git a/tests/e2e/test_wow_entrypoint.py b/tests/e2e/test_wow_entrypoint.py index b9d76d0f..478ce085 100644 --- a/tests/e2e/test_wow_entrypoint.py +++ b/tests/e2e/test_wow_entrypoint.py @@ -8,6 +8,7 @@ from __future__ import annotations import subprocess +from collections.abc import Iterator from pathlib import Path from unittest.mock import patch @@ -20,7 +21,7 @@ @pytest.fixture(autouse=True) -def _reset_registry() -> None: +def _reset_registry() -> Iterator[None]: CommandRegistry._clear_for_testing() yield CommandRegistry._clear_for_testing() diff --git a/tools/smart_test_coverage.py b/tools/smart_test_coverage.py index 92a2e292..52ca8f3b 100755 --- a/tools/smart_test_coverage.py +++ b/tools/smart_test_coverage.py @@ -1548,7 +1548,10 @@ def run_smart_tests(self, test_level: str = "auto", force: bool = False) -> bool config_changed = self._has_config_changes() if source_changed or test_changed or config_changed or force: - return self._run_changed_only() + ok, ran_any = self._run_changed_only() + if force and not ran_any: + return self._run_full_tests() + return ok # No changes - use cached data status = self.get_status() logger.info( @@ -1769,11 +1772,15 @@ def _run_full_tests(self) -> bool: self._update_cache(True, test_count, coverage_percentage, enforce_threshold=False) return success - def _run_changed_only(self) -> bool: + def _run_changed_only(self) -> tuple[bool, bool]: """Run only tests impacted by changes since last cached hashes. - - Unit: tests mapped from modified source files + directly modified unit tests - - Integration/E2E: only directly modified tests - No full-suite fallback here; CI should catch broader regressions.""" + + Returns: + (success, ran_any): ``ran_any`` is False when no mapped tests ran (incremental no-op). + + When there is no ``last_full_run`` baseline and incremental work would run nothing, + runs a one-time full suite to establish coverage/hash baseline (avoids zero cached coverage). + """ # Collect modified items modified_sources = self._get_modified_files() modified_tests = self._get_modified_test_files() @@ -1834,12 +1841,15 @@ def dedupe(paths: list[Path]) -> list[Path]: overall_success = overall_success and ok if not ran_any: + if not self.cache.get("last_full_run"): + logger.info("No incremental baseline; running full test suite once to establish cache…") + success = self._run_full_tests() + return success, True logger.info("No changed files detected that map to tests - skipping test execution") - # Still keep cache timestamp to allow future git comparisons self._update_cache(True, 0, self.cache.get("coverage_percentage", 0.0), enforce_threshold=False) - return True + return True, False - return overall_success + return overall_success, True @require(lambda test_level: test_level in {"unit", "folder", "integration", "e2e", "full", "auto"}) @ensure(lambda result: isinstance(result, bool), "force_full_run must return bool") From a94ed5cc6b4e87b961e998b6b22df430791ffc63 Mon Sep 17 00:00:00 2001 From: Dominikus Nold Date: Fri, 3 Apr 2026 00:28:13 +0200 Subject: [PATCH 8/8] chore(release): bump to 0.45.1 and update OpenSpec tasks status - Sync version across pyproject.toml, setup.py, and __init__ modules - Changelog: 0.45.1 entry for dependency profiles, smart-test baseline, CI, UX - openspec: rolling status snapshot and task checkboxes for PR verification - Includes prior branch work: init/profile, module registry, docs entry path, workflows Made-with: Cursor --- .github/workflows/pr-orchestrator.yml | 6 +- .github/workflows/specfact.yml | 4 +- CHANGELOG.md | 13 +- docs/index.md | 4 +- .../docs-new-user-onboarding/proposal.md | 18 ++- .../changes/docs-new-user-onboarding/tasks.md | 35 +++-- pyproject.toml | 38 ++--- setup.py | 31 ++-- src/__init__.py | 2 +- src/specfact_cli/__init__.py | 2 +- src/specfact_cli/cli.py | 3 +- .../module_registry/module-package.yaml | 6 +- .../modules/module_registry/src/commands.py | 95 ++++++++----- .../registry/dependency_resolver.py | 41 +++++- src/specfact_cli/registry/module_installer.py | 19 ++- tests/e2e/test_bundle_extraction_e2e.py | 3 + tests/e2e/test_wow_entrypoint.py | 75 ++++++---- tests/integration/test_bundle_install.py | 3 + .../unit/registry/test_dependency_resolver.py | 51 ++++++- tests/unit/registry/test_module_installer.py | 7 + .../test_module_upgrade_improvements.py | 12 +- .../test_module_not_found_error.py | 13 +- tests/unit/tools/test_smart_test_coverage.py | 1 + .../test_bundle_dependency_install.py | 3 + tools/contract_first_smart_test.py | 4 +- tools/smart_test_coverage.py | 133 ++++++++++++++---- 26 files changed, 449 insertions(+), 173 deletions(-) diff --git a/.github/workflows/pr-orchestrator.yml b/.github/workflows/pr-orchestrator.yml index d75d4130..b865862b 100644 --- a/.github/workflows/pr-orchestrator.yml +++ b/.github/workflows/pr-orchestrator.yml @@ -210,7 +210,7 @@ jobs: run: | python -m pip install --upgrade pip pip install "hatch" "virtualenv<21" coverage "coverage[toml]" pytest pytest-cov pytest-mock pytest-asyncio pytest-xdist pytest-timeout - pip install -e . + pip install -e ".[dev]" - name: Cache hatch environments if: needs.changes.outputs.skip_tests_dev_to_main != 'true' @@ -495,7 +495,7 @@ jobs: - name: Install type-check dependencies run: | python -m pip install --upgrade pip - pip install -e . basedpyright + pip install -e ".[dev]" - name: Run type checking run: | echo "🔍 Running basedpyright type checking..." @@ -533,7 +533,7 @@ jobs: - name: Install lint dependencies run: | python -m pip install --upgrade pip - pip install -e . ruff basedpyright pylint + pip install -e ".[dev]" - name: Run linting run: | diff --git a/.github/workflows/specfact.yml b/.github/workflows/specfact.yml index f89e0d19..f883e94b 100644 --- a/.github/workflows/specfact.yml +++ b/.github/workflows/specfact.yml @@ -62,8 +62,8 @@ jobs: - name: Install SpecFact CLI run: | - echo "📦 Installing SpecFact CLI..." - pip install -e . + echo "📦 Installing SpecFact CLI (contracts extra for repro / CrossHair)..." + pip install -e ".[contracts]" - name: Enforce Core-Module Isolation run: | diff --git a/CHANGELOG.md b/CHANGELOG.md index c2935162..62c7e0e5 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -10,7 +10,18 @@ All notable changes to this project will be documented in this file. --- -## [0.45.0] - 2026-04-02 +## [0.45.1] - 2026-04-03 + +### Changed + +- **Dependency install profiles**: the default wheel is slimmer—CrossHair, Hypothesis, Ruff, Radon, + and unused pins (`python-dotenv`, `cffi`) are no longer in core `dependencies`. Use + `pip install specfact-cli[contracts]` for CrossHair + Hypothesis, or `pip install specfact-cli[dev]` + for contributors. `packaging` is pinned explicitly for module installer / PEP 440 use. +- **Smart-test baseline fallback**: incremental smart-test runs now establish a full-suite baseline when + no `last_full_run` cache exists (avoids a no-op incremental pass and misleading zero coverage). +- **Pre-commit single-invocation overwrite handling**: staged Python files are passed to the code-review + helper in one batch so `.specfact/code-review.json` is not overwritten by multiple `xargs` processes. ### Fixed diff --git a/docs/index.md b/docs/index.md index 0f90cbed..d8d81fb9 100644 --- a/docs/index.md +++ b/docs/index.md @@ -112,7 +112,9 @@ Installed modules add command groups such as `project`, `backlog`, `code`, `spec ## Modules Documentation -`docs.specfact.io` is the default starting point for first-time readers on this site, and the **canonical starting point for the core CLI story** in the docs split. Move to the modules site when you need **module-deep workflows**, bundle-specific adapters, and authoring guidance. +`docs.specfact.io` is the default starting point and the **canonical starting point for the core CLI story** +for first-time readers on this site. Move to the modules site when you need **module-deep workflows**, +bundle-specific adapters, and authoring guidance. - **[Modules Docs Home](https://modules.specfact.io/)** - Backlog, project, spec, govern - **[Module Development](https://modules.specfact.io/authoring/module-development/)** - Build your own modules diff --git a/openspec/changes/docs-new-user-onboarding/proposal.md b/openspec/changes/docs-new-user-onboarding/proposal.md index f7e34663..9bec3688 100644 --- a/openspec/changes/docs-new-user-onboarding/proposal.md +++ b/openspec/changes/docs-new-user-onboarding/proposal.md @@ -135,9 +135,17 @@ can truthfully describe a vibe-coder entry sequence. ## Source Tracking -- **GitHub Issue**: [#476](https://github.com/nold-ai/specfact-cli/issues/476) -- **Issue URL**: -- **Parent Feature**: [#356](https://github.com/nold-ai/specfact-cli/issues/356) — Documentation & Discrepancy Remediation ([tracking comment](https://github.com/nold-ai/specfact-cli/issues/356#issuecomment-4180162525)) -- **Related (overlap)**: [#466](https://github.com/nold-ai/specfact-cli/issues/466) — first-contact / onboarding ([cross-link comment](https://github.com/nold-ai/specfact-cli/issues/466#issuecomment-4180162609)) +- **GitHub Issue**: [issue-476] +- **Issue URL**: [issue-476-url] +- **Parent Feature**: [issue-356] — Documentation & Discrepancy Remediation ([tracking comment][comment-356]) +- **Related (overlap)**: [issue-466] — first-contact / onboarding ([cross-link comment][comment-466]) - **Repository**: nold-ai/specfact-cli -- **Last Synced Status**: in-progress — issue created with labels `enhancement`, `change-proposal`, `documentation`, `openspec` +- **Last Synced Status**: in-progress — issue created with labels `enhancement`, `change-proposal`, `documentation`, + `openspec` + +[issue-476]: https://github.com/nold-ai/specfact-cli/issues/476 +[issue-476-url]: https://github.com/nold-ai/specfact-cli/issues/476 +[issue-356]: https://github.com/nold-ai/specfact-cli/issues/356 +[comment-356]: https://github.com/nold-ai/specfact-cli/issues/356#issuecomment-4180162525 +[issue-466]: https://github.com/nold-ai/specfact-cli/issues/466 +[comment-466]: https://github.com/nold-ai/specfact-cli/issues/466#issuecomment-4180162609 diff --git a/openspec/changes/docs-new-user-onboarding/tasks.md b/openspec/changes/docs-new-user-onboarding/tasks.md index 5636e56e..281fcdd4 100644 --- a/openspec/changes/docs-new-user-onboarding/tasks.md +++ b/openspec/changes/docs-new-user-onboarding/tasks.md @@ -1,3 +1,13 @@ +## Current status (rolling) + +**Branch:** `feature/docs-new-user-onboarding` (worktree active; tracks `origin/feature/docs-new-user-onboarding`). +**Release packaging:** Patch **0.45.1** — `pyproject.toml`, `setup.py`, `src/__init__.py`, `src/specfact_cli/__init__.py`, and `CHANGELOG.md` aligned (see top changelog section). +**Done on branch:** Tasks **1–4, 6–7c, 8–10**; **7d** partial (7d.9, 7d.10, 7d.17; install `--yes` on upgrade from 7b; bundle-dep tests in `test_bundle_dependency_install.py`); **core_compatibility** messaging and registry parsing for versioned `bundle_dependencies`. +**Open / follow-up:** **5** (scope UX) — **specfact-code-review** module repo; **7d.11–7d.16, 7d.18** — full resolver graph, `--dry-run`, registry index objects in modules repo; **11.1** — merge deltas to `openspec/specs/` when project sync workflow is available; **12–13** — re-run gates before merge, PR to `dev`, then archive. +**Evidence:** `TDD_EVIDENCE.md` (contract-test + yaml-lint + unit suite on 2026-04-02); re-run **7.1–7.2** and **12.1–12.3** before final PR if `main`/`dev` moved. + +--- + ## 1. Investigate and locate bug roots - [x] 1.1 Find where `specfact init --profile ` is handled in the init module source @@ -67,8 +77,8 @@ ## 7. Run pre-docs TDD gate -- [ ] 7.1 Run `hatch run contract-test` — confirm passing *(run before PR merge)* -- [ ] 7.2 Run `hatch run smart-test` — confirm passing *(run before PR merge)* +- [x] 7.1 Run `hatch run contract-test` — confirm passing *(passed 2026-04-02 per `TDD_EVIDENCE.md`; **re-run before PR merge** if base moved)* +- [ ] 7.2 Run `hatch run smart-test` — confirm passing *(run before PR merge; use `smart-test-full` if touching `src/` broadly)* - [x] 7.3 Run `hatch run format` and `hatch run type-check` — confirm zero errors - [x] 7.4 Record post-fix passing evidence in `TDD_EVIDENCE.md` - [ ] 7.5 End-to-end manual test on a clean machine: `uvx specfact-cli init --profile solo-developer` @@ -118,6 +128,8 @@ ## 7d. Version-aware bundle dependency resolution +**Progress:** 7d.9, 7d.10, 7d.17 implemented; `test_bundle_dependency_install.py` covers ordered bundle install; full interactive resolver / `--dry-run` / circular graph (7d.11–7d.16) **not** wired — see `TDD_EVIDENCE.md` “Deferred / follow-up”. + - [ ] 7d.1 Write failing test: installing a module whose `bundle_dependencies` lists a module not installed prompts the user and installs the dep on confirmation - [ ] 7d.2 Write failing test: installing a module whose declared dep version specifier is @@ -197,15 +209,18 @@ - [ ] 11.1 Run `openspec sync --change docs-new-user-onboarding` to merge all 10 spec deltas *(blocked: OpenSpec CLI in this environment has no `sync` subcommand — use project workflow when available)* - [ ] 11.2 Confirm `openspec/specs/docs-aha-moment-entry/spec.md` created + *(delta exists: `openspec/changes/docs-new-user-onboarding/specs/docs-aha-moment-entry/spec.md`; **not** merged to main specs yet)* - [ ] 11.3 Confirm `openspec/specs/docs-vibecoder-entry-path/spec.md` created -- [ ] 11.4 Confirm `openspec/specs/dependency-resolution/spec.md` created + *(delta exists: `openspec/changes/docs-new-user-onboarding/specs/docs-vibecoder-entry-path/spec.md`; **not** merged to main specs yet)* +- [x] 11.4 Confirm `openspec/specs/dependency-resolution/spec.md` created *(main spec present; delta under this change may still differ until 11.1)* - [ ] 11.5 Confirm MODIFIED requirements in `entrypoint-onboarding`, `first-contact-story`, `first-run-selection`, `profile-presets`, and `module-installation` specs are updated + *(deltas under `openspec/changes/docs-new-user-onboarding/specs/`; merge to `openspec/specs/` pending 11.1)* ## 12. Final Validation and Evidence -- [ ] 12.1 Run `hatch run yaml-lint` — confirm zero failures *(before PR)* -- [ ] 12.2 Run `hatch run contract-test` — confirm passing *(before PR)* +- [x] 12.1 Run `hatch run yaml-lint` — confirm zero failures *(passed 2026-04-02 per `TDD_EVIDENCE.md`; **re-run before PR** if YAML/workflows changed since)* +- [x] 12.2 Run `hatch run contract-test` — confirm passing *(passed 2026-04-02 per `TDD_EVIDENCE.md`; **re-run before PR** if contracts/sources changed since)* - [ ] 12.3 Run `hatch run specfact code review run --json --out .specfact/code-review.json` and confirm zero findings on modified Python files *(before PR)* - [ ] 12.4 Build docs locally (`bundle exec jekyll serve`) and manually verify: @@ -216,8 +231,10 @@ ## 13. PR and Cleanup -- [ ] 13.1 Create feature branch `feature/docs-new-user-onboarding` from `origin/dev` -- [ ] 13.2 Commit CLI fixes: `fix: init --profile installs profile modules, fix module-install under uvx` -- [ ] 13.3 Commit docs: `docs: vibe-coder entry path — uvx hero, code review wow moment` -- [ ] 13.4 Open PR against `dev` referencing this change and the three CLI bugs fixed +- [x] 13.1 Create feature branch `feature/docs-new-user-onboarding` from `origin/dev` *(branch exists; pushed to `origin`)* +- [x] 13.2 Commit CLI fixes: `fix: init --profile installs profile modules, fix module-install under uvx` + *(landed on branch — see git log; may be squashed across commits)* +- [x] 13.3 Commit docs: `docs: vibe-coder entry path — uvx hero, code review wow moment` + *(landed on branch — see git log; follow-on commits include README, dependency-profile work, 0.45.1 changelog)* +- [ ] 13.4 Open PR against `dev` referencing this change and the three CLI bugs fixed *(or update existing PR; confirm checks green)* - [ ] 13.5 After merge, archive: `openspec archive docs-new-user-onboarding` diff --git a/pyproject.toml b/pyproject.toml index d5fb771c..9527bb9b 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -4,7 +4,7 @@ build-backend = "hatchling.build" [project] name = "specfact-cli" -version = "0.45.0" +version = "0.45.1" description = "The swiss knife CLI for agile DevOps teams. Keep backlog, specs, tests, and code in sync with validation and contract enforcement for new projects and long-lived codebases." readme = "README.md" requires-python = ">=3.11" @@ -36,6 +36,10 @@ keywords = [ "cli", "specfact", ] +# Install profiles (PEP 621 extras): +# - `pip install specfact-cli` — minimal runtime (CLI, registry, contracts via icontract/beartype, no CrossHair/Hypothesis wheels). +# - `pip install specfact-cli[contracts]` — add CrossHair + Hypothesis for contract exploration / property tooling. +# - `pip install specfact-cli[dev]` — contributors: pytest, linters, and same contract tools as [contracts] (see dev list). classifiers = [ "Development Status :: 4 - Beta", "Intended Audience :: Developers", @@ -51,14 +55,14 @@ classifiers = [ dependencies = [ # Core dependencies "pydantic>=2.12.3", - "python-dotenv>=1.2.1", "typing-extensions>=4.15.0", "PyYAML>=6.0.3", "requests>=2.32.3", "azure-identity>=1.17.1", "cryptography>=43.0.0", - "cffi>=1.17.1", - + "packaging>=24.0", + # PEP 440 / markers for module installer and registry (do not rely on transitive pins only) + # CLI framework "typer>=0.20.0", "rich>=13.5.2,<13.6.0", # Compatible with semgrep (requires rich~=13.5.2) @@ -80,16 +84,10 @@ dependencies = [ # Schema validation "jsonschema>=4.23.0", - # Contract-First Development Dependencies + # Contract-First (runtime decorators; exploration tools are optional extra `contracts`) "icontract>=2.7.1", # Design-by-contract decorators "beartype>=0.22.4", # Runtime type checking - "crosshair-tool>=0.0.97", # Contract exploration and counterexample discovery - "hypothesis>=6.142.4", # Property-based testing - - # Code analysis - "ruff>=0.14.2", - "radon>=6.0.1", - + # File system watching "watchdog>=6.0.0", @@ -99,6 +97,11 @@ dependencies = [ ] [project.optional-dependencies] +contracts = [ + "crosshair-tool>=0.0.97", + "hypothesis>=6.142.4", +] + dev = [ "pytest>=8.4.2", "pytest-cov>=7.0.0", @@ -114,12 +117,14 @@ dev = [ "types-PyYAML>=6.0.12.20250516", "pip-tools>=7.5.1", "semgrep>=1.144.0", # Latest version compatible with rich~=13.5.2 - - # Contract-First Development Dependencies (dev) - "icontract>=2.7.1", - "beartype>=0.22.4", + + # Same contract exploration stack as [contracts] (extras cannot self-reference) "crosshair-tool>=0.0.97", "hypothesis>=6.142.4", + + # Contract-First Development Dependencies (dev) + "icontract>=2.7.1", + "beartype>=0.22.4", # Enhanced Analysis Tools (for local development) # Note: syft excluded from dev/test due to rich version conflict with semgrep @@ -170,7 +175,6 @@ dependencies = [ "pytest-cov", "pytest-mock", "pytest-xdist", - "python-dotenv", "pre-commit", # Ensure format/lint tools are available in the hatch env "isort>=7.0.0", diff --git a/setup.py b/setup.py index 0b867b3f..974b79a0 100644 --- a/setup.py +++ b/setup.py @@ -1,5 +1,5 @@ #!/usr/bin/env python3 -"""Setup script for specfact-cli package.""" +"""Setup script for specfact-cli package (kept in sync with pyproject.toml [project].dependencies).""" from setuptools import find_packages, setup @@ -7,7 +7,7 @@ if __name__ == "__main__": _setup = setup( name="specfact-cli", - version="0.45.0", + version="0.45.1", description=( "The swiss knife CLI for agile DevOps teams. Keep backlog, specs, tests, and code in sync with " "validation and contract enforcement for new projects and long-lived codebases." @@ -15,21 +15,26 @@ packages=find_packages(where="src"), package_dir={"": "src"}, install_requires=[ - "pydantic>=2.11.5", - "python-dotenv>=1.1.0", - "PyYAML>=6.0.2", + "pydantic>=2.12.3", + "typing-extensions>=4.15.0", + "PyYAML>=6.0.3", "requests>=2.32.3", "azure-identity>=1.17.1", "cryptography>=43.0.0", - "cffi>=1.17.1", - "typer>=0.15.0", + "packaging>=24.0", + "typer>=0.20.0", "rich>=13.5.2,<13.6.0", - "jinja2>=3.1.0", - "networkx>=3.2", - "gitpython>=3.1.0", + "questionary>=2.0.1", + "jinja2>=3.1.6", + "networkx>=3.4.2", + "graphviz>=0.20.1", + "gitpython>=3.1.45", + "ruamel.yaml>=0.18.16", + "jsonschema>=4.23.0", "icontract>=2.7.1", - "beartype>=0.22.2", - "crosshair-tool>=0.0.97", - "hypothesis>=6.140.3", + "beartype>=0.22.4", + "watchdog>=6.0.0", + "opentelemetry-sdk>=1.27.0", + "opentelemetry-exporter-otlp-proto-http>=1.27.0", ], ) diff --git a/src/__init__.py b/src/__init__.py index ca0330ca..b02d2da8 100644 --- a/src/__init__.py +++ b/src/__init__.py @@ -3,4 +3,4 @@ """ # Package version: keep in sync with pyproject.toml, setup.py, src/specfact_cli/__init__.py -__version__ = "0.45.0" +__version__ = "0.45.1" diff --git a/src/specfact_cli/__init__.py b/src/specfact_cli/__init__.py index 0d5806f9..ee71846a 100644 --- a/src/specfact_cli/__init__.py +++ b/src/specfact_cli/__init__.py @@ -45,6 +45,6 @@ def _bootstrap_bundle_paths() -> None: _bootstrap_bundle_paths() -__version__ = "0.45.0" +__version__ = "0.45.1" __all__ = ["__version__"] diff --git a/src/specfact_cli/cli.py b/src/specfact_cli/cli.py index 2c0282b9..e8a99444 100644 --- a/src/specfact_cli/cli.py +++ b/src/specfact_cli/cli.py @@ -126,8 +126,7 @@ def _print_missing_bundle_command_help(invoked: str) -> None: f"[bold red]Module '{module_id}' is not installed.[/bold red]\n" f"The [bold]{invoked}[/bold] command group is provided by that module. " f"Install with [bold]specfact module install {module_id}[/bold], " - "or run [bold]specfact init --profile solo-developer[/bold] (or another profile) " - "to install bundles." + "or run [bold]specfact init --profile [/bold] to install bundles." ) return console.print( diff --git a/src/specfact_cli/modules/module_registry/module-package.yaml b/src/specfact_cli/modules/module_registry/module-package.yaml index 6ab99ec2..4750280d 100644 --- a/src/specfact_cli/modules/module_registry/module-package.yaml +++ b/src/specfact_cli/modules/module_registry/module-package.yaml @@ -1,5 +1,5 @@ name: module-registry -version: 0.1.14 +version: 0.1.15 commands: - module category: core @@ -17,5 +17,5 @@ publisher: description: 'Manage modules: search, list, show, install, and upgrade.' license: Apache-2.0 integrity: - checksum: sha256:de47d37a18c4509a043036520d645f46419c56f0756e26cd193b23df7a3955fb - signature: z1s/9AKFJFLROy7RoV/Nk0zPaaPWNjsnNvHb0mw9DS3tbc8q0WDuZVVFvLAyvln3yi1YulQMIRJUFlSI8GYnCQ== + checksum: sha256:b4628edc05c19e73ae7148dcbfa62edec29f2dc9eab6d1d496274449246065cb + signature: f5jHpyzf+/ZK2mweUkAHzca110RtEgGk+wplafFqV/WzpKZ2B31dGWc4k8VxGC4wm6Bvrz0HTFkSla95P/F2Dw== diff --git a/src/specfact_cli/modules/module_registry/src/commands.py b/src/specfact_cli/modules/module_registry/src/commands.py index 1ada19d5..19ffc96d 100644 --- a/src/specfact_cli/modules/module_registry/src/commands.py +++ b/src/specfact_cli/modules/module_registry/src/commands.py @@ -92,6 +92,14 @@ def _upgrade_module_names_valid(module_names: list[str] | None) -> bool: return all(m.strip() != "" for m in module_names) +def _install_module_ids_nonempty(module_ids: list[str]) -> bool: + return bool(module_ids) and all(m.strip() != "" for m in module_ids) + + +def _uninstall_module_names_nonempty(module_names: list[str]) -> bool: + return bool(module_names) and all(m.strip() != "" for m in module_names) + + def _publisher_url_from_metadata(metadata: object | None) -> str: if not metadata: return "n/a" @@ -281,6 +289,7 @@ def _install_one( @app.command() +@require(_install_module_ids_nonempty, "at least one non-blank module id is required") @beartype def install( module_ids: Annotated[ @@ -313,9 +322,6 @@ def install( ), ) -> None: """Install one or more modules from bundled artifacts or marketplace registry.""" - if not module_ids: - console.print("[red]At least one module id is required.[/red]") - raise typer.Exit(1) if version is not None and sum(1 for mid in module_ids if mid.strip()) > 1: console.print( "[red]--version applies to a single module; install one module at a time or omit --version.[/red]" @@ -325,8 +331,6 @@ def install( target_root = _resolve_install_target_root(scope_normalized, repo) discovered_by_name = {entry.metadata.name: entry for entry in discover_all_modules()} for module_id in module_ids: - if not module_id.strip(): - continue success = _install_one( module_id, scope_normalized, @@ -391,14 +395,22 @@ def _uninstall_from_explicit_scope( if not project_module_dir.exists(): console.print(f"[red]Module '{normalized}' is not installed in project scope ({project_root}).[/red]") raise typer.Exit(1) - shutil.rmtree(project_module_dir) + try: + shutil.rmtree(project_module_dir) + except OSError as exc: + console.print(f"[red]Could not remove module directory {project_module_dir}: {exc}[/red]") + raise typer.Exit(1) from exc console.print(f"[green]Uninstalled[/green] {normalized} from {project_root}") return True if scope_normalized == "user": if not user_module_dir.exists(): console.print(f"[red]Module '{normalized}' is not installed in user scope ({user_root}).[/red]") raise typer.Exit(1) - shutil.rmtree(user_module_dir) + try: + shutil.rmtree(user_module_dir) + except OSError as exc: + console.print(f"[red]Could not remove module directory {user_module_dir}: {exc}[/red]") + raise typer.Exit(1) from exc console.print(f"[green]Uninstalled[/green] {normalized} from {user_root}") return True return False @@ -452,6 +464,7 @@ def _uninstall_marketplace_default(normalized: str) -> None: @app.command() +@require(_uninstall_module_names_nonempty, "at least one non-blank module name is required") @beartype def uninstall( module_names: Annotated[ @@ -462,14 +475,9 @@ def uninstall( repo: Path | None = typer.Option(None, "--repo", help="Repository path for project scope (default: current dir)"), ) -> None: """Uninstall one or more marketplace modules.""" - if not module_names: - console.print("[red]At least one module name is required.[/red]") - raise typer.Exit(1) failed = False for module_name in module_names: stripped = module_name.strip() - if not stripped: - continue try: _uninstall_single_module(stripped, scope, repo) except ClickExit as exc: @@ -1043,21 +1051,27 @@ def _full_marketplace_module_id_for_install(target: str) -> str: return f"nold-ai/specfact-{short}" -def _latest_version_from_registry_index(module_id: str) -> str | None: - idx = fetch_registry_index() +def _latest_version_map_from_registry_index(idx: dict[str, Any] | None) -> dict[str, str]: + """Build module id -> latest_version from a single registry index fetch.""" + out: dict[str, str] = {} if not idx: - return None + return out mods = idx.get("modules", []) if not isinstance(mods, list): - return None + return out for raw in mods: - if isinstance(raw, dict) and str(raw.get("id", "")).strip() == module_id: - lv = raw.get("latest_version") - if lv is None: - return None - s = str(lv).strip() - return s or None - return None + if not isinstance(raw, dict): + continue + mid = str(raw.get("id", "")).strip() + if not mid: + continue + lv = raw.get("latest_version") + if lv is None: + continue + s = str(lv).strip() + if s: + out[mid] = s + return out def _versions_equal_for_upgrade(current: str, latest: str) -> bool: @@ -1083,18 +1097,24 @@ def _resolve_one_upgrade_name(raw: str, by_id: dict[str, dict[str, Any]]) -> str if "/" not in normalized and f"specfact-{normalized}" in by_id: candidates.append(f"specfact-{normalized}") for cand in candidates: - if cand in by_id: - source = str(by_id[cand].get("source", "unknown")) - if source != "marketplace": - console.print( - f"[red]Cannot upgrade '{cand}' from source '{source}'. " - "Only marketplace modules are upgradeable.[/red]" - ) - raise typer.Exit(1) + if cand not in by_id: + continue + source = str(by_id[cand].get("source", "unknown")) + if source != "marketplace": + console.print( + f"[red]Cannot upgrade '{cand}' from source '{source}'. Only marketplace modules are upgradeable.[/red]" + ) + raise typer.Exit(1) + return cand + marketplace_by_id = {k: v for k, v in by_id.items() if str(v.get("source", "")) == "marketplace"} + candidates2 = [normalized] + if "/" not in normalized and f"specfact-{normalized}" in marketplace_by_id: + candidates2.append(f"specfact-{normalized}") + for cand in candidates2: + if cand in marketplace_by_id: return cand - if "/" in normalized: - return normalized - return f"specfact/{normalized}" + console.print(f"[red]Module '{normalized}' is not installed and cannot be upgraded.[/red]") + raise typer.Exit(1) def _resolve_upgrade_target_ids( @@ -1114,6 +1134,7 @@ def _resolve_upgrade_target_ids( def _run_marketplace_upgrades( target_ids: list[str], by_id: dict[str, dict[str, Any]], + latest_by_id: dict[str, str], *, yes: bool = False, ) -> None: @@ -1129,7 +1150,7 @@ def _run_marketplace_upgrades( current_v = str(row.get("version", "unknown")).strip() latest_v = str(row.get("latest_version") or "").strip() if not latest_v: - latest_v = (_latest_version_from_registry_index(full_id) or "").strip() + latest_v = (latest_by_id.get(full_id, "") or "").strip() if latest_v and _versions_equal_for_upgrade(current_v, latest_v): up_to_date.append(full_id) @@ -1204,7 +1225,9 @@ def upgrade( target_ids = _resolve_upgrade_target_ids(module_names, all, modules, by_id) if not target_ids: return - _run_marketplace_upgrades(target_ids, by_id, yes=yes) + index = fetch_registry_index() + latest_by_id = _latest_version_map_from_registry_index(index) + _run_marketplace_upgrades(target_ids, by_id, latest_by_id, yes=yes) # Expose standard ModuleIOContract operations for protocol compliance discovery. diff --git a/src/specfact_cli/registry/dependency_resolver.py b/src/specfact_cli/registry/dependency_resolver.py index 56d69b2d..c4f7b2ed 100644 --- a/src/specfact_cli/registry/dependency_resolver.py +++ b/src/specfact_cli/registry/dependency_resolver.py @@ -25,6 +25,10 @@ class PipDependencyValidationUnavailableError(RuntimeError): """Raised when pip is unavailable and pip dependency validation must not be skipped.""" +class PipDependencyInstallError(Exception): + """Raised when installation of resolved pip requirements fails.""" + + @beartype def _pip_tools_available() -> bool: """Return True if pip-compile is available.""" @@ -46,18 +50,22 @@ def _run_pip_compile(constraints: list[str]) -> list[str]: if not constraints: return [] with tempfile.TemporaryDirectory() as tmp: - reqs = Path(tmp) / "requirements.in" + tmp_path = Path(tmp) + reqs = tmp_path / "requirements.in" + out_path = tmp_path / "requirements.txt" reqs.write_text("\n".join(constraints), encoding="utf-8") result = subprocess.run( - ["pip-compile", "--dry-run", "--no-annotate", str(reqs)], + ["pip-compile", "--no-annotate", "-o", str(out_path), str(reqs)], capture_output=True, text=True, timeout=120, ) if result.returncode != 0: raise DependencyConflictError(result.stderr or result.stdout or "pip-compile failed") - out = (Path(tmp) / "requirements.txt").read_text() if (Path(tmp) / "requirements.txt").exists() else "" - if not out: + if not out_path.exists(): + return [] + out = out_path.read_text(encoding="utf-8") + if not out.strip(): return [] return [L.strip() for L in out.splitlines() if L.strip() and not L.strip().startswith("#")] @@ -151,3 +159,28 @@ def resolve_dependencies( if _pip_tools_available(): return _run_pip_compile(constraints) return _run_basic_resolver(constraints, allow_unvalidated=allow_unvalidated) + + +@beartype +@require(lambda pinned: isinstance(pinned, list) and all(isinstance(x, str) for x in pinned)) +def install_resolved_pip_requirements(pinned: list[str]) -> None: + """Install pinned or constraint lines into the active interpreter (same as the CLI). + + If ``pip`` is not available (e.g. minimal uvx runtime), logs a warning and returns without raising. + Raises :class:`PipDependencyInstallError` when pip is present but installation fails. + """ + if not pinned: + return + if not _pip_module_available(): + logger.warning( + "pip is not available in this environment; skipping install of %s marketplace pip " + "requirement(s). Install them manually or use a full Python environment.", + len(pinned), + ) + return + cmd = [sys.executable, "-m", "pip", "install", "--no-input", *pinned] + logger.info("Installing %s resolved pip requirement(s) for marketplace modules", len(pinned)) + result = subprocess.run(cmd, capture_output=True, text=True, timeout=600) + if result.returncode != 0: + detail = (result.stderr or result.stdout or "pip install failed").strip() + raise PipDependencyInstallError(detail) diff --git a/src/specfact_cli/registry/module_installer.py b/src/specfact_cli/registry/module_installer.py index 064effba..f52c4f4c 100644 --- a/src/specfact_cli/registry/module_installer.py +++ b/src/specfact_cli/registry/module_installer.py @@ -24,7 +24,12 @@ from specfact_cli.common import get_bridge_logger from specfact_cli.models.module_package import ModulePackageMetadata from specfact_cli.registry.crypto_validator import verify_checksum, verify_signature -from specfact_cli.registry.dependency_resolver import DependencyConflictError, resolve_dependencies +from specfact_cli.registry.dependency_resolver import ( + DependencyConflictError, + PipDependencyInstallError, + install_resolved_pip_requirements, + resolve_dependencies, +) from specfact_cli.registry.marketplace_client import download_module from specfact_cli.registry.module_discovery import discover_all_modules from specfact_cli.registry.module_security import assert_module_allowed, ensure_publisher_trusted @@ -143,7 +148,8 @@ def _extract_bundle_dependencies(metadata: dict[str, Any]) -> list[str]: dependencies: list[str] = [] for index, value in enumerate(raw_dependencies): if isinstance(value, dict): - raw_id = value.get("id") + entry = cast(dict[str, Any], value) + raw_id = entry.get("id") if raw_id is None or not str(raw_id).strip(): raise ValueError( f"bundle_dependencies[{index}]: object entry must include non-empty 'id' " @@ -801,13 +807,20 @@ def _install_bundle_dependencies_for_module( try: all_metas = [e.metadata for e in discover_all_modules()] all_metas.append(metadata_obj) - resolve_dependencies(all_metas, allow_unvalidated=True) + resolved = resolve_dependencies(all_metas, allow_unvalidated=True) except DependencyConflictError as dep_err: if not force: raise ValueError( f"Dependency conflict: {dep_err}. Use --force to bypass or --skip-deps to skip resolution." ) from dep_err logger.warning("Dependency conflict bypassed by --force: %s", dep_err) + return + if not resolved: + return + try: + install_resolved_pip_requirements(resolved) + except PipDependencyInstallError as pip_err: + raise ValueError(f"Failed to install resolved pip dependencies: {pip_err}") from pip_err def _atomic_place_verified_module( diff --git a/tests/e2e/test_bundle_extraction_e2e.py b/tests/e2e/test_bundle_extraction_e2e.py index 85b0d180..b937a990 100644 --- a/tests/e2e/test_bundle_extraction_e2e.py +++ b/tests/e2e/test_bundle_extraction_e2e.py @@ -96,6 +96,9 @@ def test_publish_install_verify_roundtrip_for_specfact_codebase(monkeypatch, tmp assert tarball.exists() monkeypatch.setattr("specfact_cli.registry.module_installer.resolve_dependencies", lambda *_a, **_k: None) + monkeypatch.setattr( + "specfact_cli.registry.module_installer.install_resolved_pip_requirements", lambda *_a, **_k: None + ) monkeypatch.setattr("specfact_cli.registry.module_installer.verify_module_artifact", lambda *_a, **_k: True) monkeypatch.setattr("specfact_cli.registry.module_installer.ensure_publisher_trusted", lambda *_a, **_k: None) monkeypatch.setattr("specfact_cli.registry.module_installer.assert_module_allowed", lambda *_a, **_k: None) diff --git a/tests/e2e/test_wow_entrypoint.py b/tests/e2e/test_wow_entrypoint.py index 478ce085..c80b29a9 100644 --- a/tests/e2e/test_wow_entrypoint.py +++ b/tests/e2e/test_wow_entrypoint.py @@ -10,7 +10,6 @@ import subprocess from collections.abc import Iterator from pathlib import Path -from unittest.mock import patch import pytest from typer.testing import CliRunner @@ -30,32 +29,9 @@ def _reset_registry() -> Iterator[None]: runner = CliRunner() -def test_init_solo_developer_exits_zero_in_temp_git_repo(tmp_path: Path) -> None: - """Documented path step 1: init --profile solo-developer in a repo (git init like a real user).""" - subprocess.run(["git", "init"], cwd=tmp_path, check=True, capture_output=True) - with ( - patch("specfact_cli.modules.init.src.commands.install_bundles_for_init", lambda *a, **k: None), - patch( - "specfact_cli.modules.init.src.commands.get_discovered_modules_for_state", - lambda **_: [{"id": "init", "enabled": True}], - ), - patch("specfact_cli.modules.init.src.commands.write_modules_state", lambda _: None), - patch("specfact_cli.modules.init.src.commands.run_discovery_and_write_cache", lambda _: None), - patch("specfact_cli.modules.init.src.commands.is_first_run", lambda **_: True), - ): - result = runner.invoke( - app, - ["init", "--repo", str(tmp_path), "--profile", "solo-developer"], - catch_exceptions=False, - ) - assert result.exit_code == 0, result.stdout + result.stderr - - -def test_after_wow_profile_mock_bundles_registry_lists_code_for_step_two( - monkeypatch: pytest.MonkeyPatch, tmp_path: Path -) -> None: - """Step 2 needs code + code-review bundles; registry exposes `code` group when both are 'installed'.""" - subprocess.run(["git", "init"], cwd=tmp_path, check=True, capture_output=True) +@pytest.fixture +def patch_init_wow_dependencies(monkeypatch: pytest.MonkeyPatch) -> None: + """Stub init side effects so profile install can be exercised without real bundle I/O.""" monkeypatch.setattr( "specfact_cli.modules.init.src.commands.install_bundles_for_init", lambda *a, **k: None, @@ -65,8 +41,29 @@ def test_after_wow_profile_mock_bundles_registry_lists_code_for_step_two( lambda **_: [{"id": "init", "enabled": True}], ) monkeypatch.setattr("specfact_cli.modules.init.src.commands.write_modules_state", lambda _: None) - monkeypatch.setattr("specfact_cli.modules.init.src.commands.run_discovery_and_write_cache", lambda _: None) + monkeypatch.setattr( + "specfact_cli.modules.init.src.commands.run_discovery_and_write_cache", + lambda _: None, + ) monkeypatch.setattr("specfact_cli.modules.init.src.commands.is_first_run", lambda **_: True) + + +def test_init_solo_developer_exits_zero_in_temp_git_repo(tmp_path: Path, patch_init_wow_dependencies: None) -> None: + """Documented path step 1: init --profile solo-developer in a repo (git init like a real user).""" + subprocess.run(["git", "init"], cwd=tmp_path, check=True, capture_output=True) + result = runner.invoke( + app, + ["init", "--repo", str(tmp_path), "--profile", "solo-developer"], + catch_exceptions=False, + ) + assert result.exit_code == 0, result.stdout + result.stderr + + +def test_after_wow_profile_mock_bundles_registry_lists_code_for_step_two( + monkeypatch: pytest.MonkeyPatch, tmp_path: Path, patch_init_wow_dependencies: None +) -> None: + """Step 2 needs code + code-review bundles; registry exposes `code` group when both are 'installed'.""" + subprocess.run(["git", "init"], cwd=tmp_path, check=True, capture_output=True) init_r = runner.invoke( app, ["init", "--repo", str(tmp_path), "--profile", "solo-developer"], @@ -82,3 +79,25 @@ def test_after_wow_profile_mock_bundles_registry_lists_code_for_step_two( register_builtin_commands() names = CommandRegistry.list_commands() assert "code" in names, f"Expected code group when codebase+code-review bundles present; got {names}" + + +def test_after_wow_profile_only_code_review_does_not_expose_code_command( + monkeypatch: pytest.MonkeyPatch, tmp_path: Path, patch_init_wow_dependencies: None +) -> None: + """Category groups map specfact-codebase -> `code`; code-review alone must not mount that group.""" + subprocess.run(["git", "init"], cwd=tmp_path, check=True, capture_output=True) + init_r = runner.invoke( + app, + ["init", "--repo", str(tmp_path), "--profile", "solo-developer"], + catch_exceptions=False, + ) + assert init_r.exit_code == 0 + + CommandRegistry._clear_for_testing() + monkeypatch.setattr( + "specfact_cli.registry.module_packages.get_installed_bundles", + lambda _p, _e: ["specfact-code-review"], + ) + register_builtin_commands() + names = CommandRegistry.list_commands() + assert "code" not in names, f"Expected no `code` group when only specfact-code-review is installed; got {names}" diff --git a/tests/integration/test_bundle_install.py b/tests/integration/test_bundle_install.py index 91dfb248..fc71e753 100644 --- a/tests/integration/test_bundle_install.py +++ b/tests/integration/test_bundle_install.py @@ -50,6 +50,9 @@ def _create_module_tarball( def _stub_install_runtime(monkeypatch) -> None: monkeypatch.setattr("specfact_cli.registry.module_installer.resolve_dependencies", lambda *_a, **_k: None) + monkeypatch.setattr( + "specfact_cli.registry.module_installer.install_resolved_pip_requirements", lambda *_a, **_k: None + ) monkeypatch.setattr("specfact_cli.registry.module_installer.verify_module_artifact", lambda *_a, **_k: True) monkeypatch.setattr("specfact_cli.registry.module_installer.ensure_publisher_trusted", lambda *_a, **_k: None) monkeypatch.setattr("specfact_cli.registry.module_installer.assert_module_allowed", lambda *_a, **_k: None) diff --git a/tests/unit/registry/test_dependency_resolver.py b/tests/unit/registry/test_dependency_resolver.py index 0f28037f..44b5a3f7 100644 --- a/tests/unit/registry/test_dependency_resolver.py +++ b/tests/unit/registry/test_dependency_resolver.py @@ -2,13 +2,15 @@ from __future__ import annotations -from unittest.mock import patch +from unittest.mock import MagicMock, patch import pytest from specfact_cli.models.module_package import ModulePackageMetadata, VersionedPipDependency from specfact_cli.registry.dependency_resolver import ( DependencyConflictError, + PipDependencyInstallError, + install_resolved_pip_requirements, resolve_dependencies, ) @@ -135,3 +137,50 @@ def test_clear_error_messages_for_conflicts( msg = str(exc_info.value) assert "requests" in msg assert "Suggest" in msg or "force" in msg or "skip-deps" in msg + + +class TestInstallResolvedPipRequirements: + """Tests for install_resolved_pip_requirements.""" + + def test_no_op_when_empty(self) -> None: + with patch("specfact_cli.registry.dependency_resolver.subprocess.run") as mock_run: + install_resolved_pip_requirements([]) + mock_run.assert_not_called() + + def test_invokes_pip_install_with_pins(self) -> None: + ok = MagicMock() + ok.returncode = 0 + with ( + patch("specfact_cli.registry.dependency_resolver._pip_module_available", return_value=True), + patch("specfact_cli.registry.dependency_resolver.subprocess.run") as mock_run, + ): + mock_run.return_value = ok + install_resolved_pip_requirements(["requests==2.31.0", "pydantic==2.5.0"]) + mock_run.assert_called_once() + cmd = mock_run.call_args[0][0] + assert "pip" in cmd + assert "install" in cmd + assert "--no-input" in cmd + assert "requests==2.31.0" in cmd + assert "pydantic==2.5.0" in cmd + + def test_skips_when_pip_module_unavailable(self) -> None: + with ( + patch("specfact_cli.registry.dependency_resolver._pip_module_available", return_value=False), + patch("specfact_cli.registry.dependency_resolver.subprocess.run") as mock_run, + ): + install_resolved_pip_requirements(["x==1"]) + mock_run.assert_not_called() + + def test_raises_on_pip_failure(self) -> None: + bad = MagicMock() + bad.returncode = 1 + bad.stderr = "boom" + bad.stdout = "" + with ( + patch("specfact_cli.registry.dependency_resolver._pip_module_available", return_value=True), + patch("specfact_cli.registry.dependency_resolver.subprocess.run") as mock_run, + ): + mock_run.return_value = bad + with pytest.raises(PipDependencyInstallError): + install_resolved_pip_requirements(["x==1"]) diff --git a/tests/unit/registry/test_module_installer.py b/tests/unit/registry/test_module_installer.py index f2f8129d..56ba1827 100644 --- a/tests/unit/registry/test_module_installer.py +++ b/tests/unit/registry/test_module_installer.py @@ -21,6 +21,10 @@ def _no_op_resolve_dependencies(monkeypatch: pytest.MonkeyPatch) -> None: "specfact_cli.registry.module_installer.resolve_dependencies", lambda *_a, **_k: None, ) + monkeypatch.setattr( + "specfact_cli.registry.module_installer.install_resolved_pip_requirements", + lambda *_a, **_k: None, + ) def _create_module_tarball( @@ -118,6 +122,9 @@ def test_install_module_logs_satisfied_dependencies_without_warning(monkeypatch, "specfact_cli.registry.module_installer.ensure_publisher_trusted", lambda *_args, **_kwargs: None ) monkeypatch.setattr("specfact_cli.registry.module_installer.resolve_dependencies", lambda *_args, **_kwargs: None) + monkeypatch.setattr( + "specfact_cli.registry.module_installer.install_resolved_pip_requirements", lambda *_args, **_kwargs: None + ) monkeypatch.setattr("specfact_cli.registry.module_installer.discover_all_modules", list) mock_logger = MagicMock() diff --git a/tests/unit/specfact_cli/modules/test_module_upgrade_improvements.py b/tests/unit/specfact_cli/modules/test_module_upgrade_improvements.py index 0f13b595..f642f867 100644 --- a/tests/unit/specfact_cli/modules/test_module_upgrade_improvements.py +++ b/tests/unit/specfact_cli/modules/test_module_upgrade_improvements.py @@ -44,7 +44,7 @@ def _fake_install(module_id: str, reinstall: bool = False, **kwargs: object) -> return tmp_path / "backlog" with patch("specfact_cli.modules.module_registry.src.commands.install_module", side_effect=_fake_install): - _run_marketplace_upgrades(["nold-ai/specfact-backlog"], by_id) + _run_marketplace_upgrades(["nold-ai/specfact-backlog"], by_id, {}) assert not install_called, "install_module must NOT be called when module is already at latest version" @@ -66,7 +66,7 @@ def test_run_marketplace_upgrades_all_at_latest_prints_up_to_date( test_console = Console(file=output_buf, highlight=False, markup=True) with patch("specfact_cli.modules.module_registry.src.commands.console", test_console): - _run_marketplace_upgrades(["nold-ai/specfact-backlog", "nold-ai/specfact-codebase"], by_id) + _run_marketplace_upgrades(["nold-ai/specfact-backlog", "nold-ai/specfact-codebase"], by_id, {}) output = output_buf.getvalue() @@ -107,7 +107,7 @@ def _fake_read_version(module_dir: Path) -> str: ), patch("specfact_cli.modules.module_registry.src.commands.console", test_console), ): - _run_marketplace_upgrades(["nold-ai/specfact-backlog", "nold-ai/specfact-codebase"], by_id) + _run_marketplace_upgrades(["nold-ai/specfact-backlog", "nold-ai/specfact-codebase"], by_id, {}) output = output_buf.getvalue() assert "Upgraded" in output, "Must have Upgraded section" @@ -181,7 +181,7 @@ def _fake_confirm(message: str, **kwargs: object) -> bool: patch("specfact_cli.modules.module_registry.src.commands.typer.confirm", side_effect=_fake_confirm), patch("specfact_cli.modules.module_registry.src.commands.install_module") as mock_install, ): - _run_marketplace_upgrades(["nold-ai/specfact-backlog"], by_id) + _run_marketplace_upgrades(["nold-ai/specfact-backlog"], by_id, {}) output = output_buf.getvalue() # Must show major bump warning @@ -199,7 +199,7 @@ def test_run_marketplace_upgrades_skips_major_in_ci_mode(tmp_path: Path) -> None patch("specfact_cli.modules.module_registry.src.commands.is_non_interactive", return_value=True), patch("specfact_cli.modules.module_registry.src.commands.install_module") as mock_install, ): - _run_marketplace_upgrades(["nold-ai/specfact-backlog"], by_id, yes=False) + _run_marketplace_upgrades(["nold-ai/specfact-backlog"], by_id, {}, yes=False) mock_install.assert_not_called() @@ -224,6 +224,6 @@ def _fake_read_version(p: Path) -> str: ), patch("specfact_cli.modules.module_registry.src.commands.typer.confirm") as mock_confirm, ): - _run_marketplace_upgrades(["nold-ai/specfact-backlog"], by_id, yes=True) + _run_marketplace_upgrades(["nold-ai/specfact-backlog"], by_id, {}, yes=True) mock_confirm.assert_not_called() # --yes flag skips prompt diff --git a/tests/unit/specfact_cli/test_module_not_found_error.py b/tests/unit/specfact_cli/test_module_not_found_error.py index b8706773..a03c1b26 100644 --- a/tests/unit/specfact_cli/test_module_not_found_error.py +++ b/tests/unit/specfact_cli/test_module_not_found_error.py @@ -40,18 +40,17 @@ def test_module_not_found_error_includes_uvx_command() -> None: output = _unstyled(result.output) assert result.exit_code != 0 - # Should include uvx-compatible command or at least solo-developer profile - assert "solo-developer" in output or "uvx" in output or "specfact init" in output, ( - f"Error must include actionable uvx init command: {output!r}" + assert "specfact init" in output or "uvx" in output or "--profile" in output, ( + f"Error must include actionable init/profile guidance: {output!r}" ) -def test_module_not_found_error_includes_solo_developer_profile() -> None: - """Module-not-found error for 'code' command must specifically mention solo-developer profile.""" +def test_module_not_found_error_includes_init_profile_placeholder() -> None: + """Module-not-found error for 'code' command must include init --profile guidance.""" result = runner.invoke(app, ["code"]) output = _unstyled(result.output) assert result.exit_code != 0 - # The corrective command must be copy-pasteable - assert "solo-developer" in output, f"Error must mention 'solo-developer' profile as corrective action: {output!r}" + assert "specfact init" in output, f"Error must mention specfact init: {output!r}" + assert "--profile" in output or "" in output, f"Error must suggest a profile: {output!r}" diff --git a/tests/unit/tools/test_smart_test_coverage.py b/tests/unit/tools/test_smart_test_coverage.py index aa6dbaf0..f10de220 100644 --- a/tests/unit/tools/test_smart_test_coverage.py +++ b/tests/unit/tools/test_smart_test_coverage.py @@ -503,6 +503,7 @@ def test_run_smart_tests_no_changes(self, mock_run_tests): "test_count": 150, "coverage_percentage": 85.5, "success": True, + "last_run": "2025-01-01T12:00:00", } result = self.manager.run_smart_tests() diff --git a/tests/unit/validators/test_bundle_dependency_install.py b/tests/unit/validators/test_bundle_dependency_install.py index 614780e8..2fa5109f 100644 --- a/tests/unit/validators/test_bundle_dependency_install.py +++ b/tests/unit/validators/test_bundle_dependency_install.py @@ -45,6 +45,9 @@ def _create_module_tarball( def _stub_integrity_and_deps(monkeypatch: pytest.MonkeyPatch) -> None: monkeypatch.setattr("specfact_cli.registry.module_installer.resolve_dependencies", lambda *_args, **_kwargs: None) + monkeypatch.setattr( + "specfact_cli.registry.module_installer.install_resolved_pip_requirements", lambda *_args, **_kwargs: None + ) monkeypatch.setattr("specfact_cli.registry.module_installer.verify_module_artifact", lambda *_args, **_kwargs: True) monkeypatch.setattr( "specfact_cli.registry.module_installer.ensure_publisher_trusted", lambda *_args, **_kwargs: None diff --git a/tools/contract_first_smart_test.py b/tools/contract_first_smart_test.py index a9cd65ba..2a5a4bbd 100644 --- a/tools/contract_first_smart_test.py +++ b/tools/contract_first_smart_test.py @@ -713,7 +713,7 @@ def _run_contract_exploration( return success, exploration_results - def _run_scenario_tests(self) -> tuple[bool, int, float]: + def _run_scenario_tests(self) -> tuple[bool, int, float | None]: """Run scenario tests (integration tests with contract references).""" logger.info("Running scenario tests...") @@ -739,7 +739,7 @@ def _run_scenario_tests(self) -> tuple[bool, int, float]: if not scenario_tests: logger.info("No scenario tests found (integration tests with contract references)") - return True, 0, 100.0 + return True, 0, None logger.info("Found %d scenario tests:", len(scenario_tests)) for test_file in scenario_tests: diff --git a/tools/smart_test_coverage.py b/tools/smart_test_coverage.py index 52ca8f3b..571dfba7 100755 --- a/tools/smart_test_coverage.py +++ b/tools/smart_test_coverage.py @@ -39,6 +39,7 @@ from pathlib import Path from typing import Any, TextIO, cast +from beartype import beartype from icontract import ensure, require @@ -856,10 +857,12 @@ def _adjust_success_for_coverage_threshold( success: bool, test_level: str, test_count: int, - coverage_percentage: float, + coverage_percentage: float | None, output_lines: list[str], ) -> bool: """Treat threshold-only failures as success for unit/folder runs when appropriate.""" + if coverage_percentage is None: + return success if success or test_level not in ("unit", "folder") or test_count <= 0 or coverage_percentage <= 0: return success if not any(self._line_indicates_coverage_threshold_failure(line) for line in output_lines): @@ -877,15 +880,21 @@ def _log_completed_test_run( success: bool, test_level: str, test_count: int, - coverage_percentage: float, - tested_coverage_percentage: float, + coverage_percentage: float | None, + tested_coverage_percentage: float | None, test_log_file: Path, coverage_log_file: Path, return_code: int | None, ) -> None: """Emit summary log lines after a leveled test run.""" if success: - if test_level in ("unit", "folder") and tested_coverage_percentage > 0: + if coverage_percentage is None or tested_coverage_percentage is None: + logger.info( + "%s tests completed: %d tests; line coverage not measured for this level", + test_level.title(), + test_count, + ) + elif test_level in ("unit", "folder") and tested_coverage_percentage > 0: logger.info( "%s tests completed: %d tests, %.1f%% overall, %.1f%% tested code coverage", test_level.title(), @@ -907,7 +916,9 @@ def _log_completed_test_run( logger.info("Check %s test log for details: %s", test_level, test_log_file) logger.info("Check %s coverage log for details: %s", test_level, coverage_log_file) - def _log_tested_coverage_vs_threshold(self, test_level: str, tested_coverage_percentage: float) -> None: + def _log_tested_coverage_vs_threshold(self, test_level: str, tested_coverage_percentage: float | None) -> None: + if tested_coverage_percentage is None: + return if test_level not in ("unit", "folder") or tested_coverage_percentage <= 0: return if tested_coverage_percentage < self.coverage_threshold: @@ -1018,6 +1029,20 @@ def _get_unit_tests_for_files(self, modified_files: list[Path]) -> list[Path]: return unit_tests + def _modified_sources_proven_by_unit_batch( + self, modified_sources: list[Path], unit_tests_run: list[Path] + ) -> list[Path]: + """Return modified sources whose full set of mapped unit tests was included in the batch run.""" + run = {str(p.resolve()) for p in unit_tests_run} + proven: list[Path] = [] + for src in modified_sources: + mapped = self._get_unit_tests_for_files([src]) + if not mapped: + continue + if all(str(t.resolve()) in run for t in mapped): + proven.append(src) + return proven + def _get_files_in_folders(self, modified_folders: set[Path]) -> list[Path]: """Get all source files in the modified folders.""" folder_files: list[Path] = [] @@ -1200,8 +1225,11 @@ def _run_coverage_tests(self) -> tuple[bool, int, float]: logger.error("Error running tests: %s", e) return False, 0, 0 - def _run_tests(self, test_files: list[Path], test_level: str) -> tuple[bool, int, float]: - """Run tests for specific files and return (success, test_count, coverage_percentage).""" + def _run_tests(self, test_files: list[Path], test_level: str) -> tuple[bool, int, float | None]: + """Run tests for specific files and return (success, test_count, coverage_percentage). + + ``coverage_percentage`` is None for levels without reliable line coverage (integration/e2e/scenarios). + """ if not test_files: logger.info("No %s tests found to run", test_level) return True, 0, 100.0 @@ -1253,18 +1281,16 @@ def _run_tests(self, test_files: list[Path], test_level: str) -> tuple[bool, int test_count = self._parse_pytest_test_count(output_lines) success = return_code == 0 - # Integration, E2E, and contract "scenarios" use pytest without line coverage; do not - # parse TOTAL from output (would be 0%). Match integration/e2e advisory semantics. + # Integration, E2E, scenarios: line coverage is not a reliable metric for this runner. if test_level in ("integration", "e2e", "scenarios"): - coverage_percentage = 100.0 - tested_coverage_percentage = 100.0 + coverage_percentage = None + tested_coverage_percentage = None else: coverage_percentage = self._parse_total_coverage_percent(output_lines) - - if test_level in ("unit", "folder") and test_files: - tested_coverage_percentage = self._calculate_tested_coverage(test_files, output_lines) - else: - tested_coverage_percentage = coverage_percentage + if test_level in ("unit", "folder") and test_files: + tested_coverage_percentage = self._calculate_tested_coverage(test_files, output_lines) + else: + tested_coverage_percentage = coverage_percentage success = self._adjust_success_for_coverage_threshold( success, test_level, test_count, coverage_percentage, output_lines @@ -1347,8 +1373,10 @@ def _check_coverage_threshold(self, coverage_percentage: float): ) def _maybe_warn_subthreshold_non_full( - self, success: bool, enforce_threshold: bool, coverage_percentage: float + self, success: bool, enforce_threshold: bool, coverage_percentage: float | None ) -> None: + if coverage_percentage is None: + return if success and enforce_threshold: self._check_coverage_threshold(coverage_percentage) elif success and not enforce_threshold and coverage_percentage < self.coverage_threshold: @@ -1382,12 +1410,13 @@ def _update_cache( self, success: bool, test_count: int, - coverage_percentage: float, + coverage_percentage: float | None, enforce_threshold: bool = True, update_only: bool = False, updated_sources: list[Path] | None = None, updated_tests: list[Path] | None = None, updated_configs: list[Path] | None = None, + update_coverage_in_cache: bool = True, ) -> None: """Update cache and hashes. If update_only is True, only update hashes for provided file lists (when their tests passed). @@ -1417,10 +1446,15 @@ def update_map(paths: list[Path] | None, target: dict[str, str]): self._refresh_all_tracked_hashes(file_hashes, test_file_hashes, config_file_hashes) # Update cache; keep last_full_run as the last index time (not necessarily a full suite) + prior_cov = float(self.cache.get("coverage_percentage", 0.0)) + if coverage_percentage is None or not update_coverage_in_cache: + cov_for_cache = prior_cov + else: + cov_for_cache = coverage_percentage self.cache.update( { "last_full_run": datetime.now().isoformat(), - "coverage_percentage": coverage_percentage if success else self.cache.get("coverage_percentage", 0), + "coverage_percentage": cov_for_cache if success else self.cache.get("coverage_percentage", 0), "file_hashes": file_hashes, "test_file_hashes": test_file_hashes, "config_file_hashes": config_file_hashes, @@ -1537,6 +1571,7 @@ def show_latest_log(self) -> None: except Exception as e: logger.error("Error reading log file: %s", e) + @beartype @require(lambda self, test_level: test_level in {"unit", "folder", "integration", "e2e", "full", "auto"}) @ensure(lambda result: isinstance(result, bool), "run_smart_tests must return bool") def run_smart_tests(self, test_level: str = "auto", force: bool = False) -> bool: @@ -1552,8 +1587,11 @@ def run_smart_tests(self, test_level: str = "auto", force: bool = False) -> bool if force and not ran_any: return self._run_full_tests() return ok - # No changes - use cached data + # No changes - use cached data only when a baseline run has been recorded status = self.get_status() + if not status.get("last_run"): + logger.info("No cached full-run baseline; running full test suite once…") + return self._run_full_tests() logger.info( "Using cached results: %d tests, %.1f%% coverage", status["test_count"], @@ -1565,10 +1603,13 @@ def run_smart_tests(self, test_level: str = "auto", force: bool = False) -> bool return self.run_tests_by_level(test_level) return self.run_tests_by_level(test_level) + @beartype @require(lambda self, test_level: test_level in {"unit", "folder", "integration", "e2e", "full", "auto"}) @ensure(lambda result: isinstance(result, bool), "run_tests_by_level must return bool") def run_tests_by_level(self, test_level: str) -> bool: - """Run tests by specified level: unit, folder, integration, e2e, or full.""" + """Run tests by specified level: unit, folder, integration, e2e, full, or auto (smart detection).""" + if test_level == "auto": + return self.run_smart_tests("auto", force=False) if test_level == "unit": return self._run_unit_tests() if test_level == "folder": @@ -1718,8 +1759,15 @@ def _run_integration_tests(self) -> bool: enforce_threshold=False, update_only=True, updated_tests=integration_tests, + update_coverage_in_cache=False, ) - logger.info("Integration tests completed: %d tests, %.1f%% coverage", test_count, coverage_percentage) + if coverage_percentage is None: + logger.info( + "Integration tests completed: %d tests; line coverage not measured for this level", + test_count, + ) + else: + logger.info("Integration tests completed: %d tests, %.1f%% coverage", test_count, coverage_percentage) logger.info( "Note: Integration test coverage is not enforced - focus is on component interaction validation" ) @@ -1755,8 +1803,15 @@ def _run_e2e_tests(self) -> bool: enforce_threshold=False, update_only=True, updated_tests=e2e_tests, + update_coverage_in_cache=False, ) - logger.info("E2E tests completed: %d tests, %.1f%% coverage", test_count, coverage_percentage) + if coverage_percentage is None: + logger.info( + "E2E tests completed: %d tests; line coverage not measured for this level", + test_count, + ) + else: + logger.info("E2E tests completed: %d tests, %.1f%% coverage", test_count, coverage_percentage) logger.info("Note: E2E test coverage is not enforced - focus is on full workflow validation") else: logger.error("E2E tests failed") @@ -1785,6 +1840,12 @@ def _run_changed_only(self) -> tuple[bool, bool]: modified_sources = self._get_modified_files() modified_tests = self._get_modified_test_files() + if modified_sources and self.cache.get("last_full_run"): + unmapped = [s for s in modified_sources if not self._get_unit_tests_for_files([s])] + if unmapped: + logger.info("Modified source(s) have no unit-mapped tests; running full suite to verify baseline.") + return self._run_full_tests(), True + # Map modified sources to unit tests unit_from_sources = self._get_unit_tests_for_files(modified_sources) # Split modified tests by level @@ -1812,14 +1873,14 @@ def dedupe(paths: list[Path]) -> list[Path]: ran_any = True ok, unit_count, unit_cov = self._run_tests(unit_tests, "unit") if ok: - # Update hashes only for modified sources we mapped and the unit test files themselves + proven_sources = self._modified_sources_proven_by_unit_batch(modified_sources, unit_tests) self._update_cache( True, unit_count, unit_cov, enforce_threshold=False, update_only=True, - updated_sources=modified_sources, + updated_sources=proven_sources, updated_tests=unit_tests, ) overall_success = overall_success and ok @@ -1828,7 +1889,13 @@ def dedupe(paths: list[Path]) -> list[Path]: ok, integ_count, integ_cov = self._run_tests(integ_tests, "integration") if ok: self._update_cache( - True, integ_count, integ_cov, enforce_threshold=False, update_only=True, updated_tests=integ_tests + True, + integ_count, + integ_cov, + enforce_threshold=False, + update_only=True, + updated_tests=integ_tests, + update_coverage_in_cache=False, ) overall_success = overall_success and ok if e2e_tests: @@ -1836,7 +1903,13 @@ def dedupe(paths: list[Path]) -> list[Path]: ok, e2e_count, e2e_cov = self._run_tests(e2e_tests, "e2e") if ok: self._update_cache( - True, e2e_count, e2e_cov, enforce_threshold=False, update_only=True, updated_tests=e2e_tests + True, + e2e_count, + e2e_cov, + enforce_threshold=False, + update_only=True, + updated_tests=e2e_tests, + update_coverage_in_cache=False, ) overall_success = overall_success and ok @@ -1845,8 +1918,10 @@ def dedupe(paths: list[Path]) -> list[Path]: logger.info("No incremental baseline; running full test suite once to establish cache…") success = self._run_full_tests() return success, True + if self._has_config_changes(): + logger.info("Configuration changed but no mapped tests to run; running full suite…") + return self._run_full_tests(), True logger.info("No changed files detected that map to tests - skipping test execution") - self._update_cache(True, 0, self.cache.get("coverage_percentage", 0.0), enforce_threshold=False) return True, False return overall_success, True @@ -1859,6 +1934,8 @@ def force_full_run(self, test_level: str = "full") -> bool: if test_level == "full": success, test_count, coverage_percentage = self._run_coverage_tests() self._update_cache(success, test_count, coverage_percentage, enforce_threshold=True) + elif test_level == "auto": + success = self.run_smart_tests("auto", force=True) else: success = self.run_tests_by_level(test_level) return success