diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml new file mode 100644 index 00000000..b6593074 --- /dev/null +++ b/.pre-commit-config.yaml @@ -0,0 +1,9 @@ +repos: + - repo: local + hooks: + - id: verify-module-signatures + name: Verify module signatures and version bumps + entry: hatch run ./scripts/verify-modules-signature.py --require-signature --enforce-version-bump + language: system + pass_filenames: false + always_run: true diff --git a/CHANGELOG.md b/CHANGELOG.md index 94096f54..7f157cda 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -8,6 +8,29 @@ All notable changes to this project will be documented in this file. **Important:** Changes need to be documented below this block as this is the header section. Each section should be separated by a horizontal rule. Newer changelog entries need to be added on top of prior ones to keep the history chronological with most recent changes first. +--- + +## [0.37.5] - 2026-02-25 + +### Fixed + +- Backlog refine/write-back now resolves ADO custom field targets deterministically across mapped canonical fields, preventing fallback to unintended defaults (for example Story Points field drift). +- Backlog refine tmp import contract and parser guidance were aligned across backlog prompts, including mandatory stable `ID` usage and provider-specific structure requirements. +- ADO markdown write-back and extraction handling were hardened: markdown-supported fields are formatted consistently, duplicate description headings are stripped, and rich-text normalization preserves line breaks and non-HTML angle-bracket content. +- Refine import/update safeguards now prevent title pollution (`## Item ...`) and reject significant silent content loss during bulk refinement flows. +- Template and mapping steering for ADO now prefers user-story templates where applicable and includes explicit process/framework selection behavior in mapping workflows. 
+- Backlog read commands now support `--state any` and `--assignee any` semantics to explicitly disable those filters and avoid confusing empty results caused by hidden defaults. +- Fixed a `daily` regression where explicit `--state any` / `--assignee any` still fell back to standup defaults (`open`/configured assignee) instead of disabling filters. +- GitHub backlog create/type assignment now falls back `story -> feature` by default when native `Story` type is not available in the repository, while preserving explicit mappings when present. +- ADO transport/write paths were hardened with improved retry/diagnostic behavior and clearer default-filter visibility in command output for production-style environments. +- Contract-exploration counterexamples were addressed by tightening converter preconditions and timestamp parsing robustness, and by hardening TODO-marker detection against regex edge cases. +- `specfact module init` command-test assertions now handle isolated user-root output formatting consistently, avoiding brittle path-specific failures in CI and local runs. +- Enforcement preset factory return-path validation no longer triggers spurious beartype return violations in strict test runs. +- Addressed integration/unit regressions in backlog command parsing/help wiring and ADO parent-candidate WIQL request handling introduced during hardening. +- Removed module installer tar extraction deprecation warnings by using safer extraction mode with backward-compatible fallback. +- Docs site rendering was corrected for linked architecture pages by adding missing Jekyll front matter and replacing non-doc relative links with stable GitHub URLs where appropriate. +- Eliminated widespread `ValueError: I/O operation on closed file` CLI/E2E failures by rebinding module-level Rich consoles to the active invocation stream at CLI entry, preventing stale closed capture streams across sequential test runs. 
--- ## [0.37.4] - 2026-02-25 diff --git a/README.md index 49c4a937..399b781c 100644 --- a/README.md +++ b/README.md @@ -54,6 +54,29 @@ specfact validate sidecar init my-project /path/to/repo specfact validate sidecar run my-project /path/to/repo ``` +### Backlog Bridge (60 seconds) + +SpecFact's USP is closing the drift gap between **backlog -> specs -> code**. + +```bash +# 1) Initialize backlog config + field mapping +specfact backlog init-config --force +specfact backlog map-fields --provider ado --ado-org <org> --ado-project "<project>" + +# 2) Run standup/refinement on real backlog scope +specfact backlog daily ado --ado-org <org> --ado-project "<project>" --state any --assignee any --limit 5 +specfact backlog refine ado --ado-org <org> --ado-project "<project>" --id <work-item-id> --preview + +# 3) Keep backlog + spec intent aligned (avoid silent drift) +specfact policy validate --group-by-item +``` + +For GitHub, replace adapter/org/project with: +`specfact backlog daily github --repo-owner <owner> --repo-name <repo> ...` + +Deep dive: +- **[Backlog Quickstart Demo (GitHub + ADO)](docs/getting-started/tutorial-backlog-quickstart-demo.md)** + **AI IDE quick start** ```bash diff --git a/docs/_layouts/default.html index 8e422432..248e1e47 100644 --- a/docs/_layouts/default.html +++ b/docs/_layouts/default.html @@ -134,6 +134,7 @@

  • Installation
  • First Steps
  • Module Bootstrap Checklist
  • +
  • Tutorial: Backlog Quickstart Demo
  • Tutorial: Backlog Refine with AI IDE
  • Tutorial: Daily Standup and Sprint Review
  • @@ -166,6 +167,7 @@

    • DevOps Integration Guide
    • Backlog Refinement
    • +
    • Backlog Quickstart Demo
    • Authentication
    • GitHub Adapter
    • Azure DevOps Adapter
    • diff --git a/docs/architecture/README.md b/docs/architecture/README.md index d3d5a39f..bd3f25a1 100644 --- a/docs/architecture/README.md +++ b/docs/architecture/README.md @@ -1,3 +1,10 @@ +--- +layout: default +title: Architecture Docs Index +description: Index of SpecFact CLI architecture deep-dive documents. +permalink: /architecture/ +--- + # SpecFact CLI Architecture Documentation Architecture documents in this folder describe the current implementation and clearly separate planned features. diff --git a/docs/getting-started/README.md b/docs/getting-started/README.md index ef4c006f..8a5f65ea 100644 --- a/docs/getting-started/README.md +++ b/docs/getting-started/README.md @@ -55,6 +55,7 @@ uvx specfact-cli@latest plan init my-project --interactive - ๐Ÿ“– **[Tutorial: Using SpecFact with OpenSpec or Spec-Kit](tutorial-openspec-speckit.md)** โญ **NEW** - Complete beginner-friendly tutorial - ๐Ÿ“– **[DevOps Backlog Integration](../guides/devops-adapter-integration.md)** ๐Ÿ†• **NEW FEATURE** - Integrate SpecFact into agile DevOps workflows - ๐Ÿ“– **[Backlog Refinement](../guides/backlog-refinement.md)** ๐Ÿ†• **NEW FEATURE** - AI-assisted template-driven refinement for standardizing work items +- ๐Ÿ“– **[Tutorial: Backlog Quickstart Demo (GitHub + ADO)](tutorial-backlog-quickstart-demo.md)** ๐Ÿ†• - Short end-to-end demo: `init-config`, `map-fields`, `daily`, `refine`, plus create/check loop - ๐Ÿ“– **[Tutorial: Backlog Refine with AI IDE](tutorial-backlog-refine-ai-ide.md)** ๐Ÿ†• - End-to-end for agile DevOps teams: slash prompt, story quality, underspecification, splitting, DoR, custom templates - ๐Ÿ“– **[Tutorial: Daily Standup and Sprint Review](tutorial-daily-standup-sprint-review.md)** ๐Ÿ†• - End-to-end daily standup: auto-detect repo (GitHub/ADO), view standup table, post comment, interactive, Copilot export - ๐Ÿ“– **[Use Cases](../guides/use-cases.md)** - See real-world examples diff --git a/docs/getting-started/tutorial-backlog-quickstart-demo.md 
b/docs/getting-started/tutorial-backlog-quickstart-demo.md new file mode 100644 index 00000000..4e36980b --- /dev/null +++ b/docs/getting-started/tutorial-backlog-quickstart-demo.md @@ -0,0 +1,254 @@ +--- +layout: default +title: Tutorial - Backlog Quickstart Demo (GitHub + ADO) +description: Short end-to-end demo for backlog init-config, map-fields, daily, and refine on GitHub and Azure DevOps. +permalink: /getting-started/tutorial-backlog-quickstart-demo/ +--- + +# Tutorial: Backlog Quickstart Demo (GitHub + ADO) + +This is a short, copy/paste-friendly demo for new users covering: + +1. `specfact backlog init-config` +2. `specfact backlog map-fields` +3. `specfact backlog daily` +4. `specfact backlog refine` (GitHub + ADO) + +It also includes a minimal create/check loop using `specfact backlog add`. + +Preferred ceremony aliases: + +- `specfact backlog ceremony standup` (same behavior as `backlog daily`) +- `specfact backlog ceremony refinement` (same behavior as `backlog refine`) + +## Targets Used in This Demo + +- **GitHub**: `nold-ai/specfact-demo-repo` +- **Azure DevOps**: `dominikusnold/Specfact CLI` + +## Prerequisites + +- SpecFact CLI installed +- Auth configured: + +```bash +specfact auth github +specfact auth azure-devops +specfact auth status +``` + +Expected status should show both providers as valid. + +## 1) Initialize Backlog Config + +```bash +specfact backlog init-config --force +``` + +This creates `.specfact/backlog-config.yaml`. + +## 2) Map Fields (ADO) + +Run field mapping for your ADO project. This command is interactive by design. + +```bash +specfact backlog map-fields \ + --provider ado \ + --ado-org dominikusnold \ + --ado-project "Specfact CLI" \ + --ado-framework scrum +``` + +Notes: + +- Select the process style intentionally (`--ado-framework scrum|agile|safe|kanban|default`). +- Mapping is written to `.specfact/templates/backlog/field_mappings/ado_custom.yaml`. +- Provider context is updated in `.specfact/backlog.yaml`. 
+ +Optional reset: + +```bash +specfact backlog map-fields \ + --provider ado \ + --ado-org dominikusnold \ + --ado-project "Specfact CLI" \ + --ado-framework scrum \ + --reset +``` + +## 3) Daily Standup View (Check Backlog Read) + +GitHub: + +```bash +specfact backlog daily github \ + --repo-owner nold-ai \ + --repo-name specfact-demo-repo \ + --state open \ + --limit 5 +``` + +Disable default state/assignee filters explicitly (for exact ID checks): + +```bash +specfact backlog daily github \ + --repo-owner nold-ai \ + --repo-name specfact-demo-repo \ + --id 28 \ + --state any \ + --assignee any +``` + +ADO: + +```bash +specfact backlog daily ado \ + --ado-org dominikusnold \ + --ado-project "Specfact CLI" \ + --limit 5 +``` + +## 4) Refine Workflow (Preview + Tmp Export/Import) + +GitHub export: + +```bash +specfact backlog refine github \ + --repo-owner nold-ai \ + --repo-name specfact-demo-repo \ + --limit 3 \ + --export-to-tmp +``` + +ADO export: + +```bash +specfact backlog refine ado \ + --ado-org dominikusnold \ + --ado-project "Specfact CLI" \ + --limit 3 \ + --export-to-tmp +``` + +After refining in your AI IDE, import and write back: + +```bash +# GitHub +specfact backlog refine github \ + --repo-owner nold-ai \ + --repo-name specfact-demo-repo \ + --import-from-tmp \ + --write + +# ADO +specfact backlog refine ado \ + --ado-org dominikusnold \ + --ado-project "Specfact CLI" \ + --import-from-tmp \ + --write +``` + +### Required Tmp File Contract (Important) + +For `--import-from-tmp`, each item block must keep: + +- `## Item N: ` +- `**ID**: <original-id>` (mandatory, unchanged) +- `**URL**`, `**State**`, `**Provider**` +- `**Body**:` fenced with ```markdown + +Minimal scaffold: + +````markdown +## Item 1: Example title + +**ID**: 123 +**URL**: https://example +**State**: Active +**Provider**: ado + +**Body**: +```markdown +## As a +... +``` +```` + +Do not rename labels and do not remove details during refinement. 
+ +## 5) Minimal Create + Check Loop + +Create test issue/work item: + +```bash +# GitHub create +specfact backlog add \ + --adapter github \ + --project-id nold-ai/specfact-demo-repo \ + --type story \ + --title "SpecFact demo smoke test $(date +%Y-%m-%d-%H%M)" \ + --body "Demo item created by quickstart." \ + --acceptance-criteria "Demo item exists and is retrievable" \ + --non-interactive + +# ADO create +specfact backlog add \ + --adapter ado \ + --project-id "dominikusnold/Specfact CLI" \ + --type story \ + --title "SpecFact demo smoke test $(date +%Y-%m-%d-%H%M)" \ + --body "Demo item created by quickstart." \ + --acceptance-criteria "Demo item exists and is retrievable" \ + --non-interactive +``` + +Then verify retrieval by ID using `daily` or `refine --id <id>`. + +## Quick Troubleshooting + +- DNS/network errors (`api.github.com`, `dev.azure.com`): verify outbound network access. +- Auth errors: re-run `specfact auth status`. +- ADO mapping issues: re-run `backlog map-fields` and confirm `--ado-framework` is correct. +- Refine import mismatch: check `**ID**` was preserved exactly. + +## ADO Hardening Profile (Corporate Networks) + +For unstable corporate VPN/proxy/firewall paths, use this reliability profile. + +### Runtime behavior now hardened in CLI + +- ADO `daily`/`refine` read paths now retry transient transport failures (`ConnectionError`, reset/disconnect, timeout). +- Retry policy also covers retryable HTTP statuses (`429`, `500`, `502`, `503`, `504`) with backoff. 
+- Hardened paths include: + - WIQL query execution + - Work-item batch fetch + - Iteration/team lookup + - Work-item comments fetch + +### Operational command recommendations + +Use explicit provider context and bounded scope to reduce query fragility: + +```bash +# Daily: explicit scope +specfact backlog daily ado \ + --ado-org dominikusnold \ + --ado-project "Specfact CLI" \ + --state New \ + --limit 20 + +# Refine: small batches first, then scale +specfact backlog refine ado \ + --ado-org dominikusnold \ + --ado-project "Specfact CLI" \ + --state New \ + --limit 5 \ + --export-to-tmp +``` + +If current iteration auto-detection is unreliable in your environment, pass explicit filters (`--state`, `--sprint`, `--iteration`) rather than relying on defaults. + +### Create flow reliability notes + +- `backlog add` uses safe no-replay behavior for create operations to avoid accidental duplicate work-item creation on ambiguous transport failures. +- If create returns an ambiguous transport error, check ADO for the title before retrying manually. diff --git a/docs/guides/backlog-refinement.md b/docs/guides/backlog-refinement.md index 2a314112..bc3debbe 100644 --- a/docs/guides/backlog-refinement.md +++ b/docs/guides/backlog-refinement.md @@ -430,6 +430,7 @@ specfact backlog ceremony refinement <ADAPTER> [OPTIONS] **Options**: - `--search`, `-s` - Search query to filter backlog items +- `--state any` / `--assignee any` - Explicitly disable state/assignee filtering when needed (for example ID-specific runs). 
- `--template`, `-t` - Target template ID (default: auto-detect) - `--ignore-refined` / `--no-ignore-refined` - When using `--limit N`, apply limit to items that need refinement (default: ignore already-refined items so you see N items that actually need work) - `--id` - Refine only the backlog item with the given issue or work item ID diff --git a/docs/index.md b/docs/index.md index 6ec81ba8..076c700d 100644 --- a/docs/index.md +++ b/docs/index.md @@ -40,6 +40,29 @@ Recommended command entrypoints: ## ๐Ÿš€ Quick Start +### Backlog Bridge in 60 Seconds + +SpecFact closes the drift gap between **backlog -> specs -> code**. + +```bash +# 1) Initialize backlog config + field mapping +specfact backlog init-config --force +specfact backlog map-fields --provider ado --ado-org <org> --ado-project "<project>" + +# 2) Read and refine real backlog scope +specfact backlog daily ado --ado-org <org> --ado-project "<project>" --state any --assignee any --limit 5 +specfact backlog refine ado --ado-org <org> --ado-project "<project>" --id <work-item-id> --preview + +# 3) Validate drift before implementation +specfact policy validate --group-by-item +``` + +GitHub variant: +`specfact backlog daily github --repo-owner <owner> --repo-name <repo> --state any --assignee any --limit 5` + +Deep dive: +- **[Backlog Quickstart Demo (GitHub + ADO)](getting-started/tutorial-backlog-quickstart-demo.md)** + ### New to SpecFact CLI? **Primary Use Case**: Understanding and improving existing codebases (and new projects) @@ -235,6 +258,6 @@ See [CONTRIBUTING.md](https://github.com/nold-ai/specfact-cli/blob/main/CONTRIBU Copyright ยฉ 2025 Nold AI (Owner: Dominikus Nold) -**Trademarks**: All product names, logos, and brands mentioned in this documentation are the property of their respective owners. NOLD AI (NOLDAI) is a registered trademark (wordmark) at the European Union Intellectual Property Office (EUIPO). See [TRADEMARKS.md](../TRADEMARKS.md) for more information. 
+**Trademarks**: All product names, logos, and brands mentioned in this documentation are the property of their respective owners. NOLD AI (NOLDAI) is a registered trademark (wordmark) at the European Union Intellectual Property Office (EUIPO). See [TRADEMARKS.md](https://github.com/nold-ai/specfact-cli/blob/main/TRADEMARKS.md) for more information. -**License**: See [LICENSE](../LICENSE) for licensing information. +**License**: See [LICENSE](https://github.com/nold-ai/specfact-cli/blob/main/LICENSE) for licensing information. diff --git a/docs/reference/commands.md b/docs/reference/commands.md index 8c7b3e1b..9da1b32f 100644 --- a/docs/reference/commands.md +++ b/docs/reference/commands.md @@ -3978,8 +3978,8 @@ specfact backlog refine <ADAPTER> [OPTIONS] **Filtering Options:** - `--labels`, `--tags` - Filter by labels/tags (can specify multiple, e.g., `--labels feature,enhancement`) -- `--state` - Filter by state (e.g., `open`, `closed`, `active`) -- `--assignee` - Filter by assignee username +- `--state` - Filter by state (e.g., `open`, `closed`, `active`). Use `any` to disable state filtering. +- `--assignee` - Filter by assignee username. Use `any` to disable assignee filtering. 
- `--iteration` - Filter by iteration path (ADO format: `Project\\Sprint 1`) - `--sprint` - Filter by sprint identifier - `--release` - Filter by release identifier diff --git a/feature b/feature new file mode 100644 index 00000000..e69de29b diff --git a/modules/backlog-core/module-package.yaml b/modules/backlog-core/module-package.yaml index 9da20259..4caea7a8 100644 --- a/modules/backlog-core/module-package.yaml +++ b/modules/backlog-core/module-package.yaml @@ -1,5 +1,5 @@ name: backlog-core -version: 0.1.3 +version: 0.1.5 commands: - backlog command_help: @@ -22,8 +22,8 @@ publisher: url: https://github.com/nold-ai/specfact-cli-modules email: oss@nold.ai integrity: - checksum: sha256:12839ef2ee3a2eb6fd9901abc86b8837e6afb89ca372b6b3a0df1b4ed7c66279 - signature: rGstdKKFPad8oJ66Lse3UUD/cYx5u7HMg+ZR12mL+QNetamMeYk8QCdiryh/RAy3JIrtNW5zVY47OVuxzhSCAg== + checksum: sha256:c6ae56b1e5f3cf4d4bc0d9d256f24e6377f08e4e82a1f8bead935c0e7cee7431 + signature: FpTzbqYcR+6jiRUXjqvzfmqoLGeam7lLyLLc/ZfT7AokzRPz4cl5F/KO0b3XZmXQfHWfT+GFTJi5T/POkobJCg== dependencies: [] description: Provide advanced backlog analysis and readiness capabilities. 
license: Apache-2.0 diff --git a/modules/backlog-core/src/backlog_core/commands/add.py b/modules/backlog-core/src/backlog_core/commands/add.py index da3c7526..83d118d4 100644 --- a/modules/backlog-core/src/backlog_core/commands/add.py +++ b/modules/backlog-core/src/backlog_core/commands/add.py @@ -370,8 +370,14 @@ def _has_github_repo_issue_type_mapping(provider_fields: dict[str, Any] | None, type_ids = issue_cfg.get("type_ids") if not isinstance(type_ids, dict): return False - mapped = str(type_ids.get(issue_type) or type_ids.get(issue_type.lower()) or "").strip() - return bool(mapped) + normalized = issue_type.strip().lower() + mapped = str(type_ids.get(issue_type) or type_ids.get(normalized) or "").strip() + if mapped: + return True + if normalized == "story": + fallback = str(type_ids.get("feature") or type_ids.get("Feature") or "").strip() + return bool(fallback) + return False @beartype diff --git a/modules/backlog-core/tests/unit/test_add_command.py b/modules/backlog-core/tests/unit/test_add_command.py index 3f958870..4a5eb190 100644 --- a/modules/backlog-core/tests/unit/test_add_command.py +++ b/modules/backlog-core/tests/unit/test_add_command.py @@ -15,6 +15,7 @@ sys.path.insert(0, str(REPO_ROOT / "modules" / "backlog-core" / "src")) sys.path.insert(0, str(REPO_ROOT / "src")) +from backlog_core.commands.add import _has_github_repo_issue_type_mapping from backlog_core.main import backlog_app @@ -41,6 +42,18 @@ def create_issue(self, project_id: str, payload: dict) -> dict: return {"id": "123", "key": "123", "url": "https://example.test/issues/123"} +def test_has_github_repo_issue_type_mapping_story_fallback_to_feature() -> None: + """When story is unavailable but feature exists, mapping should still be considered available.""" + provider_fields = {"github_issue_types": {"type_ids": {"feature": "IT_FEATURE_ID"}}} + assert _has_github_repo_issue_type_mapping(provider_fields, "story") is True + + +def 
test_has_github_repo_issue_type_mapping_story_missing_without_feature() -> None: + """When both story and feature are unavailable, mapping is unavailable.""" + provider_fields = {"github_issue_types": {"type_ids": {"bug": "IT_BUG_ID"}}} + assert _has_github_repo_issue_type_mapping(provider_fields, "story") is False + + def test_backlog_add_non_interactive_requires_type_and_title(monkeypatch) -> None: """Non-interactive add fails when required options are missing.""" from specfact_cli.adapters.registry import AdapterRegistry diff --git a/openspec/CHANGE_ORDER.md b/openspec/CHANGE_ORDER.md index 81241234..27c4f6be 100644 --- a/openspec/CHANGE_ORDER.md +++ b/openspec/CHANGE_ORDER.md @@ -100,6 +100,7 @@ These are derived extensions of the same 2026-02-15 plan and are required to ope | backlog-core | 02 | backlog-core-02-interactive-issue-creation (implemented 2026-02-22; archived) | [#173](https://github.com/nold-ai/specfact-cli/issues/173) | #116 (optional: #176, #177) | | backlog-core | 04 | backlog-core-04-installed-runtime-discovery-and-add-prompt (implemented 2026-02-23; archived) | [#295](https://github.com/nold-ai/specfact-cli/issues/295) | #173 | | backlog-core | 05 | backlog-core-05-user-modules-bootstrap (implemented 2026-02-23; pending archive) | [#298](https://github.com/nold-ai/specfact-cli/issues/298) | #173 | +| backlog-core | 06 | backlog-core-06-refine-custom-field-writeback | [#310](https://github.com/nold-ai/specfact-cli/issues/310) | #173 | ### backlog-scrum diff --git a/openspec/changes/backlog-core-06-refine-custom-field-writeback/.openspec.yaml b/openspec/changes/backlog-core-06-refine-custom-field-writeback/.openspec.yaml new file mode 100644 index 00000000..e331c975 --- /dev/null +++ b/openspec/changes/backlog-core-06-refine-custom-field-writeback/.openspec.yaml @@ -0,0 +1,2 @@ +schema: spec-driven +created: 2026-02-25 diff --git a/openspec/changes/backlog-core-06-refine-custom-field-writeback/CHANGE_VALIDATION.md 
b/openspec/changes/backlog-core-06-refine-custom-field-writeback/CHANGE_VALIDATION.md new file mode 100644 index 00000000..b6e61002 --- /dev/null +++ b/openspec/changes/backlog-core-06-refine-custom-field-writeback/CHANGE_VALIDATION.md @@ -0,0 +1,37 @@ +# CHANGE VALIDATION: backlog-core-06-refine-custom-field-writeback + +## Date +2026-02-25 + +## Scope Reviewed +- Proposal, design, tasks, and spec delta for custom field writeback reliability and tmp import ID contract. +- Impacted runtime surfaces: + - `src/specfact_cli/backlog/mappers/ado_mapper.py` + - `src/specfact_cli/adapters/ado.py` + - `src/specfact_cli/modules/backlog/src/commands.py` + - `resources/prompts/specfact.backlog-refine.md` + +## Breaking-Change Analysis +- External CLI flags: no additions/removals. +- Behavior changes: + - ADO writeback target field selection becomes deterministic and honors custom mapping precedence. + - Tmp import now fails explicitly when IDs are missing or mismatched instead of silently producing zero updates. +- Compatibility: low risk and backward-compatible for valid refine artifacts. Invalid artifacts now fail faster with guidance. + +## Dependency Analysis +- Adapter dependency: confined to existing `AdoFieldMapper` and `AdoAdapter.update_backlog_item` interaction. +- Command dependency: confined to `backlog refine --import-from-tmp` flow and prompt/export guidance. +- No new package/runtime dependencies introduced. + +## Validation Commands +```bash +openspec validate backlog-core-06-refine-custom-field-writeback --strict +``` + +Result: `Change 'backlog-core-06-refine-custom-field-writeback' is valid`. + +## Conclusion +Change is safe to implement with focused tests covering: +1. custom mapping precedence for canonical write targets, +2. adapter patch paths for mapped fields, +3. tmp import ID mismatch failure behavior. 
diff --git a/openspec/changes/backlog-core-06-refine-custom-field-writeback/TDD_EVIDENCE.md b/openspec/changes/backlog-core-06-refine-custom-field-writeback/TDD_EVIDENCE.md new file mode 100644 index 00000000..c17377cf --- /dev/null +++ b/openspec/changes/backlog-core-06-refine-custom-field-writeback/TDD_EVIDENCE.md @@ -0,0 +1,75 @@ +# TDD Evidence: backlog-core-06-refine-custom-field-writeback + +## Pre-Implementation Failing Run + +- Timestamp (UTC): 2026-02-25T12:48:54Z +- Command: + +```bash +hatch run pytest \ + tests/unit/backlog/test_field_mappers.py::TestAdoFieldMapper::test_resolve_write_target_prefers_custom_mapping_field \ + tests/unit/adapters/test_ado_backlog_adapter.py::TestAdoBacklogAdapter::test_update_backlog_item_uses_custom_story_points_field_mapping \ + tests/unit/commands/test_backlog_commands.py::TestBuildRefineExportContent::test_refine_export_marks_id_as_mandatory_for_import \ + tests/unit/commands/test_backlog_commands.py::TestRefineImportFromTmp::test_import_from_tmp_fails_when_no_parsed_ids_match_fetched_items \ + -v +``` + +- Result: **FAILED (4 failed)** +- Failure summary: + - `AdoFieldMapper` missing `resolve_write_target_field` API. + - ADO adapter did not PATCH `Microsoft.VSTS.Scheduling.StoryPoints` under custom mapping. + - refine export content missing mandatory ID contract text. + - refine import from tmp returned success on unmatched IDs instead of explicit failure. 
+ +## Post-Implementation Passing Run + +- Timestamp (UTC): 2026-02-25T12:02:36Z +- Command: + +```bash +hatch run pytest \ + tests/unit/commands/test_backlog_commands.py::TestResolveTargetTemplateForRefineItem::test_ado_user_story_type_prefers_user_story_template \ + tests/unit/commands/test_backlog_commands.py::TestParseRefinedExportMarkdown::test_parses_item_when_file_starts_with_item_header \ + tests/unit/adapters/test_ado_backlog_adapter.py::TestAdoBacklogAdapter::test_create_issue_uses_custom_mapped_fields_and_markdown_multiline_format \ + tests/unit/adapters/test_ado_backlog_adapter.py::TestAdoBacklogAdapter::test_update_backlog_item_strips_leading_description_heading_for_ado \ + tests/integration/backlog/test_ado_markdown_rendering.py::TestAdoMarkdownRendering::test_update_backlog_item_with_markdown_format \ + -q +``` + +- Result: **PASSED (5 passed)** +- Passing summary: + - ADO user-story work item types are steered to `user_story_v1` template resolution. + - Refine tmp import parser handles first-block headers and does not leak `## Item N:` into title. + - ADO create path honors custom mapped write targets and markdown format metadata. + - ADO update path strips leading `## Description` scaffold heading before write-back. + - ADO markdown write-back includes multiline markdown format operations for mapped rich-text fields. + +## Regression Fix: Rich Text Normalization (Review Findings) + +### Pre-Implementation Failing Run + +- Timestamp (UTC): 2026-02-25T20:51:00Z +- Command: + +```bash +hatch test -- tests/unit/backlog/test_field_mappers.py -v +``` + +- Result: **FAILED (2 failed)** +- Failure summary: + - `<br />` tags were not converted to newline because of escaped `\\s` in regex and lines collapsed in extracted content. + - Non-HTML angle-bracket text (for example `<tenant_id>` or `x < y > z`) was incorrectly treated as HTML and stripped. 
+ +### Post-Implementation Passing Run + +- Timestamp (UTC): 2026-02-25T20:54:53Z +- Command: + +```bash +hatch test -- tests/unit/backlog/test_field_mappers.py -v +``` + +- Result: **PASSED (37 passed)** +- Passing summary: + - `<br>`, `<br/>`, and `<br />` are normalized to newlines. + - Rich text normalization only activates for known HTML tags, preventing accidental stripping of non-HTML placeholders and angle-bracket content. diff --git a/openspec/changes/backlog-core-06-refine-custom-field-writeback/design.md b/openspec/changes/backlog-core-06-refine-custom-field-writeback/design.md new file mode 100644 index 00000000..752f9157 --- /dev/null +++ b/openspec/changes/backlog-core-06-refine-custom-field-writeback/design.md @@ -0,0 +1,48 @@ +## Context + +Backlog refine writeback currently computes reverse mappings by first-seen canonical field keys. Because default mapping order includes `Microsoft.VSTS.Common.StoryPoints` before other story-point candidates, writeback can target the wrong field even when custom mappings are configured. The tmp import workflow also treats `**ID**` as required in parser logic but does not enforce that contract strongly enough in user-facing instructions and mismatch handling. + +## Goals / Non-Goals + +**Goals:** +- Make canonical-to-provider writeback field resolution deterministic and custom-mapping-safe for ADO. +- Ensure refine tmp import contract explicitly requires preserving `**ID**` for lookup. +- Fail fast with actionable diagnostics when parsed IDs do not map to fetched items. + +**Non-Goals:** +- No redesign of adapter registry or template detection. +- No change to provider-independent refine output structure beyond explicit ID contract text. + +## Decisions + +1. Introduce mapper-level write-target resolution helper. +- Add a dedicated method in `AdoFieldMapper` that resolves the preferred ADO field for a canonical field. 
+- Precedence: custom mapping key(s) first, then provider-present mapped fields (from current item/provider_fields), then default/framework fallback. +- Rationale: centralizes precedence in mapper and avoids duplicated, order-sensitive logic in adapters. + +2. Update `AdoAdapter.update_backlog_item` to use resolved canonical targets. +- Replace ad-hoc reverse mapping and membership checks with mapper-resolved targets for each canonical field. +- Rationale: guarantees consistency and removes dependence on Python dict insertion order side effects. + +3. Strengthen tmp import contract and mismatch handling. +- Update prompt/export guidance to state `**ID**` is mandatory and must be unchanged. +- Add explicit command error when parsed blocks exist but zero IDs match fetched items. +- Rationale: prevents silent no-op writeback and improves Copilot workflow reliability. + +## Risks / Trade-offs + +- [Risk] Mapper helper introduces new logic branch for target selection. + - Mitigation: Add focused unit tests for custom/default precedence and adapter patch-path assertions. +- [Risk] Stricter tmp import validation may fail previously permissive malformed files. + - Mitigation: Provide explicit remediation text in error output and prompt instructions. + +## Migration Plan + +1. Add/modify specs and tests for mapping precedence and ID mismatch behavior. +2. Capture failing test evidence in `TDD_EVIDENCE.md`. +3. Implement mapper + adapter + command changes. +4. Re-run targeted tests and then full required quality gates. + +## Open Questions + +- None for v1. Current scope fully covers reported custom story points and ID contract failures. 
diff --git a/openspec/changes/backlog-core-06-refine-custom-field-writeback/proposal.md b/openspec/changes/backlog-core-06-refine-custom-field-writeback/proposal.md new file mode 100644 index 00000000..6fb7a36a --- /dev/null +++ b/openspec/changes/backlog-core-06-refine-custom-field-writeback/proposal.md @@ -0,0 +1,37 @@ +# Change: Fix custom field mapping reliability in backlog refine writeback + +## Why + +`specfact backlog refine --import-from-tmp --write` can write ADO story points to a default field (`Microsoft.VSTS.Common.StoryPoints`) even when custom mapping or framework mapping requires another field (for example `Microsoft.VSTS.Scheduling.StoryPoints` or custom process fields). This causes persistent writeback failures and incorrect field updates in Copilot-mode refinement workflows. + +## What Changes + +- **MODIFY** ADO writeback field target resolution so canonical fields (`story_points`, `acceptance_criteria`, `business_value`, `priority`) consistently resolve to the effective mapped provider field with deterministic precedence. +- **MODIFY** backlog refine tmp export/import guidance to require preserving per-item `**ID**` for successful lookup and writeback. +- **MODIFY** tmp import behavior to fail fast with an actionable error when parsed refined blocks do not match any fetched backlog item IDs. +- **MODIFY** backlog prompt templates under `resources/prompts/specfact.backlog-*.md` to document exact parser-required tmp/input structure for each backlog command path. +- **MODIFY** ADO extraction normalization so body and acceptance criteria rich text are consistently converted to markdown-like text before refine/export/writeback operations. +- **EXTEND** tests across mapper, adapter, and command import flows to prove custom mapping reliability and ID contract enforcement. + +## Capabilities + +### New Capabilities +- `backlog-refine-writeback-mapping`: deterministic write-target selection for mapped provider fields during backlog refine writeback. 
+ +### Modified Capabilities +- `backlog-refinement`: refine export/import contract and failure handling for mandatory item IDs. +- `format-abstraction`: normalize ADO rich text/HTML backlog fields to markdown-like text for canonical backlog model usage. + +## Impact + +- Affected code: + - `src/specfact_cli/backlog/mappers/ado_mapper.py` + - `src/specfact_cli/adapters/ado.py` + - `src/specfact_cli/backlog/converter.py` + - `src/specfact_cli/modules/backlog/src/commands.py` + - `resources/prompts/specfact.backlog-*.md` + - tests under `tests/unit/adapters/`, `tests/unit/backlog/`, `tests/unit/commands/` +- Affected behavior: + - ADO field writeback in refine flows now respects configured mapping priority for all mapped canonical fields. + - Tmp import requires stable `**ID**` keys and surfaces explicit mismatch errors instead of silently producing zero updates. +- Dependencies: no new runtime dependency; relies on existing ADO mapper and backlog adapter abstractions. diff --git a/openspec/changes/backlog-core-06-refine-custom-field-writeback/specs/backlog-refinement/spec.md b/openspec/changes/backlog-core-06-refine-custom-field-writeback/specs/backlog-refinement/spec.md new file mode 100644 index 00000000..09ce7f98 --- /dev/null +++ b/openspec/changes/backlog-core-06-refine-custom-field-writeback/specs/backlog-refinement/spec.md @@ -0,0 +1,47 @@ +## MODIFIED Requirements + +### Requirement: Abstract Field Mapping Layer + +The system SHALL provide an abstract field mapping layer that normalizes provider-specific field structures to canonical field names. 
+ +#### Scenario: ADO writeback resolves mapped story points field deterministically + +- **GIVEN** an ADO work item refine writeback with `story_points` set +- **AND** multiple candidate ADO fields map to `story_points` (for example default and custom mappings) +- **WHEN** writeback field resolution runs +- **THEN** the system selects the effective write target with deterministic precedence: explicit custom mapping first, then provider-present mapped fields, then framework/default fallback +- **AND** the PATCH operation uses the resolved mapped field (for example `Microsoft.VSTS.Scheduling.StoryPoints` or a custom field) +- **AND** the system does not silently fall back to a non-selected default field. + +#### Scenario: ADO writeback resolves all mapped canonical fields consistently + +- **GIVEN** canonical update values for `acceptance_criteria`, `story_points`, `business_value`, and `priority` +- **WHEN** ADO writeback builds PATCH operations +- **THEN** each canonical field uses the same mapped write-target resolution strategy +- **AND** custom mappings apply consistently across all canonical fields supported by ADO mapper configuration. + +### Requirement: Backlog Item Refinement Command + +The system SHALL provide a `specfact backlog refine` command that enables teams to standardize backlog items using AI-assisted template matching and refinement. + +#### Scenario: Refined tmp import requires stable item IDs + +- **GIVEN** a refined markdown artifact intended for `--import-from-tmp` +- **WHEN** the artifact is parsed +- **THEN** each `## Item N:` block MUST include an `**ID**` property copied from the export +- **AND** import rejects artifacts that omit required IDs for item lookup. 
+ +#### Scenario: Refined tmp import reports ID mismatch explicitly + +- **GIVEN** a refined markdown artifact with parsed item blocks +- **AND** none of the parsed `**ID**` values match fetched backlog items for the current refine command filters +- **WHEN** import processing runs +- **THEN** the command exits with an explicit error describing the ID mismatch +- **AND** the message instructs the user to preserve exported IDs unchanged. + +#### Scenario: `any` disables state/assignee filtering + +- **GIVEN** a user runs backlog commands that support state/assignee filters (for example `daily` or `refine`) +- **WHEN** the user passes `--state any` and/or `--assignee any` +- **THEN** the system treats the respective filter as disabled (no filter applied) +- **AND** command output/help makes this behavior explicit so default scoping is understandable. diff --git a/openspec/changes/backlog-core-06-refine-custom-field-writeback/tasks.md b/openspec/changes/backlog-core-06-refine-custom-field-writeback/tasks.md new file mode 100644 index 00000000..43d64306 --- /dev/null +++ b/openspec/changes/backlog-core-06-refine-custom-field-writeback/tasks.md @@ -0,0 +1,39 @@ +## 1. Change Setup + +- [x] 1.1 Create worktree branch `bugfix/backlog-core-06-refine-custom-field-writeback` from `origin/dev` using `scripts/worktree.sh create` and run all implementation commands from that worktree. +- [x] 1.2 Run `openspec validate backlog-core-06-refine-custom-field-writeback --strict` and fix artifact issues. +- [x] 1.3 Run `.cursor/commands/wf-validate-change.md` workflow expectations and capture output in `openspec/changes/backlog-core-06-refine-custom-field-writeback/CHANGE_VALIDATION.md`. + +## 2. Tests First (TDD) + +- [x] 2.1 Add/modify mapper tests to verify deterministic canonical write-target selection prefers custom mapping and mapped provider-present fields for `story_points`, `acceptance_criteria`, `business_value`, and `priority`. 
+- [x] 2.2 Add/modify adapter tests to assert ADO PATCH paths use resolved mapped fields (including `Microsoft.VSTS.Scheduling.StoryPoints` and custom story points field cases). +- [x] 2.3 Add/modify command tests to enforce tmp import ID contract and explicit mismatch failure when parsed IDs do not match fetched items. +- [x] 2.4 Run targeted tests and record failing pre-implementation evidence in `TDD_EVIDENCE.md` with command, timestamp, and failure summary. + +## 3. Implementation + +- [x] 3.1 Implement mapper-level canonical write-target resolution helper and integrate it into writeback path. +- [x] 3.2 Update `AdoAdapter.update_backlog_item` to use mapper write-target resolution for mapped canonical fields and remove order-sensitive reverse mapping behavior. +- [x] 3.3 Update refine export instructions and prompt guidance to mark `**ID**` as mandatory and unchanged for tmp import. +- [x] 3.4 Update tmp import flow to fail fast with actionable message when parsed IDs do not map to fetched backlog items. + +## 4. Verification and Quality Gates + +- [x] 4.1 Re-run targeted tests and record passing post-implementation evidence in `TDD_EVIDENCE.md`. +- [x] 4.2 Run quality gates in order: `hatch run format`, `hatch run type-check`, `hatch run lint`, `hatch run yaml-lint`, `hatch run contract-test`, `hatch run smart-test`. +- [x] 4.3 Update docs impacted by behavior changes (`resources/prompts/specfact.backlog-refine.md`, command reference if required). +- [x] 4.4 Run `hatch run ./scripts/verify-modules-signature.py --require-signature`. +- [x] 4.5 Prepare PR to `dev` with TDD evidence and change validation artifacts linked. + +## 5. Extended Runtime Findings Coverage + +- [x] 5.1 Fix parser/template mismatch across backlog prompt resources so tmp/import structure instructions exactly match parser expectations per provider. 
+- [x] 5.2 Ensure ADO markdown-supported fields are consistently written in markdown format (including acceptance criteria and description paths). +- [x] 5.3 Prevent title pollution on tmp import/writeback (no accidental `## Item <no>:` prefix in title updates). +- [x] 5.4 Correct template steering for ADO user stories (prefer `user_story_v1` over generic `ado_work_item_v1` where applicable). +- [x] 5.5 Prevent duplicated section headings in structured backend fields (for example ADO `Description` content). +- [x] 5.6 Extend `map-fields` to include explicit ADO process/framework selection and persist/apply it for template/prompt steering. +- [x] 5.7 Align prompt guidance with provider-specific field schemas (for example story points handling in ADO process templates). +- [x] 5.8 Add/strengthen anti-summarization guardrails in refine prompting/import flow so bulk refine does not silently drop required detail. +- [x] 5.9 Add explicit no-filter override semantics for backlog commands: `--state any` and `--assignee any` must disable filtering, and document this behavior in command/help docs. diff --git a/pyproject.toml b/pyproject.toml index a3dae7e3..7048f0ee 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -4,7 +4,7 @@ build-backend = "hatchling.build" [project] name = "specfact-cli" -version = "0.37.4" +version = "0.37.5" description = "The swiss knife CLI for agile DevOps teams. Keep backlog, specs, tests, and code in sync with validation and contract enforcement for new projects and long-lived codebases." readme = "README.md" requires-python = ">=3.11" diff --git a/resources/prompts/specfact.backlog-add.md b/resources/prompts/specfact.backlog-add.md index b72b6ad4..84427891 100644 --- a/resources/prompts/specfact.backlog-add.md +++ b/resources/prompts/specfact.backlog-add.md @@ -79,6 +79,12 @@ specfact backlog add [OPTIONS] - Always execute `specfact backlog add` for creation. 
- Do not create provider issues/work items directly outside CLI unless user explicitly requests a manual path. +## Input Contract + +- This command does not use `--export-to-tmp`/`--import-from-tmp` artifacts. +- Provide values through CLI options or interactive prompts; do not fabricate external tmp-file schemas. +- Do not ask Copilot to output `## Item N:` sections, `**ID**` labels, or markdown tmp files for this command. + ## Context {ARGS} diff --git a/resources/prompts/specfact.backlog-daily.md b/resources/prompts/specfact.backlog-daily.md index a56338e6..e4675434 100644 --- a/resources/prompts/specfact.backlog-daily.md +++ b/resources/prompts/specfact.backlog-daily.md @@ -35,8 +35,8 @@ When run from a **clone**, org/repo or org/project are inferred from `git remote ### Filters -- `--state STATE` - Filter by state (e.g. open, Active) -- `--assignee USERNAME` or `--assignee me` - Filter by assignee +- `--state STATE` - Filter by state (e.g. open, Active). Use `--state any` to disable state filtering. +- `--assignee USERNAME` or `--assignee me` - Filter by assignee. Use `--assignee any` to disable assignee filtering. - `--search QUERY` - Provider-specific search query - `--release RELEASE` - Filter by release identifier - `--id ISSUE_ID` - Filter to one exact backlog item ID @@ -114,6 +114,12 @@ When the user has run `specfact backlog daily ... --summarize` or `--summarize-t - Use `--interactive` for story-by-story walkthrough; use `--summarize` or `--summarize-to` when a standup summary prompt is needed. - Use `--copilot-export` when you need a file of item summaries for reference during standup. +## Output Contract + +- This command does not support `--import-from-tmp`; do not invent a tmp import schema. +- Do not instruct Copilot to produce `## Item N:` blocks or `**ID**`/`**Body**` tmp artifacts for this command. +- If you write `--copilot-export` or `--summarize-to` artifacts, keep item sections and IDs unchanged from CLI output. 
+ ## Context {ARGS} diff --git a/resources/prompts/specfact.backlog-refine.md b/resources/prompts/specfact.backlog-refine.md index 3c800348..2d6f83f5 100644 --- a/resources/prompts/specfact.backlog-refine.md +++ b/resources/prompts/specfact.backlog-refine.md @@ -58,10 +58,11 @@ Refine backlog items from DevOps tools (GitHub Issues, Azure DevOps, etc.) into ### Filters - `--labels LABELS` or `--tags TAGS` - Filter by labels/tags (comma-separated, e.g., "feature,enhancement") -- `--state STATE` - Filter by state (case-insensitive, e.g., "open", "closed", "Active", "New") +- `--state STATE` - Filter by state (case-insensitive, e.g., "open", "closed", "Active", "New"). Use `--state any` to disable state filtering. - `--assignee USERNAME` - Filter by assignee (case-insensitive): - **GitHub**: Login or @username (e.g., "johndoe" or "@johndoe") - **ADO**: displayName, uniqueName, or mail (e.g., "Jane Doe" or `"jane.doe@example.com"`) + - Use `--assignee any` to disable assignee filtering. - `--iteration PATH` - Filter by iteration path (ADO format: "Project\\Sprint 1", case-insensitive) - `--sprint SPRINT` - Filter by sprint (case-insensitive): - **ADO**: Use full iteration path (e.g., "Project\\Sprint 1") to avoid ambiguity when multiple sprints share the same name @@ -102,6 +103,155 @@ Refine backlog items from DevOps tools (GitHub Issues, Azure DevOps, etc.) into When refining from an exported file, treat the embedded instructions in that file as the source of truth for required structure and formatting. +**Critical content-preservation rule**: +- Never summarize, shorten, or silently remove details from story content. +- Preserve all existing requirements, constraints, business value, and feature intent. +- Refinement must increase clarity/structure, not reduce scope/detail. + +**Exact tmp structure contract (`--import-from-tmp`)**: + +- Keep one section per item with this exact heading pattern: `## Item N: <title>`. 
+- The first non-empty line of each item block MUST be the `## Item N: ...` heading. +- Keep and preserve these metadata labels exactly (order may vary; labels must match): + - `**ID**: <original exported id>` (**mandatory and unchanged**) + - `**URL**: <url>` + - `**State**: <state>` + - `**Provider**: <provider>` +- Keep body in this exact form (fence language must be `markdown`): + - `**Body**:` + - ```` ```markdown ... ``` ```` +- Optional parsed fields (if present) must use exact labels: + - `**Acceptance Criteria**:` + - `**Metrics**:` with lines containing `Story Points:`, `Business Value:`, `Priority:` +- Do not prepend explanatory text, summaries, or headers before the first `## Item N:` block. +- Do not rename labels (`**ID**`, `**Body**`, `**Acceptance Criteria**`, `**Metrics**`). + +Exact item example: + +````markdown +## Item 1: Improve backlog refine import mapping + +**ID**: 123 +**URL**: https://dev.azure.com/org/project/_workitems/edit/123 +**State**: Active +**Provider**: ado + +**Metrics**: +- Story Points: 5 +- Business Value: 8 +- Priority: 2 + +**Acceptance Criteria**: +- [ ] Mapping uses configured story points field + +**Body**: +```markdown +## Description + +Refined body content. +``` +```` + +If `**ID**` is missing or changed, import cannot map refined content to backlog items and writeback will fail. + +**Provider-specific body contract (critical)**: + +- **GitHub**: + - Keep template narrative sections in `**Body**` markdown (for example `## As a`, `## I want`, `## So that`, `## Acceptance Criteria` when required by template). + - Metrics may be in `**Metrics**` and/or body sections if your template expects body headings. +- **ADO**: + - Keep narrative/template sections in `**Body**` markdown. + - Keep structured metadata in `**Metrics**` (`Story Points`, `Business Value`, `Priority`). 
+ - Do **not** add metadata-only headings (`## Story Points`, `## Business Value`, `## Priority`, `## Work Item Type`, `## Area Path`, `## Iteration Path`) inside body text. + - Do **not** duplicate `## Description` heading text into the narrative content. + +**Template-driven refinement method (mandatory)**: + +- Use exported `**Target Template**`, `**Required Sections**`, and `**Optional Sections**` as the authoritative contract for each item. +- Preserve all functional and non-functional requirements; never silently drop details. +- Improve clarity, specificity, and testability (SMART-style) without scope reduction. +- If one story is too large, propose split candidates in `## Notes`; do not remove detail from the original item silently. + +**What to include / exclude boundaries**: + +- Include: + - All original business intent, user value, constraints, assumptions, dependencies, and acceptance signals. + - Explicit acceptance criteria and measurable outcomes. +- Exclude: + - Generic summaries that replace detailed requirements. + - Placeholder text (`unspecified`, `TBD`, `no info`) when original detail exists. + - Extra wrapper prose outside `## Item N:` blocks. 
+ +One-shot GitHub scaffold example: + +````markdown +## Item 1: Improve authentication flow + +**ID**: 42 +**URL**: https://github.com/org/repo/issues/42 +**State**: open +**Provider**: github + +**Metrics**: +- Story Points: 8 +- Business Value: 13 +- Priority: 2 + +**Acceptance Criteria**: +- [ ] Token refresh handles expiry and retry behavior + +**Body**: +```markdown +## As a +platform user + +## I want +reliable authentication and token refresh behavior + +## So that +I can access protected resources without disruption + +## Acceptance Criteria +- [ ] Valid refresh token rotates and issues new access token +- [ ] Expired/invalid token returns clear error and audit event +``` +```` + +One-shot ADO scaffold example: + +````markdown +## Item 2: Harden login reliability + +**ID**: 108 +**URL**: https://dev.azure.com/org/project/_workitems/edit/108 +**State**: Active +**Provider**: ado + +**Metrics**: +- Story Points: 5 +- Business Value: 8 +- Priority: 2 + +**Acceptance Criteria**: +- [ ] All required acceptance checks are explicit and testable + +**Body**: +```markdown +## As a +registered user + +## I want +the login flow to handle token expiry and retries safely + +## So that +I can complete authentication without ambiguity or data loss + +## Acceptance Criteria +- [ ] Expired access token triggers refresh workflow +- [ ] Failed refresh prompts re-authentication with clear guidance +``` +```` + **Comment context in export**: - Export includes item comments when adapter supports comment retrieval (GitHub + ADO). 
diff --git a/resources/prompts/specfact.sync-backlog.md b/resources/prompts/specfact.sync-backlog.md index 5f9446ac..83dc155e 100644 --- a/resources/prompts/specfact.sync-backlog.md +++ b/resources/prompts/specfact.sync-backlog.md @@ -50,6 +50,32 @@ Sync OpenSpec change proposals to DevOps backlog tools (GitHub Issues, ADO, Line - `--tmp-file PATH` - Specify temporary file path (used with --export-to-tmp or --import-from-tmp) - Default: `/tmp/specfact-proposal-<change-id>.md` or `/tmp/specfact-proposal-<change-id>-sanitized.md` +**Exact tmp structure contract (`sync bridge --import-from-tmp`)**: + +- Preserve proposal heading and section headers exactly: + - `# Change: <title>` + - `## Why` + - `## What Changes` +- Keep a blank line after each section header (`## Why` and `## What Changes`) before content. +- Do not rename, remove, or reorder these headers. +- Keep sanitized content inside `## Why` and `## What Changes` sections only. +- Do not add extra top-level sections before, between, or after these sections. +- If headers are missing or renamed, parser extraction for rationale/description will be incomplete. + +Exact sanitized tmp example: + +```markdown +# Change: Improve backlog refinement mapping + +## Why + +Short rationale text. + +## What Changes + +Sanitized proposal description text. 
+``` + ### Code Change Tracking (Advanced) - `--track-code-changes/--no-track-code-changes` - Detect code changes (git commits, file modifications) and add progress comments to existing issues (default: False) diff --git a/scripts/pre-commit-smart-checks.sh b/scripts/pre-commit-smart-checks.sh index 2ae95fb5..3931d6b6 100755 --- a/scripts/pre-commit-smart-checks.sh +++ b/scripts/pre-commit-smart-checks.sh @@ -29,6 +29,67 @@ has_staged_workflows() { staged_files | grep -E '^\.github/workflows/.*\\.ya?ml$' >/dev/null 2>&1 } +has_staged_markdown() { + staged_files | grep -E '\\.md$' >/dev/null 2>&1 +} + +run_module_signature_verification() { + info "๐Ÿ” Verifying bundled module signatures/version bumps" + if hatch run ./scripts/verify-modules-signature.py --require-signature --enforce-version-bump; then + success "โœ… Module signature/version verification passed" + else + error "โŒ Module signature/version verification failed" + warn "๐Ÿ’ก Re-sign changed modules with version bump before commit" + exit 1 + fi +} + +run_markdown_lint_if_needed() { + if has_staged_markdown; then + info "๐Ÿ“ Markdown changes detected โ€” running markdownlint" + local md_files + md_files=$(staged_files | grep -E '\\.md$' || true) + if [ -z "${md_files}" ]; then + info "โ„น๏ธ No staged markdown files resolved โ€” skipping markdownlint" + return + fi + + if command -v markdownlint >/dev/null 2>&1; then + if echo "${md_files}" | xargs -r markdownlint --config .markdownlint.json; then + success "โœ… Markdown lint passed" + else + error "โŒ Markdown lint failed" + exit 1 + fi + else + if echo "${md_files}" | xargs -r npx --yes markdownlint-cli --config .markdownlint.json; then + success "โœ… Markdown lint passed (npx)" + else + error "โŒ Markdown lint failed (npx)" + warn "๐Ÿ’ก Install markdownlint-cli globally for faster hooks: npm i -g markdownlint-cli" + exit 1 + fi + fi + else + info "โ„น๏ธ No staged Markdown changes โ€” skipping markdownlint" + fi +} + +run_format_safety() { + info 
"๐Ÿงน Running formatter safety check (hatch run format)" + if hatch run format; then + if ! git diff --quiet -- . || ! git diff --cached --quiet -- .; then + error "โŒ Formatter changed files. Review and re-stage before committing." + warn "๐Ÿ’ก Run: hatch run format && git add -A" + exit 1 + fi + success "โœ… Formatting check passed" + else + error "โŒ Formatting check failed" + exit 1 + fi +} + run_yaml_lint_if_needed() { if has_staged_yaml; then info "๐Ÿ”Ž YAML changes detected โ€” running yamllint (relaxed)" @@ -108,7 +169,12 @@ check_safe_change() { warn "๐Ÿ” Running pre-commit checks (YAML/workflows + smart tests)" +# Always enforce module signature/version policy before commit +run_module_signature_verification +run_format_safety + # Always run lint checks when relevant files changed +run_markdown_lint_if_needed run_yaml_lint_if_needed run_actionlint_if_needed diff --git a/scripts/setup-git-hooks.sh b/scripts/setup-git-hooks.sh index 730f8b22..c8c03801 100755 --- a/scripts/setup-git-hooks.sh +++ b/scripts/setup-git-hooks.sh @@ -46,6 +46,9 @@ fi echo -e "${GREEN}๐ŸŽ‰ Git hooks setup complete!${NC}" echo "" echo "The pre-commit hook will now:" +echo " โ€ข Verify module signatures and enforce version bumps" +echo " โ€ข Run hatch formatter safety check and fail if files are changed" +echo " โ€ข Run markdownlint for staged Markdown files" echo " โ€ข Run yamllint for YAML changes (relaxed policy)" echo " โ€ข Run actionlint for .github/workflows changes" echo " โ€ข Check for file changes using smart detection" @@ -55,6 +58,9 @@ echo " โ€ข Let GitHub Actions handle full contract test suite validation" echo " โ€ข Provide fast feedback for developers with contract validation" echo "" echo "Manual commands:" +echo " โ€ข Module signatures: hatch run ./scripts/verify-modules-signature.py --require-signature --enforce-version-bump" +echo " โ€ข Format code: hatch run format" +echo " โ€ข Markdown lint: markdownlint --config .markdownlint.json <files.md>" echo " โ€ข YAML 
lint: hatch run yaml-lint" echo " โ€ข Workflow lint: hatch run lint-workflows" echo " โ€ข Contract tests: hatch run contract-test" diff --git a/scripts/sign-module.sh b/scripts/sign-module.sh index 304a14eb..6c71f1e8 100755 --- a/scripts/sign-module.sh +++ b/scripts/sign-module.sh @@ -96,6 +96,78 @@ fi if [[ "$ALLOW_SAME_VERSION" -eq 1 ]]; then ARGS+=(--allow-same-version) fi + +# Enforce version bump for changed module payload before any signing/key checks. +if [[ "$ALLOW_SAME_VERSION" -ne 1 ]]; then + python3 - "$MANIFEST" <<'PY' +from __future__ import annotations + +import subprocess +import sys +from pathlib import Path + +import yaml + + +def _run(cmd: list[str]) -> str: + return subprocess.run(cmd, check=True, capture_output=True, text=True).stdout.strip() + + +manifest = Path(sys.argv[1]).resolve() +module_dir = manifest.parent + +try: + current_raw = yaml.safe_load(manifest.read_text(encoding="utf-8")) +except Exception as exc: + print(f"Error: failed reading manifest {manifest}: {exc}", file=sys.stderr) + raise SystemExit(1) from exc + +if not isinstance(current_raw, dict): + print(f"Error: invalid manifest YAML (expected object): {manifest}", file=sys.stderr) + raise SystemExit(1) + +current_version = str(current_raw.get("version", "")).strip() +if not current_version: + print(f"Error: manifest missing version: {manifest}", file=sys.stderr) + raise SystemExit(1) + +try: + manifest_rel = manifest.relative_to(Path.cwd().resolve()).as_posix() +except ValueError: + # Outside current repo root: skip git-based preflight and let signer handle it. 
+ raise SystemExit(0) +try: + previous_text = _run(["git", "show", f"HEAD:{manifest_rel}"]) +except Exception: + raise SystemExit(0) + +try: + previous_raw = yaml.safe_load(previous_text) +except Exception: + raise SystemExit(0) +if not isinstance(previous_raw, dict): + raise SystemExit(0) + +previous_version = str(previous_raw.get("version", "")).strip() +if not previous_version or previous_version != current_version: + raise SystemExit(0) + +try: + changed = _run(["git", "diff", "--name-only", "HEAD", "--", module_dir.as_posix()]) + untracked = _run(["git", "ls-files", "--others", "--exclude-standard", "--", module_dir.as_posix()]) +except Exception: + raise SystemExit(0) + +if changed or untracked: + print( + "Error: Module version must be incremented before signing changed module contents: " + f"{manifest_rel} (current version {current_version}).", + file=sys.stderr, + ) + raise SystemExit(1) +PY +fi + python3 scripts/sign-modules.py "${ARGS[@]}" "$MANIFEST" # Emit checksum line for legacy pipeline compatibility. 
diff --git a/scripts/verify-modules-signature.py b/scripts/verify-modules-signature.py index 061e8f19..ac6e371a 100755 --- a/scripts/verify-modules-signature.py +++ b/scripts/verify-modules-signature.py @@ -183,8 +183,8 @@ def _changed_manifests_from_git(base_ref: str) -> list[Path]: "--name-only", f"{base_ref}...HEAD", "--", - "src/specfact_cli/modules/*/module-package.yaml", - "modules/*/module-package.yaml", + "src/specfact_cli/modules", + "modules", ], check=True, capture_output=True, @@ -194,12 +194,20 @@ def _changed_manifests_from_git(base_ref: str) -> list[Path]: raise ValueError(f"Unable to diff manifests against base ref '{base_ref}': {exc}") from exc manifests: list[Path] = [] + seen: set[Path] = set() for line in output.stdout.splitlines(): - path = Path(line.strip()) - if not path: + changed_path = Path(line.strip()) + if not changed_path: continue - if path.exists(): - manifests.append(path) + parts = changed_path.parts + manifest: Path | None = None + if len(parts) >= 4 and parts[0] == "src" and parts[1] == "specfact_cli" and parts[2] == "modules": + manifest = Path(*parts[:4]) / "module-package.yaml" + elif len(parts) >= 2 and parts[0] == "modules": + manifest = Path(*parts[:2]) / "module-package.yaml" + if manifest and manifest.exists() and manifest not in seen: + manifests.append(manifest) + seen.add(manifest) return manifests @@ -280,20 +288,20 @@ def main() -> int: except Exception as exc: failures.append(f"FAIL {manifest}: {exc}") - if failures: - print("\n".join(failures)) - return 1 - + version_failures: list[str] = [] if args.enforce_version_bump: base_ref = _resolve_version_check_base(args.version_check_base) try: version_failures = _verify_version_bumps(base_ref) except ValueError as exc: - print(f"FAIL version-check: {exc}") - return 1 + version_failures.append(f"FAIL version-check: {exc}") + + if failures or version_failures: + if failures: + print("\n".join(failures)) if version_failures: print("\n".join(version_failures)) - return 1 + 
return 1 print(f"Verified {len(manifests)} module manifest(s).") return 0 diff --git a/setup.py b/setup.py index 4be44b82..20caeb61 100644 --- a/setup.py +++ b/setup.py @@ -7,7 +7,7 @@ if __name__ == "__main__": _setup = setup( name="specfact-cli", - version="0.37.4", + version="0.37.5", description=( "The swiss knife CLI for agile DevOps teams. Keep backlog, specs, tests, and code in sync with " "validation and contract enforcement for new projects and long-lived codebases." diff --git a/src/__init__.py b/src/__init__.py index 188a8022..67e7bba5 100644 --- a/src/__init__.py +++ b/src/__init__.py @@ -3,4 +3,4 @@ """ # Package version: keep in sync with pyproject.toml, setup.py, src/specfact_cli/__init__.py -__version__ = "0.37.3" +__version__ = "0.37.5" diff --git a/src/specfact_cli/__init__.py b/src/specfact_cli/__init__.py index f5f90200..c53d0081 100644 --- a/src/specfact_cli/__init__.py +++ b/src/specfact_cli/__init__.py @@ -8,6 +8,6 @@ - Supporting agile ceremonies and team workflows """ -__version__ = "0.37.4" +__version__ = "0.37.5" __all__ = ["__version__"] diff --git a/src/specfact_cli/adapters/ado.py b/src/specfact_cli/adapters/ado.py index ab30030d..028f8267 100644 --- a/src/specfact_cli/adapters/ado.py +++ b/src/specfact_cli/adapters/ado.py @@ -374,6 +374,22 @@ def _normalize_description(self, fields: dict[str, Any]) -> str: description_raw = html.unescape(description_raw) return description_raw + @beartype + @ensure(lambda result: isinstance(result, str), "Must return string") + def _strip_leading_description_heading(self, content: str) -> str: + """ + Remove a leading Description heading/label from markdown content. + + This prevents duplicated "Description" headers in ADO Description field + when refinement output includes a scaffold heading like `## Description`. 
+ """ + if not content: + return "" + normalized = content.lstrip() + normalized = re.sub(r"^(#{1,6}\s+Description\s*)\n+", "", normalized, count=1, flags=re.IGNORECASE) + normalized = re.sub(r"^Description:\s*\n+", "", normalized, count=1, flags=re.IGNORECASE) + return normalized.strip() + @beartype @require(lambda item_data: isinstance(item_data, dict), "Item data must be dict") @ensure(lambda result: isinstance(result, dict), "Must return dict with extracted fields") @@ -1225,8 +1241,7 @@ def _get_work_item_type(self, org: str, project: str) -> str: "Content-Type": "application/json", **self._auth_headers(), } - response = requests.get(url, headers=headers, timeout=30) - response.raise_for_status() + response = self._ado_get(url, headers=headers, timeout=30) project_data = response.json() # Get process template ID @@ -1397,6 +1412,50 @@ def _auth_headers(self) -> dict[str, str]: return {"Authorization": f"Bearer {self.api_token}"} return {"Authorization": f"Basic {self._encode_pat(self.api_token)}"} + @beartype + @ensure(lambda result: hasattr(result, "raise_for_status"), "Result must support raise_for_status") + def _ado_get( + self, + url: str, + *, + headers: dict[str, str] | None = None, + params: dict[str, Any] | None = None, + timeout: int = 30, + retry_on_ambiguous_transport: bool = True, + ) -> Any: + """Execute an idempotent ADO GET with retry policy for transient failures.""" + return cast( + Any, + self._request_with_retry( + lambda: requests.get(url, headers=headers, params=params, timeout=timeout), + retry_on_ambiguous_transport=retry_on_ambiguous_transport, + ), + ) + + @beartype + @ensure(lambda result: hasattr(result, "raise_for_status"), "Result must support raise_for_status") + def _ado_post( + self, + url: str, + *, + headers: dict[str, str] | None = None, + params: dict[str, Any] | None = None, + json: dict[str, Any] | None = None, + timeout: int = 30, + retry_on_ambiguous_transport: bool = True, + ) -> Any: + """Execute ADO POST with retry 
policy. Safe for read-only WIQL endpoints.""" + request_kwargs: dict[str, Any] = {"headers": headers, "json": json, "timeout": timeout} + if params: + request_kwargs["params"] = params + return cast( + Any, + self._request_with_retry( + lambda: requests.post(url, **request_kwargs), + retry_on_ambiguous_transport=retry_on_ambiguous_transport, + ), + ) + def _work_item_exists(self, work_item_id: int | str, org: str, project: str) -> bool: """ Check if a work item exists in Azure DevOps. @@ -1426,15 +1485,16 @@ def _work_item_exists(self, work_item_id: int | str, org: str, project: str) -> } try: - response = requests.get(url, headers=headers, timeout=10) - # 200 = exists, 404 = doesn't exist (including deleted) - if response.status_code == 200: - # Check if work item is deleted (System.State == "Removed") - work_item_data = response.json() - fields = work_item_data.get("fields", {}) - state = fields.get("System.State", "") - # Consider "Removed" as non-existent for our purposes - return state != "Removed" + response = self._ado_get(url, headers=headers, timeout=10) + # Check if work item is deleted (System.State == "Removed") + work_item_data = response.json() + fields = work_item_data.get("fields", {}) + state = fields.get("System.State", "") + # Consider "Removed" as non-existent for our purposes + return state != "Removed" + except requests.HTTPError as e: + if e.response is not None and e.response.status_code == 404: + return False return False except requests.RequestException: # On any error, assume it doesn't exist (safer to allow creation) @@ -1469,19 +1529,58 @@ def _get_work_item_data(self, work_item_id: int | str, org: str, project: str) - } try: - response = requests.get(url, headers=headers, timeout=10) - if response.status_code == 200: - work_item_data = response.json() - fields = work_item_data.get("fields", {}) - return { - "title": fields.get("System.Title", ""), - "state": fields.get("System.State", ""), - "description": 
fields.get("System.Description", ""), - } + response = self._ado_get(url, headers=headers, timeout=10) + work_item_data = response.json() + fields = work_item_data.get("fields", {}) + return { + "title": fields.get("System.Title", ""), + "state": fields.get("System.State", ""), + "description": fields.get("System.Description", ""), + } + except requests.HTTPError as e: + if e.response is not None and e.response.status_code == 404: + return None return None except requests.RequestException: return None + @beartype + @require( + lambda self, issue_id: isinstance(issue_id, str) and len(issue_id.strip()) > 0, "issue_id must be non-empty" + ) + @ensure(lambda result: result is None or isinstance(result, BacklogItem), "Must return BacklogItem or None") + def _fetch_backlog_item_by_id(self, issue_id: str) -> BacklogItem | None: + """Fetch a single ADO work item directly by ID (bypasses WIQL list queries).""" + normalized_id = issue_id.strip() + if not normalized_id.isdigit(): + return None + if not self.org or not self.project: + return None + + url = self._build_ado_url(f"_apis/wit/workitems/{int(normalized_id)}", api_version="7.1") + headers = { + **self._auth_headers(), + "Accept": "application/json", + } + + try: + response = self._ado_get(url, headers=headers, params={"$expand": "all"}, timeout=30) + work_item = response.json() + except requests.HTTPError as e: + if e.response is not None and e.response.status_code == 404: + return None + raise + + from specfact_cli.backlog.converter import convert_ado_work_item_to_backlog_item + + return convert_ado_work_item_to_backlog_item( + work_item, + provider="ado", + base_url=self.base_url, + org=self.org, + project_name=self.project, + ) + def _find_work_item_by_change_id(self, change_id: str, org: str, project: str) -> dict[str, Any] | None: """ Find an existing ADO work item by OpenSpec change_id embedded in the description. 
@@ -1513,7 +1612,7 @@ def _find_work_item_by_change_id(self, change_id: str, org: str, project: str) - } try: - response = requests.post(url, json=wiql, headers=headers, timeout=10) + response = self._ado_post(url, json=wiql, headers=headers, timeout=10) if is_debug_mode(): debug_log_operation( "ado_wiql", @@ -2379,8 +2478,7 @@ def _get_work_item_comments(self, org: str, project: str, work_item_id: int) -> if continuation_token: params["continuationToken"] = continuation_token - response = requests.get(url, headers=headers, params=params, timeout=30) - response.raise_for_status() + response = self._ado_get(url, headers=headers, params=params, timeout=30) response_data = response.json() raw_comments = response_data.get("comments", []) @@ -2556,16 +2654,18 @@ def _get_current_iteration(self) -> str | None: **self._auth_headers(), "Accept": "application/json", } - project_response = requests.get(project_url, headers=project_headers, params=project_params, timeout=30) - project_response.raise_for_status() + project_response = self._ado_get( + project_url, headers=project_headers, params=project_params, timeout=30 + ) project_data = project_response.json() project_id = project_data.get("id") if project_id: # Get teams for the project teams_url = f"{self.base_url}/{self.org}/_apis/projects/{project_id}/teams" - teams_response = requests.get(teams_url, headers=project_headers, params=project_params, timeout=30) - teams_response.raise_for_status() + teams_response = self._ado_get( + teams_url, headers=project_headers, params=project_params, timeout=30 + ) teams_data = teams_response.json() teams = teams_data.get("value", []) if teams: @@ -2593,8 +2693,7 @@ def _get_current_iteration(self) -> str | None: } try: - response = requests.get(url, headers=headers, params=params, timeout=30) - response.raise_for_status() + response = self._ado_get(url, headers=headers, params=params, timeout=30) data = response.json() iterations = data.get("value", []) if iterations: @@ -2611,8 
+2710,7 @@ def _get_current_iteration(self) -> str | None: f"{self.base_url}/{self.org}/{self.project}/{project_encoded}/_apis/work/teamsettings/iterations" ) try: - fallback_response = requests.get(fallback_url, headers=headers, params=params, timeout=30) - fallback_response.raise_for_status() + fallback_response = self._ado_get(fallback_url, headers=headers, params=params, timeout=30) fallback_data = fallback_response.json() fallback_iterations = fallback_data.get("value", []) if fallback_iterations: @@ -2651,15 +2749,17 @@ def _list_available_iterations(self) -> list[str]: **self._auth_headers(), "Accept": "application/json", } - project_response = requests.get(project_url, headers=project_headers, params=project_params, timeout=30) - project_response.raise_for_status() + project_response = self._ado_get( + project_url, headers=project_headers, params=project_params, timeout=30 + ) project_data = project_response.json() project_id = project_data.get("id") if project_id: teams_url = f"{self.base_url}/{self.org}/_apis/projects/{project_id}/teams" - teams_response = requests.get(teams_url, headers=project_headers, params=project_params, timeout=30) - teams_response.raise_for_status() + teams_response = self._ado_get( + teams_url, headers=project_headers, params=project_params, timeout=30 + ) teams_data = teams_response.json() teams = teams_data.get("value", []) if teams: @@ -2684,8 +2784,7 @@ def _list_available_iterations(self) -> list[str]: } try: - response = requests.get(url, headers=headers, params=params, timeout=30) - response.raise_for_status() + response = self._ado_get(url, headers=headers, params=params, timeout=30) data = response.json() iterations = data.get("value", []) return [it.get("path", "") for it in iterations if it.get("path")] @@ -2811,6 +2910,59 @@ def fetch_backlog_items(self, filters: BacklogFilters) -> list[BacklogItem]: msg = "project required to fetch backlog items. Provide via --ado-project option." 
raise ValueError(msg) + requested_issue_id = str(getattr(filters, "issue_id", "") or "").strip() + if requested_issue_id: + direct_item = self._fetch_backlog_item_by_id(requested_issue_id) + if direct_item is None: + return [] + + filtered_items = [direct_item] + + # Apply post-fetch filters to preserve current command semantics when users also pass filters. + if filters.state: + normalized_state = BacklogFilters.normalize_filter_value(filters.state) + filtered_items = [ + item + for item in filtered_items + if BacklogFilters.normalize_filter_value(item.state) == normalized_state + ] + + if filters.assignee: + normalized_assignee = BacklogFilters.normalize_filter_value(filters.assignee) + filtered_items = [ + item + for item in filtered_items + if any( + BacklogFilters.normalize_filter_value(assignee) == normalized_assignee + for assignee in item.assignees + ) + ] + + if filters.labels: + filtered_items = [ + item for item in filtered_items if any(label in item.tags for label in filters.labels) + ] + + if filters.sprint: + _, filtered_items = self._resolve_sprint_filter( + filters.sprint, + filtered_items, + apply_current_when_missing=False, + ) + + if filters.release: + normalized_release = BacklogFilters.normalize_filter_value(filters.release) + filtered_items = [ + item + for item in filtered_items + if item.release and BacklogFilters.normalize_filter_value(item.release) == normalized_release + ] + + if filters.limit is not None and len(filtered_items) > filters.limit: + filtered_items = filtered_items[: filters.limit] + + return filtered_items + # Build WIQL (Work Item Query Language) query # WIQL syntax: SELECT fields FROM WorkItems WHERE conditions # Use @project macro to reference the project context in project-scoped queries @@ -2909,8 +3061,7 @@ def fetch_backlog_items(self, filters: BacklogFilters) -> list[BacklogItem]: debug_print("[yellow]Warning: No Authorization header in request[/yellow]") try: - response = requests.post(url, headers=headers, 
json=payload, timeout=30) - response.raise_for_status() + response = self._ado_post(url, headers=headers, json=payload, timeout=30) except requests.HTTPError as e: # Provide user-friendly error message user_friendly_msg = None @@ -3045,7 +3196,7 @@ def fetch_backlog_items(self, filters: BacklogFilters) -> list[BacklogItem]: debug_print(f"[dim]ADO WorkItems URL: {url}&ids={ids_str}[/dim]") try: - response = requests.get(url, headers=workitems_headers, params=params, timeout=30) + response = self._ado_get(url, headers=workitems_headers, params=params, timeout=30) if is_debug_mode(): debug_log_operation( "ado_workitems_get", @@ -3053,7 +3204,6 @@ def fetch_backlog_items(self, filters: BacklogFilters) -> list[BacklogItem]: str(response.status_code), error=None if response.ok else (response.text[:200] if response.text else None), ) - response.raise_for_status() except requests.HTTPError as e: if is_debug_mode(): debug_log_operation( @@ -3184,12 +3334,25 @@ def create_issue(self, project_id: str, payload: dict[str, Any]) -> dict[str, An work_item_type = type_mapping.get(raw_type, "Task") description = str(payload.get("description") or payload.get("body") or "").strip() + description = self._strip_leading_description_heading(description) description_format = str(payload.get("description_format") or "markdown").strip().lower() field_rendering_format = "Markdown" if description_format != "classic" else "Html" + + custom_mapping_file = os.environ.get("SPECFACT_ADO_CUSTOM_MAPPING") + ado_mapper = AdoFieldMapper(custom_mapping_file=custom_mapping_file) + description_field = ado_mapper.resolve_write_target_field("description") or "System.Description" + acceptance_criteria_field = ( + ado_mapper.resolve_write_target_field("acceptance_criteria") or "Microsoft.VSTS.Common.AcceptanceCriteria" + ) + priority_field = ado_mapper.resolve_write_target_field("priority") or "Microsoft.VSTS.Common.Priority" + story_points_field = ( + ado_mapper.resolve_write_target_field("story_points") or 
"Microsoft.VSTS.Scheduling.StoryPoints" + ) + patch_document: list[dict[str, Any]] = [ {"op": "add", "path": "/fields/System.Title", "value": title}, - {"op": "add", "path": "/fields/System.Description", "value": description}, - {"op": "add", "path": "/multilineFieldsFormat/System.Description", "value": field_rendering_format}, + {"op": "add", "path": f"/fields/{description_field}", "value": description}, + {"op": "add", "path": f"/multilineFieldsFormat/{description_field}", "value": field_rendering_format}, ] acceptance_criteria = str(payload.get("acceptance_criteria") or "").strip() @@ -3197,7 +3360,14 @@ def create_issue(self, project_id: str, payload: dict[str, Any]) -> dict[str, An patch_document.append( { "op": "add", - "path": "/fields/Microsoft.VSTS.Common.AcceptanceCriteria", + "path": f"/multilineFieldsFormat/{acceptance_criteria_field}", + "value": field_rendering_format, + } + ) + patch_document.append( + { + "op": "add", + "path": f"/fields/{acceptance_criteria_field}", "value": acceptance_criteria, } ) @@ -3207,7 +3377,7 @@ def create_issue(self, project_id: str, payload: dict[str, Any]) -> dict[str, An patch_document.append( { "op": "add", - "path": "/fields/Microsoft.VSTS.Common.Priority", + "path": f"/fields/{priority_field}", "value": priority, } ) @@ -3217,7 +3387,7 @@ def create_issue(self, project_id: str, payload: dict[str, Any]) -> dict[str, An patch_document.append( { "op": "add", - "path": "/fields/Microsoft.VSTS.Scheduling.StoryPoints", + "path": f"/fields/{story_points_field}", "value": story_points, } ) @@ -3467,33 +3637,10 @@ def update_backlog_item(self, item: BacklogItem, update_fields: list[str] | None # Use AdoFieldMapper for field writeback (honor custom field mappings) custom_mapping_file = os.environ.get("SPECFACT_ADO_CUSTOM_MAPPING") ado_mapper = AdoFieldMapper(custom_mapping_file=custom_mapping_file) - canonical_fields: dict[str, Any] = { - "description": item.body_markdown, - "acceptance_criteria": item.acceptance_criteria, - 
"story_points": item.story_points, - "business_value": item.business_value, - "priority": item.priority, - "value_points": item.value_points, - "work_item_type": item.work_item_type, - } - - # Map canonical fields to ADO fields (uses custom mappings if provided) - ado_fields = ado_mapper.map_from_canonical(canonical_fields) - - # Get reverse mapping to find ADO field names for canonical fields - # Use same preference logic as map_from_canonical: prefer System.* over Microsoft.VSTS.Common.* - field_mappings = ado_mapper._get_field_mappings() - reverse_mappings: dict[str, str] = {} - for ado_field, canonical in field_mappings.items(): - if canonical not in reverse_mappings: - # First mapping for this canonical field - use it - reverse_mappings[canonical] = ado_field - else: - # Multiple mappings exist - prefer System.* over Microsoft.VSTS.Common.* - current_ado_field = reverse_mappings[canonical] - # Prefer System.* fields for write operations (more common in Scrum) - if ado_field.startswith("System.") and not current_ado_field.startswith("System."): - reverse_mappings[canonical] = ado_field + provider_field_names = set() + provider_fields_payload = item.provider_fields.get("fields") + if isinstance(provider_fields_payload, dict): + provider_field_names = {str(field_name) for field_name in provider_fields_payload} # Update description (body_markdown) - always use System.Description if update_fields is None or "body" in update_fields or "body_markdown" in update_fields: @@ -3502,6 +3649,7 @@ def update_backlog_item(self, item: BacklogItem, update_fields: list[str] | None # Never send null: ADO rejects null for /fields/System.Description (HTTP 400) raw_body = item.body_markdown markdown_content = raw_body if raw_body is not None else "" + markdown_content = self._strip_leading_description_heading(markdown_content) # Convert TODO markers to proper Markdown checkboxes for ADO rendering todo_pattern = r"^(\s*)[-*]\s*\[TODO[:\s]+([^\]]+)\](.*)$" markdown_content = re.sub( 
@@ -3511,44 +3659,50 @@ def update_backlog_item(self, item: BacklogItem, update_fields: list[str] | None flags=re.MULTILINE | re.IGNORECASE, ) - description_field = reverse_mappings.get("description", "System.Description") + description_field = ( + ado_mapper.resolve_write_target_field("description", provider_field_names) or "System.Description" + ) # Set multiline field format to Markdown first (optional; many ADO instances return 400 for this path) operations.append({"op": "add", "path": f"/multilineFieldsFormat/{description_field}", "value": "Markdown"}) operations.append({"op": "replace", "path": f"/fields/{description_field}", "value": markdown_content}) # Update acceptance criteria using mapped field name (honors custom mappings) if update_fields is None or "acceptance_criteria" in update_fields: - acceptance_criteria_field = reverse_mappings.get("acceptance_criteria") - # Check if field exists in mapped fields (means it's available in ADO) and has value - if acceptance_criteria_field and item.acceptance_criteria and acceptance_criteria_field in ado_fields: + acceptance_criteria_field = ado_mapper.resolve_write_target_field( + "acceptance_criteria", provider_field_names + ) + if acceptance_criteria_field and item.acceptance_criteria: + operations.append( + { + "op": "add", + "path": f"/multilineFieldsFormat/{acceptance_criteria_field}", + "value": "Markdown", + } + ) operations.append( {"op": "replace", "path": f"/fields/{acceptance_criteria_field}", "value": item.acceptance_criteria} ) # Update story points using mapped field name (honors custom mappings) if update_fields is None or "story_points" in update_fields: - story_points_field = reverse_mappings.get("story_points") - # Check if field exists in mapped fields (means it's available in ADO) and has value - # Handle both Microsoft.VSTS.Common.StoryPoints and Microsoft.VSTS.Scheduling.StoryPoints - if story_points_field and item.story_points is not None and story_points_field in ado_fields: + 
story_points_field = ado_mapper.resolve_write_target_field("story_points", provider_field_names) + if story_points_field and item.story_points is not None: operations.append( {"op": "replace", "path": f"/fields/{story_points_field}", "value": item.story_points} ) # Update business value using mapped field name (honors custom mappings) if update_fields is None or "business_value" in update_fields: - business_value_field = reverse_mappings.get("business_value") - # Check if field exists in mapped fields (means it's available in ADO) and has value - if business_value_field and item.business_value is not None and business_value_field in ado_fields: + business_value_field = ado_mapper.resolve_write_target_field("business_value", provider_field_names) + if business_value_field and item.business_value is not None: operations.append( {"op": "replace", "path": f"/fields/{business_value_field}", "value": item.business_value} ) # Update priority using mapped field name (honors custom mappings) if update_fields is None or "priority" in update_fields: - priority_field = reverse_mappings.get("priority") - # Check if field exists in mapped fields (means it's available in ADO) and has value - if priority_field and item.priority is not None and priority_field in ado_fields: + priority_field = ado_mapper.resolve_write_target_field("priority", provider_field_names) + if priority_field and item.priority is not None: operations.append({"op": "replace", "path": f"/fields/{priority_field}", "value": item.priority}) if update_fields is None or "state" in update_fields: @@ -3611,29 +3765,41 @@ def update_backlog_item(self, item: BacklogItem, update_fields: list[str] | None # Third: HTML fallback (no multilineFieldsFormat, description as HTML) import re as _re - console.print("[yellow]โš  Markdown format not supported, converting description to HTML[/yellow]") + console.print( + "[yellow]โš  Markdown format metadata not supported, converting multiline markdown fields to HTML[/yellow]" + ) + 
markdown_formatted_fields = { + str(op.get("path", "")).replace("/multilineFieldsFormat/", "", 1) + for op in operations + if str(op.get("path", "")).startswith("/multilineFieldsFormat/") + and str(op.get("value", "")).lower() == "markdown" + } + + def _markdown_to_html(value: str) -> str: + todo_pattern = r"^(\s*)[-*]\s*\[TODO[:\s]+([^\]]+)\](.*)$" + normalized_markdown = _re.sub( + todo_pattern, + r"\1- [ ] \2", + value, + flags=_re.MULTILINE | _re.IGNORECASE, + ) + try: + import markdown + + return markdown.markdown(normalized_markdown, extensions=["fenced_code", "tables"]) + except ImportError: + return normalized_markdown + operations_html = [ op for op in operations if not (op.get("path") or "").startswith("/multilineFieldsFormat/") ] - description_field = reverse_mappings.get("description", "System.Description") - desc_path = f"/fields/{description_field}" for op in operations_html: - if op.get("path") == desc_path: - markdown_for_html = op.get("value") or "" - todo_pattern = r"^(\s*)[-*]\s*\[TODO[:\s]+([^\]]+)\](.*)$" - markdown_for_html = _re.sub( - todo_pattern, - r"\1- [ ] \2", - markdown_for_html, - flags=_re.MULTILINE | _re.IGNORECASE, - ) - try: - import markdown - - op["value"] = markdown.markdown(markdown_for_html, extensions=["fenced_code", "tables"]) - except ImportError: - pass - break + field_path = str(op.get("path", "")) + if not field_path.startswith("/fields/"): + continue + field_name = field_path.replace("/fields/", "", 1) + if field_name in markdown_formatted_fields: + op["value"] = _markdown_to_html(str(op.get("value") or "")) try: resp = requests.patch(url, headers=headers, json=operations_html, timeout=30) resp.raise_for_status() diff --git a/src/specfact_cli/adapters/github.py b/src/specfact_cli/adapters/github.py index 0d74b7c7..0e3eff2b 100644 --- a/src/specfact_cli/adapters/github.py +++ b/src/specfact_cli/adapters/github.py @@ -2515,6 +2515,11 @@ def fetch_backlog_items(self, filters: BacklogFilters) -> list[BacklogItem]: msg = 
"repo_owner and repo_name required to fetch backlog items" raise ValueError(msg) + if filters.issue_id: + direct_item = self._fetch_backlog_item_by_id(filters.issue_id) + direct_items = [direct_item] if direct_item is not None else [] + return self._apply_backlog_post_filters(direct_items, filters) + # Build GitHub search query # Note: GitHub search API is case-insensitive for state, but we'll apply # case-insensitive filtering post-fetch for assignee to handle display names @@ -2556,7 +2561,7 @@ def fetch_backlog_items(self, filters: BacklogFilters) -> list[BacklogItem]: while True: params["page"] = page - response = requests.get(url, headers=headers, params=params, timeout=30) + response = self._request_with_retry(lambda: requests.get(url, headers=headers, params=params, timeout=30)) response.raise_for_status() data = response.json() @@ -2576,7 +2581,43 @@ def fetch_backlog_items(self, filters: BacklogFilters) -> list[BacklogItem]: break page += 1 - # Apply post-fetch filters that GitHub API doesn't support directly + return self._apply_backlog_post_filters(items, filters) + + @beartype + def _fetch_backlog_item_by_id(self, issue_id: str) -> BacklogItem | None: + """Fetch a single GitHub issue by number for deterministic ID lookup flows.""" + normalized_id = issue_id.strip().lstrip("#") + if not normalized_id: + return None + + url = f"{self.base_url}/repos/{self.repo_owner}/{self.repo_name}/issues/{normalized_id}" + headers = { + "Authorization": f"token {self.api_token}", + "Accept": "application/vnd.github.v3+json", + } + try: + response = self._request_with_retry(lambda: requests.get(url, headers=headers, timeout=30)) + response.raise_for_status() + except requests.HTTPError as exc: + status_code = exc.response.status_code if exc.response is not None else None + if status_code == 404: + return None + raise + + issue_payload = response.json() + if not isinstance(issue_payload, dict): + return None + if issue_payload.get("pull_request") is not None: + # Backlog 
issue commands should not resolve pull requests. + return None + + from specfact_cli.backlog.converter import convert_github_issue_to_backlog_item + + return convert_github_issue_to_backlog_item(issue_payload, provider="github") + + @beartype + def _apply_backlog_post_filters(self, items: list[BacklogItem], filters: BacklogFilters) -> list[BacklogItem]: + """Apply post-fetch filters for both search and direct ID lookup paths.""" filtered_items = items # Case-insensitive state filtering (GitHub API may return mixed case) @@ -2611,6 +2652,28 @@ def fetch_backlog_items(self, filters: BacklogFilters) -> list[BacklogItem]: ) ] + if filters.labels: + normalized_labels = { + normalized_label + for normalized_label in ( + BacklogFilters.normalize_filter_value(raw_label) for raw_label in filters.labels + ) + if normalized_label + } + filtered_items = [ + item + for item in filtered_items + if any( + tag_value in normalized_labels + for tag_value in (BacklogFilters.normalize_filter_value(tag) for tag in item.tags) + if tag_value + ) + ] + + # Do not re-apply `filters.search` locally as plain-text matching. + # GitHub already evaluates provider-specific search syntax server-side + # (for example `label:bug`, `is:open`, `no:assignee`). + if filters.iteration: filtered_items = [item for item in filtered_items if item.iteration and item.iteration == filters.iteration] @@ -2664,6 +2727,25 @@ def _github_graphql(self, query: str, variables: dict[str, Any]) -> dict[str, An data = payload.get("data") return data if isinstance(data, dict) else {} + @staticmethod + @beartype + def _resolve_github_type_mapping_id(mapping: dict[str, Any], issue_type: str) -> str: + """ + Resolve GitHub issue-type/project-type mapping id with fallback aliases. + + Default alias fallback: + - `story` -> `feature` when `story` is unavailable in the repository. 
+ """ + normalized = issue_type.strip().lower() + candidate_keys = [issue_type, normalized] + if normalized == "story": + candidate_keys.extend(["feature", "Feature"]) + for key in candidate_keys: + mapped = str(mapping.get(key) or "").strip() + if mapped: + return mapped + return "" + @beartype def _try_set_github_issue_type( self, @@ -2682,7 +2764,7 @@ def _try_set_github_issue_type( if not isinstance(type_ids, dict): return - issue_type_id = str(type_ids.get(issue_type) or type_ids.get(issue_type.lower()) or "").strip() + issue_type_id = self._resolve_github_type_mapping_id(type_ids, issue_type) if not issue_type_id: return @@ -2770,7 +2852,7 @@ def _try_set_github_project_type_field( if not isinstance(option_map, dict): return - option_id = str(option_map.get(issue_type) or option_map.get(issue_type.lower()) or "").strip() + option_id = self._resolve_github_type_mapping_id(option_map, issue_type) if not project_id or not type_field_id or not option_id: return diff --git a/src/specfact_cli/backlog/ai_refiner.py b/src/specfact_cli/backlog/ai_refiner.py index 28bf681e..36b6b18f 100644 --- a/src/specfact_cli/backlog/ai_refiner.py +++ b/src/specfact_cli/backlog/ai_refiner.py @@ -18,6 +18,7 @@ from beartype import beartype from icontract import ensure, require +from specfact_cli.backlog.template_detector import get_effective_required_sections from specfact_cli.models.backlog_item import BacklogItem from specfact_cli.templates.registry import BacklogTemplate @@ -96,11 +97,16 @@ def generate_refinement_prompt( Returns: Prompt string for IDE AI copilot """ - required_sections_str = "\n".join(f"- {section}" for section in template.required_sections) + effective_required_sections = get_effective_required_sections(item, template) + required_sections_str = "\n".join(f"- {section}" for section in effective_required_sections) or "- None" + optional_sections = list(template.optional_sections or []) + if item.provider.lower() == "ado": + ado_structured_optional_sections = 
{"Area Path", "Iteration Path"} + optional_sections = [ + section for section in optional_sections if section not in ado_structured_optional_sections + ] optional_sections_str = ( - "\n".join(f"- {section}" for section in template.optional_sections) - if template.optional_sections - else "None" + "\n".join(f"- {section}" for section in optional_sections) if optional_sections else "None" ) # Provider-specific instructions @@ -111,9 +117,10 @@ def generate_refinement_prompt( Each required section should be a markdown heading with content below it.""" elif item.provider == "ado": provider_instructions = """ -For Azure DevOps work items: Note that fields are separate (not markdown headings in body). -However, for refinement purposes, structure the content as markdown headings in the body. -The adapter will map these back to separate ADO fields during writeback.""" +For Azure DevOps work items: fields are structured and mapped separately. +- Keep metadata values (Story Points, Business Value, Priority, Work Item Type) in the metadata block, not as body headings. +- Keep the description body narrative clean (no duplicated metadata labels/headings inside description text). +- The adapter maps metadata and acceptance fields back to ADO structured fields during writeback.""" # Include story points, business value, priority if available metrics_info = "" @@ -163,17 +170,18 @@ def generate_refinement_prompt( Instructions: 1. Preserve all original requirements, scope, and technical details 2. Do NOT add new features or change the scope -3. Transform the content to match the template structure -4. If information is missing for a required section, use a Markdown checkbox: - [ ] describe what's needed -5. If you detect conflicting or ambiguous information, add a [NOTES] section at the end explaining the ambiguity -6. Use markdown formatting for sections (## Section Name) -7. 
Include story points, business value, priority, and work item type if available in the appropriate sections -8. For stories with high story points (>13 for Scrum, >21 for SAFe), consider suggesting story splitting -9. Provider-aware formatting: +3. Do NOT summarize, shorten, or silently drop details from the original story content +4. Transform the content to match the template structure +5. If information is missing for a required section, use a Markdown checkbox: - [ ] describe what's needed +6. If you detect conflicting or ambiguous information, add a [NOTES] section at the end explaining the ambiguity +7. Use markdown formatting for sections (## Section Name) +8. Include story points, business value, priority, and work item type if available in the appropriate sections +9. For stories with high story points (>13 for Scrum, >21 for SAFe), consider suggesting story splitting +10. Provider-aware formatting: - **GitHub**: Use markdown headings in body (## Section Name) - **ADO**: Use markdown headings in body (will be mapped to separate ADO fields during writeback) -10. Omit unknown metadata fields instead of placeholders (do not emit values like "unspecified", "no info provided", or "provide area path") -11. Keep `## Description` focused on narrative body content; do not place metadata labels in description text. +11. Omit unknown metadata fields instead of placeholders (do not emit values like "unspecified", "no info provided", or "provide area path") +12. Keep `## Description` focused on narrative body content; do not place metadata labels in description text. 
Expected Output Scaffold (ordered): ## Work Item Properties / Metadata @@ -225,9 +233,13 @@ def _validate_required_sections(self, refined_body: str, template: BacklogTempla if not template.required_sections: return True # No requirements = valid + effective_required_sections = get_effective_required_sections(item, template) + if not effective_required_sections: + return True + # Refined content is always markdown (from AI copilot), so check markdown headings body_lower = refined_body.lower() - for section in template.required_sections: + for section in effective_required_sections: section_lower = section.lower() # Check for markdown heading heading_pattern = rf"^#+\s+{re.escape(section_lower)}\s*$" @@ -249,8 +261,11 @@ def _has_todo_markers(self, refined_body: str) -> bool: Returns: True if TODO markers are present, False otherwise """ + # Use normalized uppercase text + case-sensitive regex to avoid regex engine + # edge-cases with IGNORECASE in symbolic/exploration contexts. + normalized_body = refined_body.upper() todo_pattern = r"\[TODO[:\s][^\]]+\]" - return bool(re.search(todo_pattern, refined_body, re.IGNORECASE)) + return bool(re.search(todo_pattern, normalized_body)) @beartype @require(lambda self, refined_body: isinstance(refined_body, str), "Refined body must be string") @@ -325,7 +340,7 @@ def validate_and_score_refinement( # Validate required sections (provider-aware) if not self._validate_required_sections(refined_body, template, item): - msg = f"Refined content is missing required sections: {template.required_sections}" + msg = f"Refined content is missing required sections: {get_effective_required_sections(item, template)}" raise ValueError(msg) # Validate story points, business value, priority fields if present diff --git a/src/specfact_cli/backlog/converter.py b/src/specfact_cli/backlog/converter.py index 5eac3476..3c4e4b43 100644 --- a/src/specfact_cli/backlog/converter.py +++ b/src/specfact_cli/backlog/converter.py @@ -24,6 +24,14 @@ @beartype 
@require(lambda item_data: isinstance(item_data, dict), "Item data must be dict") @require(lambda provider: isinstance(provider, str) and len(provider) > 0, "Provider must be non-empty string") +@require( + lambda item_data: bool(item_data.get("number") or item_data.get("id")), + "GitHub issue must include 'number' or 'id'", +) +@require( + lambda item_data: bool(item_data.get("html_url") or item_data.get("url")), + "GitHub issue must include 'html_url' or 'url'", +) @ensure(lambda result: isinstance(result, BacklogItem), "Must return BacklogItem") def convert_github_issue_to_backlog_item(item_data: dict[str, Any], provider: str = "github") -> BacklogItem: """ @@ -156,6 +164,11 @@ def convert_github_issue_to_backlog_item(item_data: dict[str, Any], provider: st @beartype @require(lambda item_data: isinstance(item_data, dict), "Item data must be dict") @require(lambda provider: isinstance(provider, str) and len(provider) > 0, "Provider must be non-empty string") +@require(lambda item_data: bool(item_data.get("id")), "ADO work item must include 'id'") +@require( + lambda item_data: bool(item_data.get("url") or item_data.get("_links", {}).get("html", {}).get("href", "")), + "ADO work item must include 'url' or '_links.html.href'", +) @ensure(lambda result: isinstance(result, BacklogItem), "Must return BacklogItem") def convert_ado_work_item_to_backlog_item( item_data: dict[str, Any], @@ -207,7 +220,6 @@ def convert_ado_work_item_to_backlog_item( msg = "ADO work item must have 'System.Title' field" raise ValueError(msg) - body_markdown = fields.get("System.Description", "") or "" state = fields.get("System.State", "New").lower() # Extract fields using AdoFieldMapper (with optional custom mapping) @@ -218,6 +230,12 @@ def convert_ado_work_item_to_backlog_item( custom_mapping_file = os.environ.get("SPECFACT_ADO_CUSTOM_MAPPING") ado_mapper = AdoFieldMapper(custom_mapping_file=custom_mapping_file) extracted_fields = ado_mapper.extract_fields(item_data) + 
extracted_description = extracted_fields.get("description") + body_markdown = ( + extracted_description + if isinstance(extracted_description, str) and extracted_description + else (fields.get("System.Description", "") or "") + ) acceptance_criteria = extracted_fields.get("acceptance_criteria") story_points = extracted_fields.get("story_points") business_value = extracted_fields.get("business_value") @@ -359,7 +377,7 @@ def _parse_github_timestamp(timestamp: str | None) -> datetime: if dt.tzinfo is None: dt = dt.replace(tzinfo=UTC) return dt - except (ValueError, AttributeError): + except (TypeError, ValueError, AttributeError): return datetime.now(UTC) @@ -385,5 +403,5 @@ def _parse_ado_timestamp(timestamp: str | None) -> datetime: if dt.tzinfo is None: dt = dt.replace(tzinfo=UTC) return dt - except (ValueError, AttributeError): + except (TypeError, ValueError, AttributeError): return datetime.now(UTC) diff --git a/src/specfact_cli/backlog/filters.py b/src/specfact_cli/backlog/filters.py index 41411421..c09d5ef0 100644 --- a/src/specfact_cli/backlog/filters.py +++ b/src/specfact_cli/backlog/filters.py @@ -40,6 +40,8 @@ class BacklogFilters: """Filter by sprint identifier.""" release: str | None = None """Filter by release identifier.""" + issue_id: str | None = None + """Filter by a single issue/work item identifier.""" limit: int | None = None """Maximum number of items to fetch (applied after filtering).""" use_current_iteration_default: bool = True diff --git a/src/specfact_cli/backlog/mappers/ado_mapper.py b/src/specfact_cli/backlog/mappers/ado_mapper.py index d3b212ef..59838072 100644 --- a/src/specfact_cli/backlog/mappers/ado_mapper.py +++ b/src/specfact_cli/backlog/mappers/ado_mapper.py @@ -8,6 +8,8 @@ from __future__ import annotations +import html +import re from pathlib import Path from typing import Any @@ -156,33 +158,52 @@ def map_from_canonical(self, canonical_fields: dict[str, Any]) -> dict[str, Any] Returns: Dict mapping ADO field names to values 
""" - # Use custom mapping if available, otherwise use defaults - field_mappings = self._get_field_mappings() - - # Build reverse mapping with preference for System.* fields over Microsoft.VSTS.Common.* - # This ensures write operations use the more common System.* fields (better Scrum compatibility) - reverse_mappings: dict[str, str] = {} - for ado_field, canonical in field_mappings.items(): - if canonical not in reverse_mappings: - # First mapping for this canonical field - use it - reverse_mappings[canonical] = ado_field - else: - # Multiple mappings exist - prefer System.* over Microsoft.VSTS.Common.* - current_ado_field = reverse_mappings[canonical] - # Prefer System.* fields for write operations (more common in Scrum) - if ado_field.startswith("System.") and not current_ado_field.startswith("System."): - reverse_mappings[canonical] = ado_field - ado_fields: dict[str, Any] = {} # Map each canonical field to ADO field for canonical_field, value in canonical_fields.items(): - if canonical_field in reverse_mappings: - ado_field_name = reverse_mappings[canonical_field] + ado_field_name = self.resolve_write_target_field(canonical_field) + if ado_field_name: ado_fields[ado_field_name] = value return ado_fields + @beartype + @require(lambda self, canonical_field: isinstance(canonical_field, str), "Canonical field must be str") + @require( + lambda self, provider_field_names: provider_field_names is None + or isinstance(provider_field_names, (set, frozenset)), + "provider_field_names must be set-like or None", + ) + @ensure(lambda result: result is None or isinstance(result, str), "Must return str or None") + def resolve_write_target_field( + self, + canonical_field: str, + provider_field_names: set[str] | frozenset[str] | None = None, + ) -> str | None: + """ + Resolve deterministic ADO write-target field for a canonical field. + + Precedence: + 1. Explicit custom mapping candidates (in custom mapping file order) + 2. 
Provider-present candidates from mapped fields + 3. Mapper fallback candidate order + """ + custom_candidates = self._get_custom_write_target_candidates(canonical_field) + if custom_candidates: + return custom_candidates[0] + + candidates = self._get_write_target_candidates(canonical_field) + if not candidates: + return None + + provider_fields = {f for f in (provider_field_names or set()) if isinstance(f, str)} + for candidate in candidates: + if candidate in provider_fields: + return candidate + + return candidates[0] + @beartype @ensure(lambda result: isinstance(result, dict), "Must return dict") def _get_field_mappings(self) -> dict[str, str]: @@ -199,6 +220,41 @@ def _get_field_mappings(self) -> dict[str, str]: return mappings return self.DEFAULT_FIELD_MAPPINGS.copy() + @beartype + @require(lambda self, canonical_field: isinstance(canonical_field, str), "Canonical field must be str") + @ensure(lambda result: isinstance(result, list), "Must return list") + def _get_write_target_candidates(self, canonical_field: str) -> list[str]: + """Return ordered write-target candidates for a canonical field.""" + ordered: list[str] = [] + seen: set[str] = set() + + def _add(ado_field: str) -> None: + if ado_field not in seen: + ordered.append(ado_field) + seen.add(ado_field) + + for ado_field in self._get_custom_write_target_candidates(canonical_field): + _add(ado_field) + + for ado_field, mapped_canonical in self._get_field_mappings().items(): + if mapped_canonical == canonical_field: + _add(ado_field) + + return ordered + + @beartype + @require(lambda self, canonical_field: isinstance(canonical_field, str), "Canonical field must be str") + @ensure(lambda result: isinstance(result, list), "Must return list") + def _get_custom_write_target_candidates(self, canonical_field: str) -> list[str]: + """Return ordered candidates declared explicitly in custom mapping.""" + if not (self.custom_mapping and self.custom_mapping.field_mappings): + return [] + return [ + ado_field + for 
ado_field, mapped_canonical in self.custom_mapping.field_mappings.items() + if mapped_canonical == canonical_field + ] + @beartype @require(lambda self, fields_dict: isinstance(fields_dict, dict), "Fields dict must be dict") @require(lambda self, field_mappings: isinstance(field_mappings, dict), "Field mappings must be dict") @@ -228,7 +284,8 @@ def _extract_field( if canonical == canonical_field: value = fields_dict.get(ado_field) if value is not None: - return str(value).strip() if isinstance(value, str) else str(value) + normalized_value = str(value).strip() if isinstance(value, str) else str(value) + return self._normalize_rich_text_to_markdown(normalized_value) return None @beartype @@ -262,6 +319,48 @@ def _extract_numeric_field( return None return None + @beartype + @require(lambda self, value: isinstance(value, str), "Value must be str") + @ensure(lambda result: isinstance(result, str), "Must return str") + def _normalize_rich_text_to_markdown(self, value: str) -> str: + """Normalize ADO rich text content to markdown-like plain text.""" + if not value: + return value + + value = html.unescape(value) + + rich_text_tag_pattern = re.compile( + r"</?(?:p|br|strong|b|em|i|pre|code|a|li|ul|ol|h[1-6]|div|span|blockquote)(?:\s[^<>]*)?>", + flags=re.IGNORECASE, + ) + if not rich_text_tag_pattern.search(value): + return value + + normalized = re.sub(r"<!--.*?-->", "", value, flags=re.DOTALL) + normalized = re.sub(r"<h([1-6])[^>]*>(.*?)</h[1-6]>", self._replace_heading, normalized, flags=re.DOTALL) + normalized = re.sub(r"<strong>(.*?)</strong>", r"**\1**", normalized, flags=re.DOTALL | re.IGNORECASE) + normalized = re.sub(r"<b>(.*?)</b>", r"**\1**", normalized, flags=re.DOTALL | re.IGNORECASE) + normalized = re.sub(r"<em>(.*?)</em>", r"*\1*", normalized, flags=re.DOTALL | re.IGNORECASE) + normalized = re.sub(r"<i>(.*?)</i>", r"*\1*", normalized, flags=re.DOTALL | re.IGNORECASE) + normalized = re.sub(r"<pre><code>(.*?)</code></pre>", r"```\n\1\n```", normalized, 
flags=re.DOTALL) + normalized = re.sub(r"<code>(.*?)</code>", r"`\1`", normalized, flags=re.DOTALL) + normalized = re.sub(r'<a href="([^"]+)">(.*?)</a>', r"[\2](\1)", normalized, flags=re.DOTALL) + normalized = re.sub(r"<li>(.*?)</li>", r"- \1", normalized, flags=re.DOTALL | re.IGNORECASE) + normalized = re.sub(r"<ul>|</ul>|<ol>|</ol>", "", normalized, flags=re.IGNORECASE) + normalized = re.sub(r"<p>(.*?)</p>", r"\1\n\n", normalized, flags=re.DOTALL | re.IGNORECASE) + normalized = re.sub(r"<br\s*/?>", "\n", normalized, flags=re.IGNORECASE) + normalized = rich_text_tag_pattern.sub("", normalized) + normalized = re.sub(r"\n{3,}", "\n\n", normalized) + return normalized.strip() + + @staticmethod + @beartype + def _replace_heading(match: re.Match[str]) -> str: + """Convert HTML heading match to markdown heading.""" + level = int(match.group(1)) + content = match.group(2) + return f"\n{'#' * level} {content}\n" + @beartype @require(lambda self, fields_dict: isinstance(fields_dict, dict), "Fields dict must be dict") @require(lambda self, field_mappings: isinstance(field_mappings, dict), "Field mappings must be dict") diff --git a/src/specfact_cli/backlog/template_detector.py b/src/specfact_cli/backlog/template_detector.py index 84486ed9..3a4820cc 100644 --- a/src/specfact_cli/backlog/template_detector.py +++ b/src/specfact_cli/backlog/template_detector.py @@ -16,6 +16,33 @@ from specfact_cli.templates.registry import BacklogTemplate, TemplateRegistry +@beartype +@require(lambda item: isinstance(item, BacklogItem), "Item must be BacklogItem") +@require(lambda template: isinstance(template, BacklogTemplate), "Template must be BacklogTemplate") +@ensure(lambda result: isinstance(result, list), "Must return list") +def get_effective_required_sections(item: BacklogItem, template: BacklogTemplate) -> list[str]: + """ + Return required sections that should be validated in body content for this provider. 
+ + For ADO, structured fields are stored outside the description body and should not be + enforced as markdown body sections. + """ + required_sections = list(template.required_sections or []) + if item.provider.lower() != "ado": + return required_sections + + ado_structured_sections = { + "Story Points", + "Business Value", + "Priority", + "Work Item Type", + "Value Points", + "Area Path", + "Iteration Path", + } + return [section for section in required_sections if section not in ado_structured_sections] + + class TemplateDetectionResult: """Result of template detection with confidence and missing fields.""" @@ -72,13 +99,14 @@ def _score_structural_fit(self, item: BacklogItem, template: BacklogTemplate) -> Returns: Structural fit score (0.0-1.0) """ - if not template.required_sections: + required_sections = get_effective_required_sections(item, template) + if not required_sections: return 1.0 # No requirements = perfect match body_lower = item.body_markdown.lower() found_sections = 0 - for section in template.required_sections: + for section in required_sections: # Check for exact heading match (markdown heading) section_lower = section.lower() # Match markdown headings: # Section, ## Section, ### Section, etc. 
@@ -91,10 +119,10 @@ def _score_structural_fit(self, item: BacklogItem, template: BacklogTemplate) -> if section_lower in body_lower: found_sections += 1 - if not template.required_sections: + if not required_sections: return 1.0 - return found_sections / len(template.required_sections) + return found_sections / len(required_sections) @beartype @require(lambda self, item: isinstance(item, BacklogItem), "Item must be BacklogItem") @@ -154,7 +182,7 @@ def _find_missing_fields(self, item: BacklogItem, template: BacklogTemplate) -> missing: list[str] = [] body_lower = item.body_markdown.lower() - for section in template.required_sections: + for section in get_effective_required_sections(item, template): section_lower = section.lower() # Check for exact heading match heading_pattern = rf"^#+\s+{re.escape(section_lower)}\s*$" diff --git a/src/specfact_cli/cli.py b/src/specfact_cli/cli.py index ae69924e..f2a22993 100644 --- a/src/specfact_cli/cli.py +++ b/src/specfact_cli/cli.py @@ -281,6 +281,13 @@ def main( - Auto-detect from terminal and CI environment """ global _show_banner + global console + + # Rebind root and loaded module consoles for each invocation to avoid stale + # closed capture streams across sequential CliRunner/pytest command runs. 
+ console = get_configured_console() + runtime.refresh_loaded_module_consoles() + # Set banner flag based on --banner option _show_banner = banner diff --git a/src/specfact_cli/models/enforcement.py b/src/specfact_cli/models/enforcement.py index 2091abd7..39f4c56b 100644 --- a/src/specfact_cli/models/enforcement.py +++ b/src/specfact_cli/models/enforcement.py @@ -39,7 +39,6 @@ class EnforcementConfig(BaseModel): enabled: bool = Field(default=True, description="Whether enforcement is enabled") @classmethod - @beartype @require(lambda preset: preset in EnforcementPreset, "Preset must be valid EnforcementPreset") @ensure(lambda result: isinstance(result, EnforcementConfig), "Must return EnforcementConfig") @ensure(lambda result: result.enabled is True, "Config must be enabled") diff --git a/src/specfact_cli/modules/backlog/module-package.yaml b/src/specfact_cli/modules/backlog/module-package.yaml index 8c881e51..fa7d0c1c 100644 --- a/src/specfact_cli/modules/backlog/module-package.yaml +++ b/src/specfact_cli/modules/backlog/module-package.yaml @@ -1,5 +1,5 @@ name: backlog -version: 0.1.2 +version: 0.1.4 commands: - backlog command_help: @@ -28,5 +28,5 @@ publisher: description: Manage backlog ceremonies, refinement, and dependency insights. 
license: Apache-2.0 integrity: - checksum: sha256:ef3febe90c62fa24b3da6b6b2f8a7b605c5b544db01aac10e5cfacc807bcca26 - signature: 2b/UNbFlgRQiwJ+LrEQKyoYUi8lxeLGc4hrcTUdCinU9yXa1h6GvYpRkAkzRW8kxNNNsetphuf7hLZ3II3bNDQ== + checksum: sha256:8412dca4ad9c4ae2d2a80280a149b839976c5adc8b7138e94a0e397839baf6bd + signature: xylvv23Fyj+BKSBTwzqAGR/tp5wxGNQ3KLh/pzpR7bpQHpExzygAP75n7psvWwbsyHgOG/lHL6I+UzYCwg7+Dw== diff --git a/src/specfact_cli/modules/backlog/src/commands.py b/src/specfact_cli/modules/backlog/src/commands.py index 3e18eea1..fb71f517 100644 --- a/src/specfact_cli/modules/backlog/src/commands.py +++ b/src/specfact_cli/modules/backlog/src/commands.py @@ -41,7 +41,7 @@ from specfact_cli.backlog.adapters.base import BacklogAdapter from specfact_cli.backlog.ai_refiner import BacklogAIRefiner from specfact_cli.backlog.filters import BacklogFilters -from specfact_cli.backlog.template_detector import TemplateDetector +from specfact_cli.backlog.template_detector import TemplateDetector, get_effective_required_sections from specfact_cli.models.backlog_item import BacklogItem from specfact_cli.models.dor_config import DefinitionOfReady from specfact_cli.models.plan import Product @@ -556,13 +556,51 @@ def _deep_merge(dst: dict[str, Any], src: dict[str, Any]) -> dict[str, Any]: return path +@beartype +def _resolve_backlog_provider_framework(provider: str) -> str | None: + """Resolve configured framework for a backlog provider from backlog-config and mapping files.""" + normalized_provider = provider.strip().lower() + if not normalized_provider: + return None + + cfg, _path = _load_backlog_module_config_file() + backlog_config = cfg.get("backlog_config") + if isinstance(backlog_config, dict): + providers = backlog_config.get("providers") + if isinstance(providers, dict): + provider_cfg = providers.get(normalized_provider) + if isinstance(provider_cfg, dict): + settings = provider_cfg.get("settings") + if isinstance(settings, dict): + configured = str(settings.get("framework") 
or "").strip().lower() + if configured: + return configured + + # ADO fallback: read framework from custom mapping file when provider settings are absent. + if normalized_provider == "ado": + mapping_path = Path.cwd() / ".specfact" / "templates" / "backlog" / "field_mappings" / "ado_custom.yaml" + if mapping_path.exists(): + with contextlib.suppress(Exception): + from specfact_cli.backlog.mappers.template_config import FieldMappingConfig + + config = FieldMappingConfig.from_file(mapping_path) + configured = str(config.framework or "").strip().lower() + if configured: + return configured + + return None + + @beartype def _resolve_standup_options( cli_state: str | None, cli_limit: int | None, cli_assignee: str | None, config: dict[str, Any] | None, -) -> tuple[str, int, str | None]: + *, + state_filter_disabled: bool = False, + assignee_filter_disabled: bool = False, +) -> tuple[str | None, int, str | None]: """ Resolve effective state, limit, assignee from CLI options and config. CLI options override config; config overrides built-in defaults. 
@@ -574,9 +612,9 @@ def _resolve_standup_options( default_assignee = cfg.get("default_assignee") if default_assignee is not None: default_assignee = str(default_assignee) - state = cli_state if cli_state is not None else default_state + state = None if state_filter_disabled else (cli_state if cli_state is not None else default_state) limit = cli_limit if cli_limit is not None else default_limit - assignee = cli_assignee if cli_assignee is not None else default_assignee + assignee = None if assignee_filter_disabled else (cli_assignee if cli_assignee is not None else default_assignee) return (state, limit, assignee) @@ -597,6 +635,37 @@ def _resolve_post_fetch_assignee_filter(adapter: str, assignee: str | None) -> s return assignee +@beartype +def _normalize_state_filter_value(state: str | None) -> str | None: + """Normalize state filter literals and map `any` to no-filter.""" + if state is None: + return None + normalized = BacklogFilters.normalize_filter_value(state) + if normalized in {"any", "all", "*"}: + return None + return state + + +@beartype +def _normalize_assignee_filter_value(assignee: str | None) -> str | None: + """Normalize assignee filter literals and map `any`/`@any` to no-filter.""" + if assignee is None: + return None + normalized = BacklogFilters.normalize_filter_value(assignee.lstrip("@")) + if normalized in {"any", "all", "*"}: + return None + return assignee + + +@beartype +def _is_filter_disable_literal(value: str | None) -> bool: + """Return True when CLI filter literal explicitly disables filtering.""" + if value is None: + return False + normalized = BacklogFilters.normalize_filter_value(value.lstrip("@")) + return normalized in {"any", "all", "*"} + + @beartype def _split_assigned_unassigned(items: list[BacklogItem]) -> tuple[list[BacklogItem], list[BacklogItem]]: """Split items into assigned and unassigned (assignees empty or None).""" @@ -885,6 +954,67 @@ def _resolve_daily_mode_state( return effective_state +@beartype +def 
_format_daily_scope_summary( + *, + mode: str, + cli_state: str | None, + effective_state: str | None, + cli_assignee: str | None, + effective_assignee: str | None, + cli_limit: int | None, + effective_limit: int, + issue_id: str | None, + labels: list[str] | str | None, + sprint: str | None, + iteration: str | None, + release: str | None, + first_issues: int | None, + last_issues: int | None, +) -> str: + """Build a compact scope summary for daily output with explicit/default source markers.""" + + def _source(*, cli_value: object | None, disabled: bool = False) -> str: + if disabled: + return "disabled by --id" + if cli_value is not None: + return "explicit" + return "default" + + scope_parts: list[str] = [f"mode={mode} (explicit)"] + + state_disabled = issue_id is not None and cli_state is None + state_value = effective_state if effective_state else "—" + scope_parts.append(f"state={state_value} ({_source(cli_value=cli_state, disabled=state_disabled)})") + + assignee_disabled = issue_id is not None and cli_assignee is None + assignee_value = effective_assignee if effective_assignee else "—" + scope_parts.append(f"assignee={assignee_value} ({_source(cli_value=cli_assignee, disabled=assignee_disabled)})") + + limit_source = _source(cli_value=cli_limit) + if first_issues is not None or last_issues is not None: + limit_source = "disabled by issue window" + scope_parts.append(f"limit={effective_limit} ({limit_source})") + + if issue_id is not None: + scope_parts.append("id=" + issue_id + " (explicit)") + if labels: + labels_value = ", ".join(labels) if isinstance(labels, list) else labels + scope_parts.append("labels=" + labels_value + " (explicit)") + if sprint: + scope_parts.append("sprint=" + sprint + " (explicit)") + if iteration: + scope_parts.append("iteration=" + iteration + " (explicit)") + if release: + scope_parts.append("release=" + release + " (explicit)") + if first_issues is not None: + scope_parts.append(f"first_issues={first_issues} (explicit)") + 
if last_issues is not None: + scope_parts.append(f"last_issues={last_issues} (explicit)") + + return "Applied filters: " + ", ".join(scope_parts) + + @beartype def _has_policy_failure(row: dict[str, Any]) -> bool: """Return True when row indicates a policy failure signal.""" @@ -1397,18 +1527,30 @@ def _build_refine_export_content( "For import readiness: the refined artifact (`--import-from-tmp`) must not include this instruction block; " "it should contain only the `## Item N:` sections and refined fields.\n\n" ) + export_content += ( + "Import contract: **ID** is mandatory in every item block and must remain unchanged from export; " + "ID lookup drives update mapping during `--import-from-tmp`.\n\n" + ) export_content += "**Refinement Rules (same as interactive mode):**\n" export_content += "1. Preserve all original requirements, scope, and technical details\n" export_content += "2. Do NOT add new features or change the scope\n" - export_content += "3. Transform content to match the target template structure\n" - export_content += "4. If required information is missing, use a Markdown checkbox: `- [ ] describe what's needed`\n" + export_content += "3. Do NOT summarize, shorten, or drop details; keep full detail and intent\n" + export_content += "4. Transform content to match the target template structure\n" + export_content += "5. Story text must be explicit, specific, and unambiguous (SMART-style)\n" + export_content += "6. If required information is missing, use a Markdown checkbox: `- [ ] describe what's needed`\n" + export_content += ( + "7. If information is conflicting or ambiguous, add a `[NOTES]` section at the end explaining ambiguity\n" + ) + export_content += "8. Use markdown headings for sections (`## Section Name`)\n" + export_content += "9. Include story points, business value, priority, and work item type when available\n" + export_content += "10. For high-complexity stories, suggest splitting when appropriate\n" + export_content += "11. 
Follow provider-aware formatting guidance listed per item\n\n" + export_content += "**Template Execution Rules (mandatory):**\n" export_content += ( - "5. If information is conflicting or ambiguous, add a `[NOTES]` section at the end explaining ambiguity\n" + "1. Use `Target Template`, `Required Sections`, and `Optional Sections` as the exact structure contract\n" ) - export_content += "6. Use markdown headings for sections (`## Section Name`)\n" - export_content += "7. Include story points, business value, priority, and work item type when available\n" - export_content += "8. For high-complexity stories, suggest splitting when appropriate\n" - export_content += "9. Follow provider-aware formatting guidance listed per item\n\n" + export_content += "2. Keep all original requirements and constraints; do not silently drop details\n" + export_content += "3. Improve specificity and testability; avoid generic summaries that lose intent\n\n" export_content += "**Expected Output Scaffold (ordered):**\n" export_content += "```markdown\n" export_content += "## Work Item Properties / Metadata\n" @@ -1463,7 +1605,8 @@ def _build_refine_export_content( export_content += "\n**Provider-aware formatting**:\n" export_content += "- GitHub: Use markdown headings in body (`## Section Name`).\n" export_content += ( - "- ADO: Use markdown headings in body; adapter maps to provider fields during writeback.\n" + "- ADO: Keep metadata (Story Points/Business Value/Priority/Work Item Type) in `**Metrics**`; " + "do not add those as body headings. Keep description narrative in body markdown.\n" ) if item.story_points is not None or item.business_value is not None or item.priority is not None: @@ -1509,6 +1652,42 @@ def _resolve_target_template_for_refine_item( direct = registry.get_template(template_id) if direct is not None: return direct + + # Provider steering: user-story-like item types should refine toward user story templates, + # not generic provider work-item/enabler templates. 
+ if normalized_adapter in {"ado", "github"}: + normalized_tokens: set[str] = set() + + work_item_type = (item.work_item_type or "").strip() + if work_item_type: + normalized_tokens.add(work_item_type.lower()) + + if normalized_adapter == "ado": + provider_fields = item.provider_fields.get("fields") + if isinstance(provider_fields, dict): + provider_type = str(provider_fields.get("System.WorkItemType") or "").strip().lower() + if provider_type: + normalized_tokens.add(provider_type) + elif normalized_adapter == "github": + provider_issue_type = item.provider_fields.get("issue_type") + if isinstance(provider_issue_type, str) and provider_issue_type.strip(): + normalized_tokens.add(provider_issue_type.strip().lower()) + normalized_tokens.update(tag.strip().lower() for tag in item.tags if isinstance(tag, str) and tag.strip()) + + is_user_story_like = bool( + normalized_tokens.intersection({"user story", "story", "product backlog item", "pbi"}) + ) + if is_user_story_like: + preferred_ids = ( + ["scrum_user_story_v1", "user_story_v1"] + if normalized_framework == "scrum" + else ["user_story_v1", "scrum_user_story_v1"] + ) + for preferred_id in preferred_ids: + preferred = registry.get_template(preferred_id) + if preferred is not None: + return preferred + detection_result = detector.detect_template( item, provider=normalized_adapter, @@ -1824,6 +2003,47 @@ def _build_adapter_kwargs( return kwargs +@beartype +def _load_ado_framework_template_config(framework: str) -> dict[str, Any]: + """ + Load built-in ADO field mapping template config for a framework. + + Returns a dict with keys: framework, field_mappings, work_item_type_mappings. + Falls back to ado_default.yaml when framework-specific file is unavailable. 
+ """ + normalized = (framework or "default").strip().lower() or "default" + candidates = [f"ado_{normalized}.yaml", "ado_default.yaml"] + + candidate_roots: list[Path] = [] + with contextlib.suppress(Exception): + from specfact_cli.utils.ide_setup import find_package_resources_path + + packaged = find_package_resources_path("specfact_cli", "resources/templates/backlog/field_mappings") + if packaged and packaged.exists(): + candidate_roots.append(packaged) + + repo_root = Path(__file__).parent.parent.parent.parent.parent.parent + candidate_roots.append(repo_root / "resources" / "templates" / "backlog" / "field_mappings") + + for root in candidate_roots: + if not root.exists(): + continue + for filename in candidates: + file_path = root / filename + if file_path.exists(): + with contextlib.suppress(Exception): + from specfact_cli.backlog.mappers.template_config import FieldMappingConfig + + cfg = FieldMappingConfig.from_file(file_path) + return cfg.model_dump() + + return { + "framework": "default", + "field_mappings": {}, + "work_item_type_mappings": {}, + } + + def _extract_body_from_block(block: str) -> str: """ Extract **Body** content from a refined export block, handling nested fenced code. @@ -1875,10 +2095,14 @@ def _parse_refined_export_markdown(content: str) -> dict[str, dict[str, Any]]: business_value?, priority?). 
""" result: dict[str, dict[str, Any]] = {} - blocks = re.split(r"\n## Item \d+:", content) - for block in blocks: - block = block.strip() - if not block or block.startswith("# SpecFact") or "**ID**:" not in block: + item_block_pattern = re.compile( + r"(?:^|\n)## Item \d+:\s*(?P<title>[^\n]*)\n(?P<body>.*?)(?=(?:\n## Item \d+:)|\Z)", + re.DOTALL, + ) + for match in item_block_pattern.finditer(content): + block_title = match.group("title").strip() + block = match.group("body").strip() + if not block or "**ID**:" not in block: continue id_match = re.search(r"\*\*ID\*\*:\s*(.+?)(?:\n|$)", block) if not id_match: @@ -1894,9 +2118,8 @@ def _parse_refined_export_markdown(content: str) -> dict[str, dict[str, Any]]: else: fields["acceptance_criteria"] = None - first_line = block.split("\n")[0].strip() if block else "" - if first_line and not first_line.startswith("**"): - fields["title"] = first_line + if block_title: + fields["title"] = block_title if "Story Points:" in block: sp_match = re.search(r"Story Points:\s*(\d+)", block) @@ -1915,6 +2138,107 @@ def _parse_refined_export_markdown(content: str) -> dict[str, dict[str, Any]]: return result +_CONTENT_LOSS_STOPWORDS = { + "the", + "and", + "for", + "with", + "from", + "that", + "this", + "into", + "your", + "you", + "are", + "was", + "were", + "will", + "shall", + "must", + "can", + "should", + "have", + "has", + "had", + "not", + "but", + "all", + "any", + "our", + "out", + "use", + "using", + "used", + "need", + "needs", + "item", + "story", + "description", + "acceptance", + "criteria", + "work", + "points", + "value", + "priority", +} + + +@beartype +@require(lambda text: isinstance(text, str), "text must be string") +@ensure(lambda result: isinstance(result, set), "Must return set") +def _extract_content_terms(text: str) -> set[str]: + """Extract meaningful lowercase terms from narrative text for loss checks.""" + tokens = re.findall(r"[A-Za-z0-9][A-Za-z0-9_-]{2,}", text.lower()) + return {token for token in 
tokens if token not in _CONTENT_LOSS_STOPWORDS} + + +@beartype +@require(lambda original: isinstance(original, str), "original must be string") +@require(lambda refined: isinstance(refined, str), "refined must be string") +@ensure(lambda result: isinstance(result, tuple) and len(result) == 2, "Must return (bool, str)") +def _detect_significant_content_loss(original: str, refined: str) -> tuple[bool, str]: + """ + Detect likely silent content loss (summarization/truncation) in refined body. + + Returns (has_loss, reason). Conservative thresholds aim to catch substantial + detail drops while allowing normal structural cleanup. + """ + original_text = original.strip() + refined_text = refined.strip() + if not original_text: + return (False, "") + if not refined_text: + return (True, "refined description is empty") + + original_len = len(original_text) + refined_len = len(refined_text) + length_ratio = refined_len / max(1, original_len) + + original_terms = _extract_content_terms(original_text) + if not original_terms: + # If original has no meaningful terms, rely only on empty/non-empty check above. + return (False, "") + + refined_terms = _extract_content_terms(refined_text) + retained_terms = len(original_terms.intersection(refined_terms)) + retention_ratio = retained_terms / len(original_terms) + + # Strong signal of summarization/loss: body is much shorter and lost many terms. + if length_ratio < 0.65 and retention_ratio < 0.60: + reason = ( + f"length ratio {length_ratio:.2f} and content-term retention {retention_ratio:.2f} " + "(likely summarized/truncated)" + ) + return (True, reason) + + # Extremely aggressive shrink, even if wording changed heavily. 
+ if length_ratio < 0.45: + reason = f"length ratio {length_ratio:.2f} (refined description is much shorter than original)" + return (True, reason) + + return (False, "") + + @beartype @require(lambda content: isinstance(content, str), "Refinement output must be a string") @ensure(lambda result: isinstance(result, dict), "Must return a dict") @@ -2130,12 +2454,13 @@ def _item_needs_refinement( if detection_result.template_id: target = registry.get_template(detection_result.template_id) if detection_result.template_id else None if target and target.required_sections: + required_sections = get_effective_required_sections(item, target) has_checkboxes = bool( re.search(r"^[\s]*- \[[ x]\]", item.body_markdown or "", re.MULTILINE | re.IGNORECASE) ) all_present = all( bool(re.search(rf"^#+\s+{re.escape(s)}\s*$", item.body_markdown or "", re.MULTILINE | re.IGNORECASE)) - for s in target.required_sections + for s in required_sections ) if has_checkboxes and all_present and not detection_result.missing_fields: return False @@ -2152,6 +2477,7 @@ def _fetch_backlog_items( iteration: str | None = None, sprint: str | None = None, release: str | None = None, + issue_id: str | None = None, limit: int | None = None, repo_owner: str | None = None, repo_name: str | None = None, @@ -2173,6 +2499,7 @@ def _fetch_backlog_items( iteration: Filter by iteration path (post-fetch filtering) sprint: Filter by sprint (post-fetch filtering) release: Filter by release (post-fetch filtering) + issue_id: Filter by exact issue/work-item ID limit: Maximum number of items to fetch Returns: @@ -2222,15 +2549,19 @@ def _fetch_backlog_items( msg = f"Adapter {adapter_name} does not implement BacklogAdapter interface" raise NotImplementedError(msg) + normalized_state = _normalize_state_filter_value(state) + normalized_assignee = _normalize_assignee_filter_value(assignee) + # Create BacklogFilters from parameters filters = BacklogFilters( - assignee=assignee, - state=state, + assignee=normalized_assignee, 
+ state=normalized_state, labels=labels, search=search_query, iteration=iteration, sprint=sprint, release=release, + issue_id=issue_id, limit=limit, ) @@ -2360,12 +2691,16 @@ def daily( assignee: str | None = typer.Option( None, "--assignee", - help="Filter by assignee (e.g. 'me' or username). Only matching items are listed.", + help="Filter by assignee (e.g. 'me' or username). Use 'any' to disable assignee filtering.", ), search: str | None = typer.Option( None, "--search", "-s", help="Search query to filter backlog items (provider-specific syntax)" ), - state: str | None = typer.Option(None, "--state", help="Filter by state (e.g. open, closed, Active)"), + state: str | None = typer.Option( + None, + "--state", + help="Filter by state (e.g. open, closed, Active). Use 'any' to disable state filtering.", + ), labels: list[str] | None = typer.Option(None, "--labels", "--tags", help="Filter by labels/tags"), release: str | None = typer.Option(None, "--release", help="Filter by release identifier"), issue_id: str | None = typer.Option( @@ -2509,14 +2844,29 @@ def daily( if normalized_mode not in {"scrum", "kanban", "safe"}: console.print("[red]Invalid --mode. 
Use one of: scrum, kanban, safe.[/red]") raise typer.Exit(1) + normalized_cli_state = _normalize_state_filter_value(state) + normalized_cli_assignee = _normalize_assignee_filter_value(assignee) + state_filter_disabled = _is_filter_disable_literal(state) + assignee_filter_disabled = _is_filter_disable_literal(assignee) effective_state, effective_limit, effective_assignee = _resolve_standup_options( - state, limit, assignee, standup_config + normalized_cli_state, + limit, + normalized_cli_assignee, + standup_config, + state_filter_disabled=state_filter_disabled, + assignee_filter_disabled=assignee_filter_disabled, ) effective_state = _resolve_daily_mode_state( mode=normalized_mode, - cli_state=state, + cli_state=normalized_cli_state, effective_state=effective_state, ) + if issue_id is not None: + # ID-specific lookup should not be constrained by implicit standup defaults. + if normalized_cli_state is None: + effective_state = None + if normalized_cli_assignee is None: + effective_assignee = None fetch_limit = _resolve_daily_fetch_limit( effective_limit, first_issues=first_issues, @@ -2534,6 +2884,7 @@ def daily( assignee=effective_assignee, labels=labels, release=release, + issue_id=issue_id, limit=fetch_limit, iteration=iteration, sprint=sprint, @@ -2566,6 +2917,27 @@ def daily( except ValueError as exc: console.print(f"[red]{exc}.[/red]") raise typer.Exit(1) from exc + + console.print( + "[dim]" + + _format_daily_scope_summary( + mode=normalized_mode, + cli_state=state, + effective_state=effective_state, + cli_assignee=assignee, + effective_assignee=effective_assignee, + cli_limit=limit, + effective_limit=effective_limit, + issue_id=issue_id, + labels=labels, + sprint=sprint, + iteration=iteration, + release=release, + first_issues=first_issues, + last_issues=last_issues, + ) + + "[/dim]" + ) if display_limit is not None and len(filtered) > display_limit: filtered = filtered[:display_limit] @@ -2800,12 +3172,14 @@ def refine( None, "--labels", "--tags", 
help="Filter by labels/tags (can specify multiple)" ), state: str | None = typer.Option( - None, "--state", help="Filter by state (case-insensitive, e.g., 'open', 'closed', 'Active', 'New')" + None, + "--state", + help="Filter by state (case-insensitive, e.g., 'open', 'closed', 'Active', 'New'). Use 'any' to disable state filtering.", ), assignee: str | None = typer.Option( None, "--assignee", - help="Filter by assignee (case-insensitive). GitHub: login or @username. ADO: displayName, uniqueName, or mail", + help="Filter by assignee (case-insensitive). GitHub: login or @username. ADO: displayName, uniqueName, or mail. Use 'any' to disable assignee filtering.", ), # Iteration/sprint filters iteration: str | None = typer.Option( @@ -2958,6 +3332,8 @@ def refine( """ try: # Show initialization progress to provide feedback during setup + normalized_state_filter = _normalize_state_filter_value(state) + normalized_assignee_filter = _normalize_assignee_filter_value(assignee) with Progress( SpinnerColumn(), TextColumn("[progress.description]{task.description}"), @@ -3052,6 +3428,8 @@ def refine( normalized_adapter = adapter.lower() if adapter else None normalized_framework = framework.lower() if framework else None normalized_persona = persona.lower() if persona else None + if normalized_adapter and not normalized_framework: + normalized_framework = _resolve_backlog_provider_framework(normalized_adapter) # Validate adapter-specific required parameters (use same resolution as daily: CLI > env > config > git) validate_task = init_progress.add_task("[cyan]Validating adapter configuration...[/cyan]", total=None) @@ -3127,11 +3505,12 @@ def refine( adapter, search_query=search, labels=labels, - state=state, - assignee=assignee, + state=normalized_state_filter, + assignee=normalized_assignee_filter, iteration=iteration, sprint=sprint, release=release, + issue_id=issue_id, limit=fetch_limit, repo_owner=repo_owner, repo_name=repo_name, @@ -3146,10 +3525,10 @@ def refine( if not 
items: # Provide helpful message when no items found, especially if filters were used filter_info = [] - if state: - filter_info.append(f"state={state}") - if assignee: - filter_info.append(f"assignee={assignee}") + if normalized_state_filter: + filter_info.append(f"state={normalized_state_filter}") + if normalized_assignee_filter: + filter_info.append(f"assignee={normalized_assignee_filter}") if iteration: filter_info.append(f"iteration={iteration}") if sprint: @@ -3249,12 +3628,21 @@ def refine( normalized_persona=normalized_persona, ) if target_template is not None: + effective_required_sections = get_effective_required_sections(export_item, target_template) + effective_optional_sections = list(target_template.optional_sections or []) + if export_item.provider.lower() == "ado": + ado_structured_optional_sections = {"Area Path", "Iteration Path"} + effective_optional_sections = [ + section + for section in effective_optional_sections + if section not in ado_structured_optional_sections + ] template_guidance_by_item_id[export_item.id] = { "template_id": target_template.template_id, "name": target_template.name, "description": target_template.description, - "required_sections": list(target_template.required_sections or []), - "optional_sections": list(target_template.optional_sections or []), + "required_sections": list(effective_required_sections), + "optional_sections": effective_optional_sections, } export_content = _build_refine_export_content( adapter, @@ -3300,8 +3688,21 @@ def refine( if item.id not in parsed_by_id: continue data = parsed_by_id[item.id] - body = data.get("body_markdown", item.body_markdown or "") - item.body_markdown = body if body is not None else (item.body_markdown or "") + original_body = item.body_markdown or "" + body = data.get("body_markdown", original_body) + refined_body = body if body is not None else original_body + has_loss, loss_reason = _detect_significant_content_loss(original_body, refined_body) + if has_loss: + 
console.print( + "[bold red]โœ—[/bold red] Refined content for " + f"item {item.id} appears to drop important detail ({loss_reason})." + ) + console.print( + "[dim]Refinement must preserve full story detail and requirements. " + "Update the tmp file with complete content and retry import.[/dim]" + ) + raise typer.Exit(1) + item.body_markdown = refined_body if "acceptance_criteria" in data: item.acceptance_criteria = data["acceptance_criteria"] if data.get("title"): @@ -3314,6 +3715,13 @@ def refine( item.priority = data["priority"] updated_items.append(item) + if parsed_by_id and not updated_items: + console.print("[bold red]โœ—[/bold red] None of the refined item IDs matched fetched backlog items.") + console.print( + "[dim]Keep each exported `**ID**` unchanged in every `## Item N:` block, then retry import.[/dim]" + ) + raise typer.Exit(1) + if not write: console.print(f"[green]Would update {len(updated_items)} item(s)[/green]") console.print("[dim]Run with --write to apply changes to the backlog[/dim]") @@ -3465,6 +3873,25 @@ def _on_write_comment_progress(index: int, total: int, item: BacklogItem) -> Non detection_result = detector.detect_template( item, provider=normalized_adapter, framework=normalized_framework, persona=normalized_persona ) + resolved_target_template = _resolve_target_template_for_refine_item( + item, + detector=detector, + registry=registry, + template_id=template_id, + normalized_adapter=normalized_adapter, + normalized_framework=normalized_framework, + normalized_persona=normalized_persona, + ) + if ( + template_id is None + and resolved_target_template is not None + and detection_result.template_id != resolved_target_template.template_id + ): + detection_result.template_id = resolved_target_template.template_id + detection_result.confidence = 0.6 * detector._score_structural_fit( + item, resolved_target_template + ) + 0.4 * detector._score_pattern_fit(item, resolved_target_template) + detection_result.missing_fields = 
detector._find_missing_fields(item, resolved_target_template) if detection_result.template_id: template_id_str = detection_result.template_id @@ -3487,7 +3914,8 @@ def _on_write_comment_progress(index: int, total: int, item: BacklogItem) -> Non ) # Check if all required sections are present all_sections_present = True - for section in target_template_for_check.required_sections: + required_sections_for_check = get_effective_required_sections(item, target_template_for_check) + for section in required_sections_for_check: # Look for section heading (## Section Name or ### Section Name) section_pattern = rf"^#+\s+{re.escape(section)}\s*$" if not re.search(section_pattern, item.body_markdown, re.MULTILINE | re.IGNORECASE): @@ -3522,23 +3950,11 @@ def _on_write_comment_progress(index: int, total: int, item: BacklogItem) -> Non console.print(f"[yellow]Template {template_id} not found, using auto-detection[/yellow]") elif detection_result.template_id: target_template = registry.get_template(detection_result.template_id) - else: - # Use priority-based template resolution - # Use normalized values for case-insensitive template matching - target_template = registry.resolve_template( - provider=normalized_adapter, framework=normalized_framework, persona=normalized_persona - ) + if target_template is None: + target_template = resolved_target_template if target_template: resolved_id = target_template.template_id console.print(f"[yellow]No template detected, using resolved template: {resolved_id}[/yellow]") - else: - # Fallback: Use first available template as default - templates = registry.list_templates(scope="corporate") - if templates: - target_template = templates[0] - console.print( - f"[yellow]No template resolved, using default: {target_template.template_id}[/yellow]" - ) if not target_template: console.print("[yellow]No template available for refinement[/yellow]") @@ -3572,8 +3988,9 @@ def _on_write_comment_progress(index: int, total: int, item: BacklogItem) -> Non # 
Always show acceptance criteria if it's a required section, even if empty # This helps copilot understand what fields need to be added + required_sections_for_preview = get_effective_required_sections(item, target_template) is_acceptance_criteria_required = ( - target_template.required_sections and "Acceptance Criteria" in target_template.required_sections + bool(required_sections_for_preview) and "Acceptance Criteria" in required_sections_for_preview ) if is_acceptance_criteria_required or item.acceptance_criteria: console.print("\n[bold]Acceptance Criteria:[/bold]") @@ -3875,6 +4292,7 @@ def init_config( "adapter": "ado", "project_id": "", "settings": { + "framework": "default", "field_mapping_file": ".specfact/templates/backlog/field_mappings/ado_custom.yaml", }, }, @@ -3902,6 +4320,11 @@ def map_fields( ado_base_url: str | None = typer.Option( None, "--ado-base-url", help="Azure DevOps base URL (defaults to https://dev.azure.com)" ), + ado_framework: str | None = typer.Option( + None, + "--ado-framework", + help="ADO process style/framework for mapping/template steering (scrum, agile, safe, kanban, default)", + ), provider: list[str] = typer.Option( [], "--provider", help="Provider(s) to configure: ado, github (repeatable)", show_default=False ), @@ -4696,28 +5119,7 @@ def _find_potential_match(canonical_field: str, available_fields: list[dict[str, # Sort fields by reference name relevant_fields.sort(key=lambda f: f.get("referenceName", "")) - # Canonical fields to map - canonical_fields = { - "description": "Description", - "acceptance_criteria": "Acceptance Criteria", - "story_points": "Story Points", - "business_value": "Business Value", - "priority": "Priority", - "work_item_type": "Work Item Type", - } - - # Load default mappings from AdoFieldMapper - from specfact_cli.backlog.mappers.ado_mapper import AdoFieldMapper - - default_mappings = AdoFieldMapper.DEFAULT_FIELD_MAPPINGS - # Reverse default mappings: canonical -> list of ADO fields - 
default_mappings_reversed: dict[str, list[str]] = {} - for ado_field, canonical in default_mappings.items(): - if canonical not in default_mappings_reversed: - default_mappings_reversed[canonical] = [] - default_mappings_reversed[canonical].append(ado_field) - - # Handle --reset flag + # Handle --reset flag / existing custom mapping first (used for framework defaults too) current_dir = Path.cwd() custom_mapping_file = current_dir / ".specfact" / "templates" / "backlog" / "field_mappings" / "ado_custom.yaml" @@ -4743,6 +5145,112 @@ def _find_potential_match(canonical_field: str, available_fields: list[dict[str, except Exception as e: console.print(f"[yellow]โš [/yellow] Failed to load existing mapping: {e}") + try: + import questionary # type: ignore[reportMissingImports] + except ImportError: + console.print( + "[red]Interactive field mapping requires the 'questionary' package. Install with: pip install questionary[/red]" + ) + raise typer.Exit(1) from None + + allowed_frameworks = ["scrum", "agile", "safe", "kanban", "default"] + + def _detect_ado_framework_from_work_item_types() -> str | None: + work_item_types_url = f"{base_url}/{ado_org}/{ado_project}/_apis/wit/workitemtypes?api-version=7.1" + try: + response = requests.get(work_item_types_url, headers=headers, timeout=30) + response.raise_for_status() + payload = response.json() + nodes = payload.get("value", []) + names = { + str(node.get("name") or "").strip().lower() + for node in nodes + if isinstance(node, dict) and str(node.get("name") or "").strip() + } + if not names: + return None + if "product backlog item" in names: + return "scrum" + if "capability" in names: + return "safe" + if "user story" in names: + return "agile" + if "issue" in names: + return "kanban" + except requests.exceptions.RequestException: + return None + return None + + selected_framework = (ado_framework or "").strip().lower() + if selected_framework and selected_framework not in allowed_frameworks: + console.print( + 
f"[red]Error:[/red] Invalid --ado-framework '{ado_framework}'. " + f"Expected one of: {', '.join(allowed_frameworks)}" + ) + raise typer.Exit(1) + + detected_framework = _detect_ado_framework_from_work_item_types() + existing_framework = ( + (existing_config.framework if existing_config else "").strip().lower() if existing_config else "" + ) + framework_default = selected_framework or detected_framework or existing_framework or "default" + + if not selected_framework: + framework_choices: list[Any] = [] + for option in allowed_frameworks: + label = option + if option == detected_framework: + label = f"{option} (detected)" + elif option == existing_framework: + label = f"{option} (current)" + framework_choices.append(questionary.Choice(title=label, value=option)) + try: + picked_framework = questionary.select( + "Select ADO process style/framework for mapping and refinement templates", + choices=framework_choices, + default=framework_default, + use_arrow_keys=True, + use_jk_keys=False, + ).ask() + selected_framework = str(picked_framework or framework_default).strip().lower() + except (KeyboardInterrupt, EOFError): + console.print("\n[yellow]Selection cancelled.[/yellow]") + raise typer.Exit(0) from None + + if selected_framework not in allowed_frameworks: + selected_framework = "default" + + console.print(f"[dim]Using ADO framework:[/dim] {selected_framework}") + + framework_template = _load_ado_framework_template_config(selected_framework) + framework_field_mappings = framework_template.get("field_mappings", {}) + framework_work_item_type_mappings = framework_template.get("work_item_type_mappings", {}) + + # Canonical fields to map + canonical_fields = { + "description": "Description", + "acceptance_criteria": "Acceptance Criteria", + "story_points": "Story Points", + "business_value": "Business Value", + "priority": "Priority", + "work_item_type": "Work Item Type", + } + + # Load default mappings from AdoFieldMapper + from specfact_cli.backlog.mappers.ado_mapper 
import AdoFieldMapper + + default_mappings = ( + framework_field_mappings + if isinstance(framework_field_mappings, dict) and framework_field_mappings + else AdoFieldMapper.DEFAULT_FIELD_MAPPINGS + ) + # Reverse default mappings: canonical -> list of ADO fields + default_mappings_reversed: dict[str, list[str]] = {} + for ado_field, canonical in default_mappings.items(): + if canonical not in default_mappings_reversed: + default_mappings_reversed[canonical] = [] + default_mappings_reversed[canonical].append(ado_field) + # Build combined mapping: existing > default (checking which defaults exist in fetched fields) combined_mapping: dict[str, str] = {} # Get list of available ADO field reference names @@ -4776,14 +5284,6 @@ def _find_potential_match(canonical_field: str, available_fields: list[dict[str, combined_mapping.update(existing_mapping) # Interactive mapping - try: - import questionary # type: ignore[reportMissingImports] - except ImportError: - console.print( - "[red]Interactive field mapping requires the 'questionary' package. Install with: pip install questionary[/red]" - ) - raise typer.Exit(1) from None - console.print() console.print(Panel("[bold cyan]Interactive Field Mapping[/bold cyan]", border_style="cyan")) console.print("[dim]Use โ†‘โ†“ to navigate, โŽ to select. 
Map ADO fields to canonical field names.[/dim]") @@ -4880,11 +5380,15 @@ def _find_potential_match(canonical_field: str, available_fields: list[dict[str, # Preserve existing work_item_type_mappings if they exist # This prevents erasing custom work item type mappings when updating field mappings - work_item_type_mappings = existing_work_item_type_mappings.copy() if existing_work_item_type_mappings else {} + work_item_type_mappings = ( + dict(framework_work_item_type_mappings) if isinstance(framework_work_item_type_mappings, dict) else {} + ) + if existing_work_item_type_mappings: + work_item_type_mappings.update(existing_work_item_type_mappings) # Create FieldMappingConfig config = FieldMappingConfig( - framework=existing_config.framework if existing_config else "default", + framework=selected_framework, field_mappings=final_mapping, work_item_type_mappings=work_item_type_mappings, ) @@ -4904,6 +5408,7 @@ def _find_potential_match(canonical_field: str, available_fields: list[dict[str, "field_mapping_file": ".specfact/templates/backlog/field_mappings/ado_custom.yaml", "ado_org": ado_org, "ado_project": ado_project, + "framework": selected_framework, }, project_id=f"{ado_org}/{ado_project}" if ado_org and ado_project else None, adapter="ado", diff --git a/src/specfact_cli/registry/module_installer.py b/src/specfact_cli/registry/module_installer.py index 24332ac3..481d8bf1 100644 --- a/src/specfact_cli/registry/module_installer.py +++ b/src/specfact_cli/registry/module_installer.py @@ -422,7 +422,10 @@ def install_module( with tarfile.open(archive_path, "r:gz") as archive: members = archive.getmembers() _validate_archive_members(members, extract_root) - archive.extractall(path=extract_root, members=members) + try: + archive.extractall(path=extract_root, members=members, filter="data") + except TypeError: + archive.extractall(path=extract_root, members=members) candidate_dirs = [p for p in extract_root.rglob("module-package.yaml") if p.is_file()] if not candidate_dirs: 
diff --git a/src/specfact_cli/runtime.py b/src/specfact_cli/runtime.py index 7e7d96be..94de6ff7 100644 --- a/src/specfact_cli/runtime.py +++ b/src/specfact_cli/runtime.py @@ -11,6 +11,7 @@ import json import logging import os +import sys from enum import StrEnum from logging.handlers import RotatingFileHandler from typing import Any @@ -196,6 +197,42 @@ def get_configured_console() -> Console: return _console_cache[mode] +@beartype +def refresh_loaded_module_consoles() -> int: + """ + Rebind loaded module-level `console` variables to the current configured Console. + + This prevents stale Rich Console instances from referencing closed CliRunner capture + streams across sequential command invocations in tests. + + Returns: + Number of module console attributes refreshed. + """ + from rich.console import Console as RichConsole + + refreshed = 0 + fresh_console = get_configured_console() + for module in list(sys.modules.values()): + if module is None: + continue + module_name = getattr(module, "__name__", "") + if not isinstance(module_name, str) or not module_name.startswith("specfact_cli."): + continue + if not hasattr(module, "console"): + continue + try: + current_console = module.console + except Exception: + continue + if isinstance(current_console, RichConsole): + try: + module.console = fresh_console + refreshed += 1 + except Exception: + continue + return refreshed + + def _get_debug_caller() -> str: """Return module:function for the caller of debug_print/debug_log_operation (first frame outside runtime).""" for frame_info in inspect.stack(): diff --git a/tests/integration/backlog/test_ado_markdown_rendering.py b/tests/integration/backlog/test_ado_markdown_rendering.py index 5be5d02e..ba0a6a45 100644 --- a/tests/integration/backlog/test_ado_markdown_rendering.py +++ b/tests/integration/backlog/test_ado_markdown_rendering.py @@ -44,9 +44,10 @@ def test_update_backlog_item_with_markdown_format(self, mock_patch: MagicMock) - title="Test Work Item", 
body_markdown="# Title\n\nThis is **bold** text.", state="Active", + acceptance_criteria="- [ ] criterion one", ) - result = adapter.update_backlog_item(item, update_fields=["body_markdown"]) + result = adapter.update_backlog_item(item, update_fields=["body_markdown", "acceptance_criteria"]) # Verify API was called assert mock_patch.called @@ -75,6 +76,27 @@ def test_update_backlog_item_with_markdown_format(self, mock_patch: MagicMock) - desc_idx = operations.index(description_op) assert format_idx < desc_idx, "Format operation should come before description operation" + ac_format_op = next( + ( + op + for op in operations + if "/multilineFieldsFormat/" in op.get("path", "") and "AcceptanceCriteria" in op.get("path", "") + ), + None, + ) + ac_replace_op = next( + ( + op + for op in operations + if op.get("path", "").startswith("/fields/") and "AcceptanceCriteria" in op.get("path", "") + ), + None, + ) + assert ac_format_op is not None + assert ac_format_op["value"] == "Markdown" + assert ac_replace_op is not None + assert ac_replace_op["value"] == "- [ ] criterion one" + # Verify result assert result.id == "1" assert result.provider == "ado" diff --git a/tests/unit/adapters/test_ado_backlog_adapter.py b/tests/unit/adapters/test_ado_backlog_adapter.py index 76a63a92..d920cef6 100644 --- a/tests/unit/adapters/test_ado_backlog_adapter.py +++ b/tests/unit/adapters/test_ado_backlog_adapter.py @@ -9,11 +9,13 @@ from unittest.mock import MagicMock, patch import pytest +import requests from beartype import beartype from specfact_cli.adapters.ado import AdoAdapter from specfact_cli.backlog.adapters.base import BacklogAdapter from specfact_cli.backlog.filters import BacklogFilters +from specfact_cli.backlog.mappers.ado_mapper import AdoFieldMapper from specfact_cli.models.backlog_item import BacklogItem @@ -82,6 +84,113 @@ def test_fetch_backlog_items_with_state_filter(self, mock_get: MagicMock, mock_p assert len(items) >= 0 # May be filtered further + @beartype + 
@patch("specfact_cli.adapters.backlog_base.time.sleep", return_value=None) + @patch("specfact_cli.adapters.ado.requests.post") + @patch("specfact_cli.adapters.ado.requests.get") + def test_fetch_backlog_items_retries_transient_transport_errors( + self, + mock_get: MagicMock, + mock_post: MagicMock, + _mock_sleep: MagicMock, + ) -> None: + """fetch_backlog_items should retry transient WIQL/workitem transport failures.""" + mock_wiql_response = MagicMock() + mock_wiql_response.status_code = 200 + mock_wiql_response.raise_for_status = MagicMock() + mock_wiql_response.json.return_value = {"workItems": [{"id": 1}]} + + mock_get_response = MagicMock() + mock_get_response.status_code = 200 + mock_get_response.raise_for_status = MagicMock() + mock_get_response.json.return_value = { + "value": [ + { + "id": 1, + "url": "https://dev.azure.com/test/project/_apis/wit/workitems/1", + "fields": { + "System.Title": "Retry Item", + "System.Description": "Description 1", + "System.State": "New", + }, + } + ] + } + + mock_post.side_effect = [requests.ConnectionError("connection reset"), mock_wiql_response] + mock_get.side_effect = [requests.ConnectionError("remote closed"), mock_get_response] + + adapter = AdoAdapter(org="test", project="project", api_token="token") + items = adapter.fetch_backlog_items(BacklogFilters(use_current_iteration_default=False)) + + assert len(items) == 1 + assert mock_post.call_count == 2 + assert mock_get.call_count == 2 + + @beartype + @patch("specfact_cli.adapters.ado.requests.post") + @patch("specfact_cli.adapters.ado.requests.get") + def test_fetch_backlog_items_issue_id_uses_direct_lookup( + self, + mock_get: MagicMock, + mock_post: MagicMock, + ) -> None: + """When issue_id is set, adapter should fetch directly by ID and bypass WIQL query path.""" + mock_get_response = MagicMock() + mock_get_response.status_code = 200 + mock_get_response.raise_for_status = MagicMock() + mock_get_response.json.return_value = { + "id": 185, + "url": 
"https://dev.azure.com/test/project/_apis/wit/workitems/185", + "fields": { + "System.Title": "Fix the error", + "System.State": "New", + "System.Description": "Description", + }, + } + mock_get.return_value = mock_get_response + mock_post.side_effect = AssertionError("WIQL should not be called for direct issue_id lookup") + + adapter = AdoAdapter(org="test", project="project", api_token="token") + adapter._get_current_iteration = MagicMock(side_effect=AssertionError("current iteration lookup not expected")) # type: ignore[method-assign] + items = adapter.fetch_backlog_items(BacklogFilters(issue_id="185")) + + assert len(items) == 1 + assert items[0].id == "185" + assert mock_get.call_count == 1 + assert mock_post.call_count == 0 + first_url = mock_get.call_args.kwargs.get("url", mock_get.call_args.args[0] if mock_get.call_args.args else "") + assert "_apis/wit/workitems/185" in first_url + + @beartype + @patch("specfact_cli.adapters.ado.requests.post") + @patch("specfact_cli.adapters.ado.requests.get") + def test_fetch_backlog_items_issue_id_respects_state_filter( + self, + mock_get: MagicMock, + mock_post: MagicMock, + ) -> None: + """Direct ID lookup still applies explicit post-fetch state filters.""" + mock_get_response = MagicMock() + mock_get_response.status_code = 200 + mock_get_response.raise_for_status = MagicMock() + mock_get_response.json.return_value = { + "id": 185, + "url": "https://dev.azure.com/test/project/_apis/wit/workitems/185", + "fields": { + "System.Title": "Fix the error", + "System.State": "New", + "System.Description": "Description", + }, + } + mock_get.return_value = mock_get_response + mock_post.side_effect = AssertionError("WIQL should not be called for direct issue_id lookup") + + adapter = AdoAdapter(org="test", project="project", api_token="token") + items = adapter.fetch_backlog_items(BacklogFilters(issue_id="185", state="Active")) + + assert items == [] + @beartype @patch("specfact_cli.adapters.ado.requests.patch") def 
test_update_backlog_item(self, mock_patch: MagicMock) -> None: @@ -112,13 +221,10 @@ def test_update_backlog_item(self, mock_patch: MagicMock) -> None: @beartype @patch("specfact_cli.adapters.ado.requests.patch") - def test_update_backlog_item_multiple_field_mappings_prefers_system_fields(self, mock_patch: MagicMock) -> None: - """Test that update_backlog_item uses System.* fields when multiple mappings exist. - - This test verifies the fix for the bug where reverse_mappings would use - Microsoft.VSTS.Common.* fields (last entry) but ado_fields would use System.* - fields (preferred), causing the membership check to fail and skipping updates. - """ + def test_update_backlog_item_multiple_field_mappings_uses_resolved_write_target( + self, mock_patch: MagicMock + ) -> None: + """Test update_backlog_item uses mapper-resolved write targets for ambiguous canonical fields.""" # Mock ADO API response mock_response = MagicMock() mock_response.json.return_value = { @@ -134,22 +240,23 @@ def test_update_backlog_item_multiple_field_mappings_prefers_system_fields(self, mock_response.raise_for_status = MagicMock() mock_patch.return_value = mock_response - adapter = AdoAdapter(org="test", project="project", api_token="token") - item = BacklogItem( - id="1", - provider="ado", - url="", - title="Test Item", - body_markdown="Description", - state="Active", - acceptance_criteria="Acceptance criteria", - story_points=5, - ) + with patch.dict("os.environ", {}, clear=True): + adapter = AdoAdapter(org="test", project="project", api_token="token") + item = BacklogItem( + id="1", + provider="ado", + url="", + title="Test Item", + body_markdown="Description", + state="Active", + acceptance_criteria="Acceptance criteria", + story_points=5, + ) - # Update with fields that have multiple mappings - result = adapter.update_backlog_item( - item, update_fields=["acceptance_criteria", "story_points", "body_markdown"] - ) + # Update with fields that have multiple mappings + result = 
adapter.update_backlog_item( + item, update_fields=["acceptance_criteria", "story_points", "body_markdown"] + ) # Verify the update was successful assert result.id == "1" @@ -162,14 +269,12 @@ def test_update_backlog_item_multiple_field_mappings_prefers_system_fields(self, call_args = mock_patch.call_args operations = call_args[1]["json"] # JSON body contains operations - # Verify that System.* fields are used (not Microsoft.VSTS.Common.*) - # This ensures consistency with map_from_canonical preference logic - # Check that System.AcceptanceCriteria is used (not Microsoft.VSTS.Common.AcceptanceCriteria) - # The default mappings have both, but System.* should be preferred + # Verify that mapper-resolved acceptance criteria field is used. acceptance_criteria_ops = [op for op in operations if "AcceptanceCriteria" in op.get("path", "")] if acceptance_criteria_ops: - # Should use System.AcceptanceCriteria (preferred) not Microsoft.VSTS.Common.AcceptanceCriteria - assert any("System.AcceptanceCriteria" in op["path"] for op in acceptance_criteria_ops) + expected_acceptance_field = AdoFieldMapper().resolve_write_target_field("acceptance_criteria") + assert expected_acceptance_field is not None + assert any(expected_acceptance_field in op["path"] for op in acceptance_criteria_ops) # Check that story points field is used (could be either Microsoft.VSTS.Common.StoryPoints # or Microsoft.VSTS.Scheduling.StoryPoints, but should be consistent with map_from_canonical) @@ -178,6 +283,115 @@ def test_update_backlog_item_multiple_field_mappings_prefers_system_fields(self, # Verify story points update was included assert len(story_points_ops) > 0 + @beartype + @patch("specfact_cli.adapters.ado.requests.patch") + def test_update_backlog_item_uses_custom_story_points_field_mapping(self, mock_patch: MagicMock, tmp_path) -> None: + """ADO writeback should use the configured custom story points target field.""" + mock_response = MagicMock() + mock_response.json.return_value = { + "id": 1, 
+ "url": "https://dev.azure.com/test/project/_apis/wit/workitems/1", + "fields": { + "System.Title": "Test Item", + "System.Description": "Description", + "Microsoft.VSTS.Scheduling.StoryPoints": 8, + }, + } + mock_response.raise_for_status = MagicMock() + mock_patch.return_value = mock_response + + custom_mapping_file = tmp_path / "ado_custom.yaml" + custom_mapping_file.write_text( + """ +field_mappings: + Microsoft.VSTS.Scheduling.StoryPoints: story_points +""".strip(), + encoding="utf-8", + ) + + adapter = AdoAdapter(org="test", project="project", api_token="token") + item = BacklogItem( + id="1", + provider="ado", + url="", + title="Test Item", + body_markdown="Description", + state="Active", + story_points=8, + provider_fields={"fields": {"Microsoft.VSTS.Scheduling.StoryPoints": 3}}, + ) + + with patch.dict( + "os.environ", + {"SPECFACT_ADO_CUSTOM_MAPPING": str(custom_mapping_file)}, + clear=False, + ): + adapter.update_backlog_item(item, update_fields=["story_points", "body_markdown"]) + + operations = mock_patch.call_args[1]["json"] + story_points_ops = [ + op for op in operations if op.get("path") == "/fields/Microsoft.VSTS.Scheduling.StoryPoints" + ] + assert len(story_points_ops) == 1 + + @beartype + @patch("specfact_cli.adapters.ado.requests.patch") + def test_create_issue_uses_custom_mapped_fields_and_markdown_multiline_format( + self, mock_patch: MagicMock, tmp_path + ) -> None: + """ADO create_issue should honor custom field mapping and markdown format metadata.""" + mock_response = MagicMock() + mock_response.json.return_value = { + "id": 77, + "url": "https://dev.azure.com/test/project/_apis/wit/workitems/77", + "_links": {"html": {"href": "https://dev.azure.com/test/project/_workitems/edit/77"}}, + } + mock_response.raise_for_status = MagicMock() + mock_patch.return_value = mock_response + + custom_mapping_file = tmp_path / "ado_custom.yaml" + custom_mapping_file.write_text( + """ +field_mappings: + Custom.Description: description + 
Custom.AcceptanceNotes: acceptance_criteria + Custom.EstimatePoints: story_points + Custom.BacklogPriority: priority +""".strip(), + encoding="utf-8", + ) + + adapter = AdoAdapter(org="test", project="project", api_token="token") + payload = { + "title": "Story with custom mapping", + "description": "## Description\\n\\nMarkdown body", + "description_format": "markdown", + "acceptance_criteria": "- [ ] done", + "story_points": 8, + "priority": 2, + } + + with patch.dict( + "os.environ", + {"SPECFACT_ADO_CUSTOM_MAPPING": str(custom_mapping_file)}, + clear=False, + ): + created = adapter.create_issue("test/project", payload) + + assert created["id"] == "77" + + operations = mock_patch.call_args.kwargs["json"] + assert { + "op": "add", + "path": "/fields/Custom.Description", + "value": "## Description\\n\\nMarkdown body", + } in operations + assert {"op": "add", "path": "/multilineFieldsFormat/Custom.Description", "value": "Markdown"} in operations + assert {"op": "add", "path": "/fields/Custom.AcceptanceNotes", "value": "- [ ] done"} in operations + assert {"op": "add", "path": "/multilineFieldsFormat/Custom.AcceptanceNotes", "value": "Markdown"} in operations + assert {"op": "add", "path": "/fields/Custom.EstimatePoints", "value": 8} in operations + assert {"op": "add", "path": "/fields/Custom.BacklogPriority", "value": 2} in operations + @beartype @patch("specfact_cli.adapters.ado.requests.patch") def test_update_backlog_item_writes_description_and_acceptance_to_separate_fields( @@ -218,7 +432,41 @@ def test_update_backlog_item_writes_description_and_acceptance_to_separate_field assert description_op is not None assert description_op["value"] == "Clean description" assert acceptance_op is not None - assert acceptance_op["value"] == "- criterion" + assert any(op.get("value") == "- criterion" for op in operations if "AcceptanceCriteria" in op.get("path", "")) + + @beartype + @patch("specfact_cli.adapters.ado.requests.patch") + def 
test_update_backlog_item_strips_leading_description_heading_for_ado(self, mock_patch: MagicMock) -> None: + """ADO description writeback strips a leading '## Description' scaffold heading.""" + mock_response = MagicMock() + mock_response.json.return_value = { + "id": 1, + "url": "https://dev.azure.com/test/project/_apis/wit/workitems/1", + "fields": { + "System.Title": "Story", + "System.Description": "Clean description", + "System.State": "Active", + }, + } + mock_response.raise_for_status = MagicMock() + mock_patch.return_value = mock_response + + adapter = AdoAdapter(org="test", project="project", api_token="token") + item = BacklogItem( + id="1", + provider="ado", + url="", + title="Story", + body_markdown="## Description\n\nClean description", + state="Active", + ) + + adapter.update_backlog_item(item, update_fields=["body_markdown"]) + + operations = mock_patch.call_args[1]["json"] + description_op = next((op for op in operations if op.get("path") == "/fields/System.Description"), None) + assert description_op is not None + assert description_op["value"] == "Clean description" @beartype def test_validate_round_trip(self) -> None: @@ -416,6 +664,26 @@ def test_auth_headers_no_token(self) -> None: headers = adapter._auth_headers() assert headers == {} + @beartype + @patch("specfact_cli.adapters.backlog_base.time.sleep", return_value=None) + @patch("specfact_cli.adapters.ado.requests.get") + def test_get_current_iteration_retries_transient_transport_error( + self, mock_get: MagicMock, _mock_sleep: MagicMock + ) -> None: + """Current iteration lookup retries on transient connection errors.""" + retry_response = MagicMock() + retry_response.status_code = 200 + retry_response.raise_for_status = MagicMock() + retry_response.json.return_value = {"value": [{"path": "Project\\Sprint 1"}]} + + mock_get.side_effect = [requests.ConnectionError("connection reset"), retry_response] + + adapter = AdoAdapter(org="test", project="project", team="Team A", api_token="token") + 
resolved = adapter._get_current_iteration() + + assert resolved == "Project\\Sprint 1" + assert mock_get.call_count == 2 + @beartype @patch("specfact_cli.adapters.ado.requests.get") def test_get_work_item_comments_follows_continuation_token(self, mock_get: MagicMock) -> None: diff --git a/tests/unit/adapters/test_github_backlog_adapter.py b/tests/unit/adapters/test_github_backlog_adapter.py index 5226bee8..862a94a5 100644 --- a/tests/unit/adapters/test_github_backlog_adapter.py +++ b/tests/unit/adapters/test_github_backlog_adapter.py @@ -9,6 +9,7 @@ from unittest.mock import MagicMock, patch import pytest +import requests from beartype import beartype from specfact_cli.adapters.github import GitHubAdapter @@ -94,6 +95,81 @@ def test_fetch_backlog_items_with_assignee_filter(self, mock_get: MagicMock) -> call_args = mock_get.call_args assert "assignee:alice" in call_args[1]["params"]["q"] + @beartype + @patch("specfact_cli.adapters.github.requests.get") + def test_fetch_backlog_items_does_not_plaintext_refilter_provider_search(self, mock_get: MagicMock) -> None: + """Provider search syntax should not be dropped by local title/body substring filtering.""" + mock_response = MagicMock() + mock_response.json.return_value = { + "items": [ + { + "number": 7, + "html_url": "https://github.com/test/repo/issues/7", + "title": "Fix OAuth callback", + "body": "Investigate callback race condition", + "state": "open", + "assignees": [], + "labels": [{"name": "bug"}], + } + ] + } + mock_response.raise_for_status = MagicMock() + mock_get.return_value = mock_response + + adapter = GitHubAdapter(repo_owner="test", repo_name="repo", api_token="token") + items = adapter.fetch_backlog_items(BacklogFilters(search="label:bug")) + + assert len(items) == 1 + assert items[0].id == "7" + + @beartype + @patch("specfact_cli.adapters.github.requests.get") + def test_fetch_backlog_items_issue_id_uses_direct_lookup(self, mock_get: MagicMock) -> None: + """Issue-id fetch should call the direct 
issue endpoint, not search.""" + mock_response = MagicMock() + mock_response.json.return_value = { + "number": 42, + "html_url": "https://github.com/test/repo/issues/42", + "title": "Direct issue", + "body": "Issue body", + "state": "open", + "assignees": [{"login": "alice"}], + "labels": [{"name": "feature"}], + } + mock_response.raise_for_status = MagicMock() + mock_get.return_value = mock_response + + adapter = GitHubAdapter(repo_owner="test", repo_name="repo", api_token="token") + items = adapter.fetch_backlog_items(BacklogFilters(issue_id="42")) + + assert len(items) == 1 + assert items[0].id == "42" + request_url = mock_get.call_args[0][0] + assert request_url.endswith("/repos/test/repo/issues/42") + assert "/search/issues" not in request_url + + @beartype + @patch("specfact_cli.adapters.github.requests.get") + def test_fetch_backlog_items_issue_id_respects_explicit_state_filter(self, mock_get: MagicMock) -> None: + """Direct issue lookup should still honor explicit post-filters.""" + mock_response = MagicMock() + mock_response.json.return_value = { + "number": 42, + "html_url": "https://github.com/test/repo/issues/42", + "title": "Direct issue", + "body": "Issue body", + "state": "open", + "assignees": [{"login": "alice"}], + "labels": [{"name": "feature"}], + } + mock_response.raise_for_status = MagicMock() + mock_get.return_value = mock_response + + adapter = GitHubAdapter(repo_owner="test", repo_name="repo", api_token="token") + items = adapter.fetch_backlog_items(BacklogFilters(issue_id="42", state="closed")) + + assert items == [] + @beartype @patch("specfact_cli.adapters.github.requests.get") def test_fetch_backlog_items_with_me_assignee_uses_at_me_query(self, mock_get: MagicMock) -> None: @@ -123,6 +199,36 @@ def test_fetch_backlog_items_with_me_assignee_uses_at_me_query(self, mock_get: M assert "assignee:@me" in call_args[1]["params"]["q"] assert len(items) == 1 + @beartype + @patch("specfact_cli.adapters.github.requests.get") + def 
test_fetch_backlog_items_retries_transient_transport_errors(self, mock_get: MagicMock) -> None: + """Search fetch should retry transient transport failures before succeeding.""" + success_response = MagicMock() + success_response.json.return_value = { + "items": [ + { + "number": 1, + "html_url": "https://github.com/test/repo/issues/1", + "title": "Recovered issue", + "body": "Issue body", + "state": "open", + "assignees": [], + "labels": [], + } + ] + } + success_response.raise_for_status = MagicMock() + empty_response = MagicMock() + empty_response.json.return_value = {"items": []} + empty_response.raise_for_status = MagicMock() + mock_get.side_effect = [requests.ConnectionError("temporary outage"), success_response, empty_response] + + adapter = GitHubAdapter(repo_owner="test", repo_name="repo", api_token="token") + items = adapter.fetch_backlog_items(BacklogFilters(state="open")) + + assert len(items) == 1 + assert mock_get.call_count >= 2 + @beartype @patch("specfact_cli.adapters.github.requests.patch") def test_update_backlog_item(self, mock_patch: MagicMock) -> None: @@ -225,3 +331,51 @@ def test_fetch_backlog_items_requires_repo(self) -> None: with pytest.raises(ValueError, match="repo_owner and repo_name required"): adapter.fetch_backlog_items(filters) + + @beartype + def test_resolve_github_type_mapping_story_falls_back_to_feature(self) -> None: + """GitHub type mapping should fall back story -> feature when story is absent.""" + mapping = {"feature": "IT_FEATURE_ID"} + resolved = GitHubAdapter._resolve_github_type_mapping_id(mapping, "story") + assert resolved == "IT_FEATURE_ID" + + @beartype + @patch.object(GitHubAdapter, "_github_graphql") + def test_try_set_github_issue_type_uses_story_feature_fallback(self, mock_graphql: MagicMock) -> None: + """Issue type assignment should use feature id when story id is unavailable.""" + adapter = GitHubAdapter(repo_owner="test", repo_name="repo", api_token="token") + adapter._try_set_github_issue_type( + 
"ISSUE_NODE_ID", + "story", + {"github_issue_types": {"type_ids": {"feature": "IT_FEATURE_ID"}}}, + ) + + assert mock_graphql.called is True + variables = mock_graphql.call_args[0][1] + assert variables["issueTypeId"] == "IT_FEATURE_ID" + + @beartype + @patch.object(GitHubAdapter, "_github_graphql") + def test_try_set_github_project_type_field_uses_story_feature_fallback(self, mock_graphql: MagicMock) -> None: + """ProjectV2 type assignment should use feature option when story option is unavailable.""" + adapter = GitHubAdapter(repo_owner="test", repo_name="repo", api_token="token") + mock_graphql.side_effect = [ + {"addProjectV2ItemById": {"item": {"id": "ITEM_NODE_ID"}}}, + {"updateProjectV2ItemFieldValue": {"projectV2Item": {"id": "ITEM_NODE_ID"}}}, + ] + + adapter._try_set_github_project_type_field( + "ISSUE_NODE_ID", + "story", + { + "github_project_v2": { + "project_id": "PVT_ID", + "type_field_id": "FIELD_ID", + "type_option_ids": {"feature": "OPT_FEATURE_ID"}, + } + }, + ) + + assert mock_graphql.call_count == 2 + variables = mock_graphql.call_args_list[1][0][1] + assert variables["optionId"] == "OPT_FEATURE_ID" diff --git a/tests/unit/backlog/test_ai_refiner.py b/tests/unit/backlog/test_ai_refiner.py index 3142a707..70369195 100644 --- a/tests/unit/backlog/test_ai_refiner.py +++ b/tests/unit/backlog/test_ai_refiner.py @@ -110,6 +110,31 @@ def test_generate_refinement_prompt_instructs_to_omit_unknown_metadata_fields( assert "omit unknown metadata fields" in prompt.lower() assert "do not emit placeholders" in prompt.lower() + @beartype + def test_generate_refinement_prompt_for_ado_excludes_story_points_from_required_sections( + self, refiner: BacklogAIRefiner + ) -> None: + """ADO prompt should not require Story Points as body section.""" + template = BacklogTemplate( + template_id="scrum_user_story_v1", + name="Scrum User Story", + description="", + required_sections=["As a", "I want", "So that", "Acceptance Criteria", "Story Points"], + 
optional_sections=["Area Path", "Iteration Path", "Notes"], + ) + item = BacklogItem( + id="100", + provider="ado", + url="https://dev.azure.com/org/project/_workitems/edit/100", + title="Story", + body_markdown="Body", + state="Active", + ) + prompt = refiner.generate_refinement_prompt(item, template) + required_section_block = prompt.split("Required Sections:")[1].split("Optional Sections:")[0] + assert "- Story Points" not in required_section_block + assert "- As a" in required_section_block + @beartype def test_validate_and_score_complete_refinement( self, refiner: BacklogAIRefiner, arbitrary_backlog_item: BacklogItem, user_story_template: BacklogTemplate diff --git a/tests/unit/backlog/test_converter.py b/tests/unit/backlog/test_converter.py index a9e790ba..a700a9a2 100644 --- a/tests/unit/backlog/test_converter.py +++ b/tests/unit/backlog/test_converter.py @@ -10,6 +10,7 @@ import pytest from beartype import beartype +from icontract.errors import ViolationError from specfact_cli.backlog.converter import convert_ado_work_item_to_backlog_item, convert_github_issue_to_backlog_item @@ -156,7 +157,7 @@ def test_convert_github_issue_missing_required_fields_raises(self) -> None: # Missing number and url } - with pytest.raises(ValueError, match="must have 'number' or 'id'"): + with pytest.raises((ValueError, ViolationError), match=r"number|id"): convert_github_issue_to_backlog_item(issue_data) @@ -185,6 +186,24 @@ def test_convert_minimal_ado_work_item(self) -> None: assert item.state == "new" assert item.canonical_url is None + @beartype + def test_convert_ado_work_item_converts_html_description_to_markdown(self) -> None: + """ADO HTML description should be normalized to markdown-like text.""" + work_item_data = { + "id": 790, + "url": "https://dev.azure.com/org/project/_apis/wit/workitems/790", + "fields": { + "System.Title": "HTML Work Item", + "System.Description": "<h2>Summary</h2><p>This is <strong>important</strong>.</p>", + "System.State": "Active", + }, + } 
+ + item = convert_ado_work_item_to_backlog_item(work_item_data) + + assert "## Summary" in item.body_markdown + assert "**important**" in item.body_markdown + @beartype def test_convert_ado_work_item_with_assignee(self) -> None: """Test converting ADO work item with assignee.""" @@ -364,7 +383,7 @@ def test_convert_ado_work_item_missing_required_fields_raises(self) -> None: # Missing url and fields } - with pytest.raises(ValueError, match="must have 'url'"): + with pytest.raises((ValueError, ViolationError), match="url"): convert_ado_work_item_to_backlog_item(work_item_data) @beartype diff --git a/tests/unit/backlog/test_field_mappers.py b/tests/unit/backlog/test_field_mappers.py index 814461fb..17a8ed98 100644 --- a/tests/unit/backlog/test_field_mappers.py +++ b/tests/unit/backlog/test_field_mappers.py @@ -232,6 +232,57 @@ def test_extract_acceptance_criteria_from_field(self) -> None: fields = mapper.extract_fields(item_data) assert fields["acceptance_criteria"] == "AC1\nAC2" + def test_extract_acceptance_criteria_converts_html_to_markdown(self) -> None: + """HTML-rich acceptance criteria should be normalized to markdown-like text.""" + mapper = AdoFieldMapper() + item_data = { + "fields": { + "System.Description": "<p>Description</p>", + "System.AcceptanceCriteria": "<p><strong>Given</strong> user logs in</p><ul><li>Then dashboard loads</li></ul>", + "System.Title": "Test Item", + } + } + fields = mapper.extract_fields(item_data) + assert "**Given** user logs in" in (fields["acceptance_criteria"] or "") + assert "- Then dashboard loads" in (fields["acceptance_criteria"] or "") + + def test_extract_acceptance_criteria_preserves_br_line_breaks(self) -> None: + """BR tags should be converted into newline separators during normalization.""" + mapper = AdoFieldMapper() + item_data = { + "fields": { + "System.Description": "Description", + "System.AcceptanceCriteria": "<p>Given user logs in<br />Then dashboard loads</p>", + "System.Title": "Test Item", + } + } + 
fields = mapper.extract_fields(item_data) + assert fields["acceptance_criteria"] == "Given user logs in\nThen dashboard loads" + + def test_extract_description_preserves_non_html_angle_brackets(self) -> None: + """Plain text with angle brackets should not be treated as HTML and stripped.""" + mapper = AdoFieldMapper() + item_data = { + "fields": { + "System.Description": "Validate x < y > z and keep <tenant_id> placeholder.", + "System.Title": "Test Item", + } + } + fields = mapper.extract_fields(item_data) + assert fields["description"] == "Validate x < y > z and keep <tenant_id> placeholder." + + def test_extract_description_preserves_placeholders_inside_html_content(self) -> None: + """Known HTML normalization should not remove non-HTML placeholder tokens.""" + mapper = AdoFieldMapper() + item_data = { + "fields": { + "System.Description": "<p>Use <tenant_id> from context.</p>", + "System.Title": "Test Item", + } + } + fields = mapper.extract_fields(item_data) + assert fields["description"] == "Use <tenant_id> from context." 
+ def test_extract_acceptance_criteria_from_microsoft_vsts_common(self) -> None: """Test extracting acceptance criteria from Microsoft.VSTS.Common.AcceptanceCriteria field.""" mapper = AdoFieldMapper() @@ -442,6 +493,24 @@ def test_map_from_canonical(self) -> None: assert "System.WorkItemType" in ado_fields assert ado_fields["System.WorkItemType"] == "User Story" + def test_resolve_write_target_prefers_custom_mapping_field(self, tmp_path: Path) -> None: + """Write target resolution should prioritize explicit custom field mapping.""" + custom_mapping_file = tmp_path / "ado_custom.yaml" + custom_mapping_data = { + "field_mappings": { + "Microsoft.VSTS.Scheduling.StoryPoints": "story_points", + }, + } + custom_mapping_file.write_text(yaml.dump(custom_mapping_data), encoding="utf-8") + + mapper = AdoFieldMapper(custom_mapping_file=custom_mapping_file) + resolved = mapper.resolve_write_target_field( + canonical_field="story_points", + provider_field_names={"Microsoft.VSTS.Common.StoryPoints"}, + ) + + assert resolved == "Microsoft.VSTS.Scheduling.StoryPoints" + class TestCustomTemplateMapping: """Tests for custom template mapping support.""" diff --git a/tests/unit/backlog/test_template_detector.py b/tests/unit/backlog/test_template_detector.py index e2820c97..33c47584 100644 --- a/tests/unit/backlog/test_template_detector.py +++ b/tests/unit/backlog/test_template_detector.py @@ -9,7 +9,7 @@ import pytest from beartype import beartype -from specfact_cli.backlog.template_detector import TemplateDetector +from specfact_cli.backlog.template_detector import TemplateDetector, get_effective_required_sections from specfact_cli.models.backlog_item import BacklogItem from specfact_cli.templates.registry import BacklogTemplate, TemplateRegistry @@ -223,3 +223,23 @@ def test_detect_with_persona_framework_provider_filtering(self, template_registr # Test without framework filter (should match default) result = detector.detect_template(item, provider="github", framework=None) assert 
result.template_id in ["user_story_v1", "scrum_story_v1"] # Either could match + + @beartype + def test_ado_effective_required_sections_ignores_structured_metric_sections(self) -> None: + """ADO should not require structured metric sections in markdown body.""" + template = BacklogTemplate( + template_id="scrum_user_story_v1", + name="Scrum User Story", + required_sections=["As a", "I want", "So that", "Acceptance Criteria", "Story Points"], + ) + item = BacklogItem( + id="8", + provider="ado", + url="https://dev.azure.com/org/project/_workitems/edit/8", + title="User Story", + body_markdown="## As a\nuser\n\n## I want\nvalue\n\n## So that\nbenefit\n\n## Acceptance Criteria\n- [ ] done", + state="Active", + ) + effective = get_effective_required_sections(item, template) + assert "Story Points" not in effective + assert "Acceptance Criteria" in effective diff --git a/tests/unit/commands/test_backlog_commands.py b/tests/unit/commands/test_backlog_commands.py index e047b840..458d8640 100644 --- a/tests/unit/commands/test_backlog_commands.py +++ b/tests/unit/commands/test_backlog_commands.py @@ -22,11 +22,14 @@ _build_refine_export_content, _build_refine_preview_comment_empty_panel, _build_refine_preview_comment_panels, + _detect_significant_content_loss, _item_needs_refinement, _parse_refined_export_markdown, _parse_refinement_output_fields, + _resolve_backlog_provider_framework, _resolve_refine_export_comment_window, _resolve_refine_preview_comment_window, + _resolve_target_template_for_refine_item, app as backlog_app, ) from specfact_cli.templates.registry import BacklogTemplate, TemplateRegistry @@ -35,6 +38,139 @@ runner = CliRunner() +@patch("specfact_cli.modules.backlog.src.commands._resolve_standup_options") +@patch("specfact_cli.modules.backlog.src.commands._fetch_backlog_items") +def test_daily_issue_id_bypasses_implicit_default_state( + mock_fetch_backlog_items: MagicMock, + mock_resolve_standup_options: MagicMock, +) -> None: + """`backlog daily --id` 
should not apply implicit default state/assignee filters.""" + mock_resolve_standup_options.return_value = ("open", 20, "me") + mock_fetch_backlog_items.return_value = [ + BacklogItem( + id="185", + provider="ado", + url="https://dev.azure.com/org/project/_apis/wit/workitems/185", + title="Fix the error", + body_markdown="Description", + state="new", + assignees=["dominikus.nold@web.de"], + ) + ] + + result = runner.invoke( + backlog_app, + [ + "daily", + "ado", + "--ado-org", + "dominikusnold", + "--ado-project", + "Specfact CLI", + "--id", + "185", + ], + ) + + assert result.exit_code == 0 + assert "No backlog item with id" not in result.stdout + assert mock_fetch_backlog_items.call_args.kwargs["state"] is None + assert mock_fetch_backlog_items.call_args.kwargs["assignee"] is None + + +@patch("specfact_cli.modules.backlog.src.commands._resolve_standup_options") +@patch("specfact_cli.modules.backlog.src.commands._fetch_backlog_items") +def test_daily_reports_default_filters_when_no_items( + mock_fetch_backlog_items: MagicMock, + mock_resolve_standup_options: MagicMock, +) -> None: + """`backlog daily` should show implicit defaults in UI output for empty results.""" + mock_resolve_standup_options.return_value = ("open", 20, "me") + mock_fetch_backlog_items.return_value = [] + + result = runner.invoke( + backlog_app, + [ + "daily", + "ado", + "--ado-org", + "dominikusnold", + "--ado-project", + "Specfact CLI", + ], + ) + + assert result.exit_code == 0 + assert "Applied filters:" in result.stdout + assert "state=open (default)" in result.stdout + assert "assignee=me" in result.stdout + assert "(default)" in result.stdout + assert "limit=20 (default)" in result.stdout + + +@patch("specfact_cli.modules.backlog.src.commands._resolve_standup_options") +@patch("specfact_cli.modules.backlog.src.commands._fetch_backlog_items") +def test_daily_accepts_any_for_state_and_assignee_as_no_filter( + mock_fetch_backlog_items: MagicMock, + mock_resolve_standup_options: MagicMock, +) 
-> None: + """`--state any` / `--assignee any` should disable both filters.""" + mock_resolve_standup_options.return_value = (None, 20, None) + mock_fetch_backlog_items.return_value = [] + + result = runner.invoke( + backlog_app, + [ + "daily", + "ado", + "--ado-org", + "dominikusnold", + "--ado-project", + "Specfact CLI", + "--state", + "any", + "--assignee", + "any", + ], + ) + + assert result.exit_code == 0 + assert mock_resolve_standup_options.call_args.kwargs["state_filter_disabled"] is True + assert mock_resolve_standup_options.call_args.kwargs["assignee_filter_disabled"] is True + assert mock_fetch_backlog_items.call_args.kwargs["state"] is None + assert mock_fetch_backlog_items.call_args.kwargs["assignee"] is None + + +@patch("specfact_cli.modules.backlog.src.commands._fetch_backlog_items") +def test_daily_any_filters_render_as_disabled_scope( + mock_fetch_backlog_items: MagicMock, +) -> None: + """`--state any --assignee any` should render disabled filter scope in output.""" + mock_fetch_backlog_items.return_value = [] + + result = runner.invoke( + backlog_app, + [ + "daily", + "ado", + "--ado-org", + "dominikusnold", + "--ado-project", + "Specfact CLI", + "--state", + "any", + "--assignee", + "any", + ], + ) + + assert result.exit_code == 0 + output = " ".join(result.stdout.split()) + assert "Applied filters:" in output + assert "state=— (explicit)" in output + assert "assignee=— (explicit)" in output + + class TestBacklogPreviewOutput: """Tests for backlog preview output display.""" @@ -116,7 +252,7 @@ def test_map_fields_fetches_ado_fields( mock_select: MagicMock, mock_get: MagicMock, ) -> None: - """Test that map-fields command fetches fields from ADO API.""" + """Test that map-fields command fetches ADO metadata endpoints.""" # Mock ADO API response mock_response = MagicMock() mock_response.json.return_value = { @@ -157,10 +293,11 @@ def test_map_fields_fetches_ado_fields( # Should call ADO API assert mock_get.called - call_args = 
mock_get.call_args - assert "test-org" in call_args[0][0] - assert "test-project" in call_args[0][0] - assert "_apis/wit/fields" in call_args[0][0] + called_urls = [str(call.args[0]) for call in mock_get.call_args_list if call.args] + assert any("test-org" in url for url in called_urls) + assert any("test-project" in url for url in called_urls) + # map-fields now resolves/processes work-item type metadata before field mapping prompts + assert any("_apis/wit/workitemtypes" in url for url in called_urls) @patch("requests.get") @patch("questionary.select") @@ -508,6 +645,98 @@ def test_map_fields_blank_project_v2_clears_stale_project_mapping( provider_fields = github_settings.get("provider_fields", {}) assert provider_fields.get("github_project_v2") is None + @patch("requests.get") + @patch("questionary.select") + def test_map_fields_ado_framework_cli_persists_to_config_and_mapping( + self, mock_select: MagicMock, mock_get: MagicMock, tmp_path + ) -> None: + """ADO map-fields should persist selected framework for deterministic refine steering.""" + # ADO fields API response + mock_fields_response = MagicMock() + mock_fields_response.raise_for_status.return_value = None + mock_fields_response.json.return_value = { + "value": [ + {"referenceName": "System.Description", "name": "Description"}, + {"referenceName": "System.AcceptanceCriteria", "name": "Acceptance Criteria"}, + {"referenceName": "Microsoft.VSTS.Scheduling.StoryPoints", "name": "Story Points"}, + ] + } + # ADO work item types API response (detection call; should not override explicit CLI value) + mock_types_response = MagicMock() + mock_types_response.raise_for_status.return_value = None + mock_types_response.json.return_value = { + "value": [{"name": "Product Backlog Item"}, {"name": "Bug"}, {"name": "Task"}] + } + mock_get.side_effect = [mock_fields_response, mock_types_response] + + # Field selection prompts: map none for all canonical fields + mock_select.return_value.ask.return_value = "<no mapping>" + 
+ import os + + cwd = Path.cwd() + try: + os.chdir(tmp_path) + result = runner.invoke( + backlog_app, + [ + "map-fields", + "--provider", + "ado", + "--ado-org", + "test-org", + "--ado-project", + "test-project", + "--ado-token", + "test-token", + "--ado-framework", + "scrum", + ], + ) + finally: + os.chdir(cwd) + + assert result.exit_code == 0 + ado_custom = tmp_path / ".specfact" / "templates" / "backlog" / "field_mappings" / "ado_custom.yaml" + assert ado_custom.exists() + custom_payload = yaml.safe_load(ado_custom.read_text(encoding="utf-8")) + assert custom_payload["framework"] == "scrum" + + cfg_file = tmp_path / ".specfact" / "backlog-config.yaml" + assert cfg_file.exists() + loaded = yaml.safe_load(cfg_file.read_text(encoding="utf-8")) + ado_settings = loaded["backlog_config"]["providers"]["ado"]["settings"] + assert ado_settings["framework"] == "scrum" + + def test_resolve_backlog_provider_framework_reads_backlog_config(self, tmp_path) -> None: + """Framework resolver should read provider framework from backlog-config settings.""" + import os + + spec_dir = tmp_path / ".specfact" + spec_dir.mkdir(parents=True, exist_ok=True) + (spec_dir / "backlog-config.yaml").write_text( + """ +backlog_config: + providers: + ado: + adapter: ado + project_id: test-org/test-project + settings: + framework: scrum + field_mapping_file: .specfact/templates/backlog/field_mappings/ado_custom.yaml +""".strip(), + encoding="utf-8", + ) + + cwd = Path.cwd() + try: + os.chdir(tmp_path) + resolved = _resolve_backlog_provider_framework("ado") + finally: + os.chdir(cwd) + + assert resolved == "scrum" + def test_backlog_init_config_scaffolds_default_file(self, tmp_path) -> None: """Test backlog init-config creates default backlog-config scaffold.""" import os @@ -581,6 +810,25 @@ def test_parses_single_item_with_body_and_id(self) -> None: assert result["issue-42"]["body_markdown"] == "Refined body text here." 
assert result["issue-42"].get("title") == "My Title" + def test_parses_item_when_file_starts_with_item_header(self) -> None: + """Parser handles item heading at file start and does not leak heading marker into title.""" + content = """## Item 1: Story title from heading + +**ID**: 123 +**URL**: u +**State**: open +**Provider**: ado + +**Body**: +```markdown +Body content +``` +""" + result = _parse_refined_export_markdown(content) + assert "123" in result + assert result["123"].get("title") == "Story title from heading" + assert result["123"].get("title", "").startswith("## Item") is False + def test_parses_acceptance_criteria_and_metrics(self) -> None: """Parser extracts acceptance criteria and metrics when present.""" content = """ @@ -668,6 +916,32 @@ def foo(): assert "Then we see the error." in body +class TestContentLossDetection: + """Tests for refined-content loss guard used by tmp import.""" + + def test_detects_significant_content_loss(self) -> None: + original = ( + "Implement OAuth login with PKCE, refresh-token rotation, role-based access checks, " + "audit logging for login events, and explicit error handling for expired tokens." + ) + refined = "Implement login support." + has_loss, reason = _detect_significant_content_loss(original, refined) + assert has_loss is True + assert reason + + def test_allows_structured_rewrite_without_loss(self) -> None: + original = ( + "As a platform user I need OAuth login with PKCE and refresh-token rotation so that " + "authentication remains secure and users can re-authenticate without credential prompts." + ) + refined = ( + "## Description\n\nAs a platform user I need OAuth login with PKCE and refresh-token rotation " + "so authentication stays secure and re-authentication works without credential prompts." 
+ ) + has_loss, _reason = _detect_significant_content_loss(original, refined) + assert has_loss is False + + class TestParseRefinementOutputFields: """Tests for parser that normalizes refinement output for writeback.""" @@ -980,6 +1254,23 @@ def test_refine_export_places_instructions_before_first_item(self) -> None: content = _build_refine_export_content(adapter="ado", items=[item], comments_by_item_id={}) assert content.index("## Copilot Instructions") < content.index("## Item 1:") + def test_refine_export_marks_id_as_mandatory_for_import(self) -> None: + """Export guidance should state ID is required and immutable for import.""" + item = BacklogItem( + id="42", + provider="ado", + url="https://dev.azure.com/org/project/_workitems/edit/42", + title="Story", + body_markdown="Body text", + state="Active", + assignees=[], + ) + content = _build_refine_export_content(adapter="ado", items=[item], comments_by_item_id={}) + assert "**ID** is mandatory" in content + assert "must remain unchanged" in content + assert "Do NOT summarize, shorten, or drop details" in content + assert "Template Execution Rules (mandatory)" in content + def test_refine_export_includes_template_guidance_for_items(self) -> None: """Export includes template guidance similar to interactive prompts.""" item = BacklogItem( @@ -1042,6 +1333,119 @@ def test_refine_export_always_uses_full_comment_history(self) -> None: assert last_2 is None +class TestRefineImportFromTmp: + """Tests for refine --import-from-tmp behavior.""" + + @patch("specfact_cli.modules.backlog.src.commands._fetch_backlog_items") + def test_import_from_tmp_fails_when_no_parsed_ids_match_fetched_items( + self, mock_fetch_items: MagicMock, tmp_path + ) -> None: + """Import should fail fast when refined IDs do not match fetched backlog items.""" + mock_fetch_items.return_value = [ + BacklogItem( + id="1", + provider="github", + url="https://github.com/org/repo/issues/1", + title="Issue 1", + body_markdown="Original body", + 
state="open", + assignees=[], + ) + ] + + refined_file = tmp_path / "refined.md" + refined_file.write_text( + """ +## Item 1: Edited Title + +**ID**: 999 +**URL**: https://github.com/org/repo/issues/999 +**State**: open +**Provider**: github + +**Body**: +```markdown +Refined body +``` +""".strip(), + encoding="utf-8", + ) + + result = runner.invoke( + backlog_app, + [ + "refine", + "github", + "--repo-owner", + "org", + "--repo-name", + "repo", + "--import-from-tmp", + "--tmp-file", + str(refined_file), + ], + ) + + assert result.exit_code != 0 + assert "None of the refined item IDs matched fetched backlog items" in result.stdout + + @patch("specfact_cli.modules.backlog.src.commands._fetch_backlog_items") + def test_import_from_tmp_fails_when_refined_body_is_significantly_shortened( + self, mock_fetch_items: MagicMock, tmp_path + ) -> None: + """Import should fail when tmp refinement drops substantial original detail.""" + mock_fetch_items.return_value = [ + BacklogItem( + id="1", + provider="github", + url="https://github.com/org/repo/issues/1", + title="Issue 1", + body_markdown=( + "Implement OAuth login with PKCE, refresh-token rotation, role-based checks, " + "audit logging, and token-expiry handling." + ), + state="open", + assignees=[], + ) + ] + + refined_file = tmp_path / "refined.md" + refined_file.write_text( + """ +## Item 1: Edited Title + +**ID**: 1 +**URL**: https://github.com/org/repo/issues/1 +**State**: open +**Provider**: github + +**Body**: +```markdown +Implement login support. 
+``` +""".strip(), + encoding="utf-8", + ) + + result = runner.invoke( + backlog_app, + [ + "refine", + "github", + "--repo-owner", + "org", + "--repo-name", + "repo", + "--import-from-tmp", + "--tmp-file", + str(refined_file), + ], + ) + + assert result.exit_code != 0 + assert "appears to drop important detail" in result.stdout + + class TestRefinePreviewCommentUx: """Tests for refine preview comment progress and block rendering.""" @@ -1148,3 +1552,160 @@ def test_does_not_need_refinement_when_high_confidence_no_missing(self) -> None: ) result = _item_needs_refinement(item, detector, registry, None, "github", None, None) assert result is False + + def test_ado_does_not_require_story_points_heading_in_body_sections(self) -> None: + """ADO items should not be forced to include Story Points as markdown body heading.""" + registry = TemplateRegistry() + registry.register_template( + BacklogTemplate( + template_id="scrum-story", + name="Scrum Story", + description="", + required_sections=["As a", "I want", "So that", "Acceptance Criteria", "Story Points"], + ) + ) + detector = TemplateDetector(registry) + item = BacklogItem( + id="10", + provider="ado", + url="https://dev.azure.com/org/project/_workitems/edit/10", + title="User Story", + body_markdown="## As a\nuser\n\n## I want\nvalue\n\n## So that\nbenefit\n\n## Acceptance Criteria\n- [ ] done", + state="Active", + assignees=[], + story_points=5, + ) + # Should be considered already refined if no missing non-structured required sections. 
+ assert _item_needs_refinement(item, detector, registry, None, "ado", "scrum", None) is False + + +class TestResolveTargetTemplateForRefineItem: + """Tests for template steering helper used by backlog refine.""" + + def test_ado_user_story_type_prefers_user_story_template(self) -> None: + """ADO User Story/PBI items should prefer user_story_v1 over generic ado_work_item_v1.""" + registry = TemplateRegistry() + registry.register_template( + BacklogTemplate( + template_id="ado_work_item_v1", + name="ADO Work Item", + description="", + provider="ado", + required_sections=["Description", "Acceptance Criteria"], + ) + ) + registry.register_template( + BacklogTemplate( + template_id="user_story_v1", + name="User Story", + description="", + required_sections=["As a", "I want", "So that", "Acceptance Criteria"], + ) + ) + detector = TemplateDetector(registry) + item = BacklogItem( + id="42", + provider="ado", + url="https://dev.azure.com/org/project/_workitems/edit/42", + title="User Story: refine mapping", + body_markdown="## Description\n\nBody\n\n## Acceptance Criteria\n- [ ] one", + state="Active", + assignees=[], + work_item_type="User Story", + ) + + resolved = _resolve_target_template_for_refine_item( + item, + detector=detector, + registry=registry, + template_id=None, + normalized_adapter="ado", + normalized_framework=None, + normalized_persona=None, + ) + + assert resolved is not None + assert resolved.template_id == "user_story_v1" + + def test_github_story_tag_prefers_user_story_template(self) -> None: + """GitHub story-labeled items should prefer user_story_v1 over generic enabler templates.""" + registry = TemplateRegistry() + registry.register_template( + BacklogTemplate( + template_id="enabler_v1", + name="Enabler", + description="", + provider="github", + required_sections=["Description"], + ) + ) + registry.register_template( + BacklogTemplate( + template_id="user_story_v1", + name="User Story", + description="", + provider=None, + required_sections=["As 
a", "I want", "So that", "Acceptance Criteria"], + ) + ) + detector = TemplateDetector(registry) + item = BacklogItem( + id="77", + provider="github", + url="https://github.com/o/r/issues/77", + title="Story: improve login flow", + body_markdown="## Description\n\nImprove flow", + state="open", + assignees=[], + tags=["story"], + ) + + resolved = _resolve_target_template_for_refine_item( + item, + detector=detector, + registry=registry, + template_id=None, + normalized_adapter="github", + normalized_framework=None, + normalized_persona=None, + ) + + assert resolved is not None + assert resolved.template_id == "user_story_v1" + + def test_non_story_item_does_not_recurse_and_resolves_detected_template(self) -> None: + """Non-story items should resolve without recursive fallback loops.""" + registry = TemplateRegistry() + registry.register_template( + BacklogTemplate( + template_id="enabler_v1", + name="Enabler", + description="", + provider="github", + required_sections=["Description"], + ) + ) + detector = TemplateDetector(registry) + item = BacklogItem( + id="88", + provider="github", + url="https://github.com/o/r/issues/88", + title="Improve pipeline", + body_markdown="## Description\n\nImprove pipeline execution.", + state="open", + assignees=[], + tags=["enhancement"], + ) + + resolved = _resolve_target_template_for_refine_item( + item, + detector=detector, + registry=registry, + template_id=None, + normalized_adapter="github", + normalized_framework=None, + normalized_persona=None, + ) + + assert resolved is not None + assert resolved.template_id == "enabler_v1" diff --git a/tests/unit/commands/test_backlog_daily.py b/tests/unit/commands/test_backlog_daily.py index 0ada8097..7a83db39 100644 --- a/tests/unit/commands/test_backlog_daily.py +++ b/tests/unit/commands/test_backlog_daily.py @@ -310,6 +310,22 @@ def test_resolve_standup_options_explicit_overrides_defaults(self) -> None: assert limit == 10 assert assignee is None + def 
test_resolve_standup_options_any_disables_default_filters(self) -> None: + """Explicit any/all/* should disable default state/assignee filters.""" + from specfact_cli.modules.backlog.src.commands import _resolve_standup_options + + state, limit, assignee = _resolve_standup_options( + None, + None, + None, + None, + state_filter_disabled=True, + assignee_filter_disabled=True, + ) + assert state is None + assert limit == 20 + assert assignee is None + def test_apply_filters_with_state_open_excludes_closed(self) -> None: """Default state 'open' excludes closed items.""" items = [ diff --git a/tests/unit/modules/module_registry/test_commands.py b/tests/unit/modules/module_registry/test_commands.py index 91e90bf1..3152b82d 100644 --- a/tests/unit/modules/module_registry/test_commands.py +++ b/tests/unit/modules/module_registry/test_commands.py @@ -4,6 +4,7 @@ from pathlib import Path +import pytest from typer.testing import CliRunner from specfact_cli.models.module_package import ModulePackageMetadata @@ -14,6 +15,15 @@ runner = CliRunner() +@pytest.fixture(autouse=True) +def _isolate_user_modules_root(monkeypatch, tmp_path: Path) -> None: + """Isolate user module root so tests do not depend on machine-local installs.""" + user_root = tmp_path / "user-modules" + user_root.mkdir(parents=True, exist_ok=True) + monkeypatch.setattr("specfact_cli.modules.module_registry.src.commands.USER_MODULES_ROOT", user_root) + monkeypatch.setattr("specfact_cli.registry.module_installer.USER_MODULES_ROOT", user_root, raising=False) + + def test_install_command_integration(monkeypatch, tmp_path: Path) -> None: monkeypatch.setattr("specfact_cli.modules.module_registry.src.commands.discover_all_modules", list) monkeypatch.setattr( @@ -81,7 +91,7 @@ def _install(module_id: str, version=None, **_kwargs): assert result.exit_code == 0 assert called["install"] is False - assert "already available" in result.stdout + assert "already installed" in result.stdout or "already available" in 
result.stdout def test_install_command_project_scope_installs_to_project_modules_root(monkeypatch, tmp_path: Path) -> None: @@ -1096,7 +1106,8 @@ def test_module_init_bootstraps_user_modules(monkeypatch) -> None: result = runner.invoke(app, ["init"]) assert result.exit_code == 0 - assert f"Seeded 2 module(s) into {USER_MODULES_ROOT}" in result.stdout + assert "Seeded 2 module(s) into" in result.stdout + assert str(USER_MODULES_ROOT) in result.stdout or "user-modules" in result.stdout def test_module_init_project_scope_defaults_to_cwd_repo(monkeypatch, tmp_path: Path) -> None: diff --git a/tests/unit/specfact_cli/registry/test_signing_artifacts.py b/tests/unit/specfact_cli/registry/test_signing_artifacts.py index 886fb939..8bee9f63 100644 --- a/tests/unit/specfact_cli/registry/test_signing_artifacts.py +++ b/tests/unit/specfact_cli/registry/test_signing_artifacts.py @@ -90,6 +90,43 @@ def test_sign_module_script_help_mentions_passphrase_options(): assert "--passphrase-stdin" in result.stdout +def test_sign_module_script_enforces_version_bump_before_key_validation(tmp_path: Path): + """Wrapper SHALL fail on unchanged module version even if signing key is missing.""" + if not SIGN_SCRIPT.exists(): + pytest.skip("sign-module.sh not present") + + import subprocess + + repo = tmp_path / "repo" + module_dir = repo / "modules" / "sample" + source = module_dir / "src" / "sample" / "main.py" + manifest = module_dir / "module-package.yaml" + source.parent.mkdir(parents=True) + manifest.write_text("name: sample\nversion: 0.1.0\npublisher: nold-ai\ncommands: [sample]\n", encoding="utf-8") + source.write_text("print('v1')\n", encoding="utf-8") + + subprocess.run(["git", "init"], cwd=repo, check=True, capture_output=True, text=True) + subprocess.run( + ["git", "config", "user.email", "test@example.com"], cwd=repo, check=True, capture_output=True, text=True + ) + subprocess.run(["git", "config", "user.name", "Test User"], cwd=repo, check=True, capture_output=True, text=True) + 
subprocess.run(["git", "add", "."], cwd=repo, check=True, capture_output=True, text=True) + subprocess.run(["git", "commit", "-m", "initial"], cwd=repo, check=True, capture_output=True, text=True) + + # Change module payload without bumping module version. + source.write_text("print('v2')\n", encoding="utf-8") + + result = subprocess.run( + ["bash", str(SIGN_SCRIPT), str(manifest)], + capture_output=True, + text=True, + cwd=repo, + timeout=20, + ) + assert result.returncode != 0 + assert "Module version must be incremented before signing changed module contents" in result.stderr + + def test_sign_modules_py_requires_key_unless_allow_unsigned(tmp_path: Path): """sign-modules.py SHALL fail without key unless --allow-unsigned is passed.""" if not SIGN_PYTHON_SCRIPT.exists(): @@ -383,6 +420,65 @@ def test_verify_modules_script_exists(): assert VERIFY_PYTHON_SCRIPT.exists(), "scripts/verify-modules-signature.py must exist" +def test_verify_script_reports_version_bump_failure_even_when_checksum_fails(tmp_path: Path): + """Verifier SHALL report version-bump failures independently of checksum/signature failures.""" + if not VERIFY_PYTHON_SCRIPT.exists() or not SIGN_PYTHON_SCRIPT.exists(): + pytest.skip("verification/signing scripts not present") + + import subprocess + + repo = tmp_path / "repo" + module_dir = repo / "modules" / "sample" + source = module_dir / "src" / "sample" / "main.py" + manifest = module_dir / "module-package.yaml" + source.parent.mkdir(parents=True) + manifest.write_text("name: sample\nversion: 0.1.0\npublisher: nold-ai\ncommands: [sample]\n", encoding="utf-8") + source.write_text("print('v1')\n", encoding="utf-8") + + subprocess.run(["git", "init"], cwd=repo, check=True, capture_output=True, text=True) + subprocess.run( + ["git", "config", "user.email", "test@example.com"], cwd=repo, check=True, capture_output=True, text=True + ) + subprocess.run(["git", "config", "user.name", "Test User"], cwd=repo, check=True, capture_output=True, text=True) + + # 
Create baseline integrity metadata and commit. + signed = subprocess.run( + ["python3", str(SIGN_PYTHON_SCRIPT), "--allow-unsigned", str(manifest)], + capture_output=True, + text=True, + cwd=repo, + timeout=20, + ) + assert signed.returncode == 0, signed.stderr + subprocess.run(["git", "add", "."], cwd=repo, check=True, capture_output=True, text=True) + subprocess.run(["git", "commit", "-m", "initial"], cwd=repo, check=True, capture_output=True, text=True) + + # Commit payload change without version bump/re-signing -> checksum mismatch + missing bump. + source.write_text("print('v2')\n", encoding="utf-8") + subprocess.run(["git", "add", "."], cwd=repo, check=True, capture_output=True, text=True) + subprocess.run( + ["git", "commit", "-m", "change without version bump"], cwd=repo, check=True, capture_output=True, text=True + ) + + result = subprocess.run( + [ + "python3", + str(VERIFY_PYTHON_SCRIPT), + "--enforce-version-bump", + "--version-check-base", + "HEAD~1", + ], + capture_output=True, + text=True, + cwd=repo, + timeout=20, + ) + assert result.returncode != 0 + combined = f"{result.stdout}\n{result.stderr}" + assert "checksum mismatch" in combined + assert "module version was not incremented" in combined + + def test_pr_orchestrator_contains_verify_module_signatures_job(): """PR orchestrator SHALL include module signature verification gate.""" if not PR_ORCHESTRATOR_WORKFLOW.exists():