diff --git a/CHANGELOG.md b/CHANGELOG.md index a7a6b246..de9ac1a7 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -9,6 +9,20 @@ All notable changes to this project will be documented in this file. --- +## [0.26.14] - 2026-01-29 + +### Fixed (0.26.14) + +- **ADO backlog refine error logging and user-facing error UX** (fixes [#162](https://github.com/nold-ai/specfact-cli/issues/162)) + - **Debug log**: On ADO PATCH failure (backlog refine body, status update, comment, create work item), debug log now includes response body snippet and patch paths via `debug_log_operation(..., extra={"response_body": snippet, "patch_paths": [...]})` when `--debug` is set; snippet truncated (~1–2 KB) and redacted via `LoggerSetup.redact_secrets` + - **User-facing messages**: Console shows ADO error message (e.g. "Cannot find field System.AcceptanceCriteria") and actionable hint ("Check custom field mapping; see ado_custom.yaml or documentation."); when ADO message contains a field reference, visible message quotes it (e.g. "Field 'System.AcceptanceCriteria' not found") + - **Helper**: New `_log_ado_patch_failure()` in `ado.py` used at all PATCH failure sites for consistent logging and user messages; re-raised exception carries ADO context + - **Non-JSON/large body**: Non-JSON or oversized response body handled safely (no crash, truncated safe string in log/user message) + - **Docs**: debug-logging.md (ADO PATCH failure content, "Examining ADO API Errors"), troubleshooting.md ("Backlog refine or work item PATCH fails (400/422)"), adapters/azuredevops.md (error diagnostics link), README.md (debug note for ADO errors) + - **OS temp dir**: Export/import default paths use system temp directory (`tempfile.gettempdir()`) in backlog refine and sync bridge (backlog_commands.py, bridge_sync.py); help strings describe "<system temp>/..." + +--- + ## [0.26.13] - 2026-01-29 ### Fixed (0.26.13) @@ -36,7 +50,7 @@ All notable changes to this project will be documented in this file. 
### Fixed (0.26.11) - **Backlog refine --import-from-tmp**: Implemented import path so refined content from a temporary file is applied to backlog items - - **Parser**: Added `_parse_refined_export_markdown()` to parse the same markdown format produced by `--export-to-tmp` (## Item blocks, **ID**, **Body** in ```markdown ... ```, **Acceptance Criteria**, optional **Metrics**) + - **Parser**: Added `_parse_refined_export_markdown()` to parse the same markdown format produced by `--export-to-tmp` (## Item blocks, **ID**, **Body** in ```markdown ...```, **Acceptance Criteria**, optional **Metrics**) - **Import flow**: When `--import-from-tmp` (and optional `--tmp-file`) is used, the CLI reads the file, matches blocks to fetched items by ID, updates `body_markdown`, `acceptance_criteria`, and optionally title/metrics; without `--write` shows "Would update N item(s)", with `--write` calls `adapter.update_backlog_item()` for each and prints success summary - **Removed**: "Import functionality pending implementation" message and TODO - **Tests**: Unit tests for the parser (single item, acceptance criteria and metrics, header-only, blocks without ID) @@ -62,7 +76,7 @@ All notable changes to this project will be documented in this file. ### Fixed (0.26.10) - **Version Bump**: Corrected package version to 0.26.10 for PyPI publish (fixes incorrect version 0.26.9 publish issue) - - **Synced locations**: `pyproject.toml` (project.version), `setup.py` (version=), `src/__init__.py` (__version__), `src/specfact_cli/__init__.py` (__version__) + - **Synced locations**: `pyproject.toml` (project.version), `setup.py` (version=), `src/__init__.py` (**version**), `src/specfact_cli/__init__.py` (**version**) - When bumping version, update all four locations and add a CHANGELOG entry --- diff --git a/Language.ml b/Language.ml new file mode 100644 index 00000000..f0d3efbb --- /dev/null +++ b/Language.ml @@ -0,0 +1,679 @@ +(* Generated file. Do not edit. 
*) + +(* All the programming languages for which Semgrep has dedicated support. *) +type t = +| Apex +| Bash +| C +| Cairo +| Circom +| Clojure +| Cpp +| Csharp +| Dart +| Dockerfile +| Elixir +| Go +| Gosu +| Hack +| Html +| Java +| Js +| Json +| Jsonnet +| Julia +| Kotlin +| Lisp +| Lua +| Move_on_sui +| Move_on_aptos +| Ocaml +| Php +| Promql +| Protobuf +| Python2 +| Python3 +| Python +| Ql +| R +| Ruby +| Rust +| Scala +| Scheme +| Solidity +| Swift +| Terraform +| Ts +| Vue +| Xml +| Yaml + +(* + Maturity of the support for the programming language as shown to the + public. The constructors are sorted by increasing maturity, allowing + meaningful sorting using the default 'compare'. +*) +type maturity = +| Develop +| Alpha +| Beta +| Ga + +(* + Information about a supported programming language for which we have + a dedicated parser (target analyzer). Some of this information can also be + used for the purpose of target selection. +*) +type info = { + id: t; + id_string: string; + name: string; + keys: string list; + exts: string list; + maturity: maturity; + example_ext: string option; + excluded_exts: string list; + reverse_exts: string list option; + shebangs: string list; + tags: string list; +} + +let list = [ +{ + id = Apex; + id_string = "apex"; + name = "Apex"; + keys = [{|apex|}]; + exts = [{|.cls|}]; + maturity = Develop; + example_ext = None; + excluded_exts = []; + reverse_exts = None; + shebangs = []; + tags = [{|is_proprietary|}]; +}; +{ + id = Bash; + id_string = "bash"; + name = "Bash"; + keys = [{|bash|}; {|sh|}]; + exts = [{|.bash|}; {|.sh|}]; + maturity = Alpha; + example_ext = Some {|.sh|}; + excluded_exts = []; + reverse_exts = None; + shebangs = [{|bash|}; {|sh|}]; + tags = []; +}; +{ + id = C; + id_string = "c"; + name = "C"; + keys = [{|c|}]; + exts = [{|.c|}; {|.h|}]; + maturity = Alpha; + example_ext = None; + excluded_exts = []; + reverse_exts = None; + shebangs = []; + tags = []; +}; +{ + id = Cairo; + id_string = "cairo"; + name = 
"Cairo"; + keys = [{|cairo|}]; + exts = [{|.cairo|}]; + maturity = Alpha; + example_ext = None; + excluded_exts = []; + reverse_exts = None; + shebangs = []; + tags = []; +}; +{ + id = Circom; + id_string = "circom"; + name = "Circom"; + keys = [{|circom|}]; + exts = [{|.circom|}]; + maturity = Develop; + example_ext = None; + excluded_exts = []; + reverse_exts = None; + shebangs = []; + tags = []; +}; +{ + id = Clojure; + id_string = "clojure"; + name = "Clojure"; + keys = [{|clojure|}]; + exts = [{|.clj|}; {|.cljs|}; {|.cljc|}; {|.edn|}]; + maturity = Alpha; + example_ext = None; + excluded_exts = []; + reverse_exts = None; + shebangs = []; + tags = []; +}; +{ + id = Cpp; + id_string = "cpp"; + name = "C++"; + keys = [{|cpp|}; {|c++|}]; + exts = [{|.cc|}; {|.cpp|}; {|.cxx|}; {|.c++|}; {|.pcc|}; {|.tpp|}; {|.C|}; {|.h|}; {|.hh|}; {|.hpp|}; {|.hxx|}; {|.inl|}; {|.ipp|}]; + maturity = Alpha; + example_ext = Some {|.cpp|}; + excluded_exts = []; + reverse_exts = None; + shebangs = []; + tags = []; +}; +{ + id = Csharp; + id_string = "csharp"; + name = "C#"; + keys = [{|csharp|}; {|c#|}]; + exts = [{|.cs|}]; + maturity = Ga; + example_ext = None; + excluded_exts = []; + reverse_exts = None; + shebangs = []; + tags = []; +}; +{ + id = Dart; + id_string = "dart"; + name = "Dart"; + keys = [{|dart|}]; + exts = [{|.dart|}]; + maturity = Develop; + example_ext = None; + excluded_exts = []; + reverse_exts = None; + shebangs = []; + tags = []; +}; +(* + 'Dockerfile' is the only standard name for Dockerfiles. + The extension '.Dockerfile' is cited in the official documentation as + a popular extension. Whatever naming scheme is used in practice and is + not ambiguous is welcome here. 
+*) +{ + id = Dockerfile; + id_string = "dockerfile"; + name = "Dockerfile"; + keys = [{|dockerfile|}; {|docker|}]; + exts = [{|.dockerfile|}; {|.Dockerfile|}; {|Dockerfile|}; {|dockerfile|}]; + maturity = Alpha; + example_ext = Some {|.dockerfile|}; + excluded_exts = []; + reverse_exts = None; + shebangs = []; + tags = []; +}; +{ + id = Elixir; + id_string = "elixir"; + name = "Elixir"; + keys = [{|ex|}; {|elixir|}]; + exts = [{|.ex|}; {|.exs|}]; + maturity = Alpha; + example_ext = None; + excluded_exts = []; + reverse_exts = None; + shebangs = []; + tags = [{|is_proprietary|}]; +}; +{ + id = Go; + id_string = "go"; + name = "Go"; + keys = [{|go|}; {|golang|}]; + exts = [{|.go|}]; + maturity = Ga; + example_ext = None; + excluded_exts = []; + reverse_exts = None; + shebangs = []; + tags = []; +}; +{ + id = Gosu; + id_string = "gosu"; + name = "Gosu"; + keys = [{|gosu|}]; + exts = [{|.gs|}]; + maturity = Develop; + example_ext = None; + excluded_exts = []; + reverse_exts = None; + shebangs = []; + tags = [{|is_proprietary|}]; +}; +{ + id = Hack; + id_string = "hack"; + name = "Hack"; + keys = [{|hack|}]; + exts = [{|.hack|}; {|.hck|}; {|.hh|}]; + maturity = Develop; + example_ext = Some {|.hack|}; + excluded_exts = []; + reverse_exts = None; + shebangs = [{|hhvm|}]; + tags = []; +}; +{ + id = Html; + id_string = "html"; + name = "HTML"; + keys = [{|html|}]; + exts = [{|.htm|}; {|.html|}]; + maturity = Alpha; + example_ext = Some {|.html|}; + excluded_exts = []; + reverse_exts = None; + shebangs = []; + tags = []; +}; +{ + id = Java; + id_string = "java"; + name = "Java"; + keys = [{|java|}]; + exts = [{|.java|}]; + maturity = Ga; + example_ext = None; + excluded_exts = []; + reverse_exts = None; + shebangs = []; + tags = []; +}; +{ + id = Js; + id_string = "js"; + name = "JavaScript"; + keys = [{|js|}; {|javascript|}]; + exts = [{|.cjs|}; {|.js|}; {|.jsx|}; {|.mjs|}]; + maturity = Ga; + example_ext = Some {|.jsx|}; + excluded_exts = [{|.min.js|}]; + reverse_exts = 
None; + shebangs = [{|node|}; {|js|}; {|nodejs|}]; + tags = [{|is_js|}]; +}; +{ + id = Json; + id_string = "json"; + name = "JSON"; + keys = [{|json|}]; + exts = [{|.json|}; {|.ipynb|}]; + maturity = Ga; + example_ext = None; + excluded_exts = []; + reverse_exts = None; + shebangs = []; + tags = []; +}; +{ + id = Jsonnet; + id_string = "jsonnet"; + name = "Jsonnet"; + keys = [{|jsonnet|}]; + exts = [{|.jsonnet|}; {|.libsonnet|}]; + maturity = Alpha; + example_ext = None; + excluded_exts = []; + reverse_exts = None; + shebangs = []; + tags = []; +}; +{ + id = Julia; + id_string = "julia"; + name = "Julia"; + keys = [{|julia|}]; + exts = [{|.jl|}]; + maturity = Alpha; + example_ext = None; + excluded_exts = []; + reverse_exts = None; + shebangs = []; + tags = []; +}; +{ + id = Kotlin; + id_string = "kotlin"; + name = "Kotlin"; + keys = [{|kt|}; {|kotlin|}]; + exts = [{|.kt|}; {|.kts|}; {|.ktm|}]; + maturity = Beta; + example_ext = Some {|.kt|}; + excluded_exts = []; + reverse_exts = None; + shebangs = []; + tags = []; +}; +{ + id = Lisp; + id_string = "lisp"; + name = "Lisp"; + keys = [{|lisp|}]; + exts = [{|.lisp|}; {|.cl|}; {|.el|}]; + maturity = Alpha; + example_ext = None; + excluded_exts = []; + reverse_exts = None; + shebangs = []; + tags = []; +}; +{ + id = Lua; + id_string = "lua"; + name = "Lua"; + keys = [{|lua|}]; + exts = [{|.lua|}]; + maturity = Alpha; + example_ext = None; + excluded_exts = []; + reverse_exts = None; + shebangs = [{|lua|}]; + tags = []; +}; +(* + Move language with SUI flavor +*) +{ + id = Move_on_sui; + id_string = "move_on_sui"; + name = "Move on Sui"; + keys = [{|move_on_sui|}]; + exts = [{|.move|}]; + maturity = Develop; + example_ext = None; + excluded_exts = []; + reverse_exts = None; + shebangs = []; + tags = []; +}; +(* + Move language with Aptos flavor +*) +{ + id = Move_on_aptos; + id_string = "move_on_aptos"; + name = "Move on Aptos"; + keys = [{|move_on_aptos|}]; + exts = [{|.move|}]; + maturity = Develop; + example_ext = 
None; + excluded_exts = []; + reverse_exts = None; + shebangs = []; + tags = []; +}; +{ + id = Ocaml; + id_string = "ocaml"; + name = "OCaml"; + keys = [{|ocaml|}]; + exts = [{|.ml|}; {|.mli|}]; + maturity = Alpha; + example_ext = Some {|.ml|}; + excluded_exts = []; + reverse_exts = None; + shebangs = [{|ocaml|}; {|ocamlscript|}]; + tags = []; +}; +{ + id = Php; + id_string = "php"; + name = "PHP"; + keys = [{|php|}]; + exts = [{|.php|}; {|.tpl|}; {|.phtml|}]; + maturity = Ga; + example_ext = None; + excluded_exts = []; + reverse_exts = None; + shebangs = [{|php|}]; + tags = []; +}; +{ + id = Promql; + id_string = "promql"; + name = "Prometheus Query Language"; + keys = [{|promql|}]; + exts = [{|.promql|}]; + maturity = Alpha; + example_ext = None; + excluded_exts = []; + reverse_exts = None; + shebangs = []; + tags = []; +}; +{ + id = Protobuf; + id_string = "protobuf"; + name = "Protocol Buffers"; + keys = [{|proto|}; {|protobuf|}; {|proto3|}]; + exts = [{|.proto|}]; + maturity = Develop; + example_ext = None; + excluded_exts = []; + reverse_exts = None; + shebangs = []; + tags = []; +}; +{ + id = Python2; + id_string = "python2"; + name = "Python 2"; + keys = [{|python2|}]; + exts = [{|.py|}; {|.pyi|}]; + maturity = Develop; + example_ext = Some {|.py|}; + excluded_exts = []; + reverse_exts = Some []; + shebangs = [{|python|}; {|python2|}]; + tags = [{|is_python|}]; +}; +{ + id = Python3; + id_string = "python3"; + name = "Python 3"; + keys = [{|python3|}]; + exts = [{|.py|}; {|.pyi|}]; + maturity = Develop; + example_ext = Some {|.py|}; + excluded_exts = []; + reverse_exts = Some []; + shebangs = [{|python|}; {|python3|}]; + tags = [{|is_python|}]; +}; +{ + id = Python; + id_string = "python"; + name = "Python"; + keys = [{|py|}; {|python|}]; + exts = [{|.py|}; {|.pyi|}]; + maturity = Ga; + example_ext = Some {|.py|}; + excluded_exts = []; + reverse_exts = None; + shebangs = [{|python|}; {|python2|}; {|python3|}]; + tags = [{|is_python|}]; +}; +{ + id = Ql; + 
id_string = "ql"; + name = "QL"; + keys = [{|ql|}]; + exts = [{|.ql|}; {|.qll|}]; + maturity = Alpha; + example_ext = Some {|.ql|}; + excluded_exts = []; + reverse_exts = None; + shebangs = []; + tags = []; +}; +{ + id = R; + id_string = "r"; + name = "R"; + keys = [{|r|}]; + exts = [{|.r|}; {|.R|}]; + maturity = Alpha; + example_ext = Some {|.R|}; + excluded_exts = []; + reverse_exts = None; + shebangs = []; + tags = []; +}; +{ + id = Ruby; + id_string = "ruby"; + name = "Ruby"; + keys = [{|ruby|}]; + exts = [{|.rb|}]; + maturity = Ga; + example_ext = None; + excluded_exts = []; + reverse_exts = None; + shebangs = [{|ruby|}]; + tags = []; +}; +{ + id = Rust; + id_string = "rust"; + name = "Rust"; + keys = [{|rust|}]; + exts = [{|.rs|}]; + maturity = Alpha; + example_ext = None; + excluded_exts = []; + reverse_exts = None; + shebangs = [{|run-cargo-script|}]; + tags = []; +}; +{ + id = Scala; + id_string = "scala"; + name = "Scala"; + keys = [{|scala|}]; + exts = [{|.scala|}]; + maturity = Ga; + example_ext = None; + excluded_exts = []; + reverse_exts = None; + shebangs = [{|scala|}]; + tags = []; +}; +{ + id = Scheme; + id_string = "scheme"; + name = "Scheme"; + keys = [{|scheme|}]; + exts = [{|.scm|}; {|.ss|}]; + maturity = Alpha; + example_ext = None; + excluded_exts = []; + reverse_exts = None; + shebangs = []; + tags = []; +}; +{ + id = Solidity; + id_string = "solidity"; + name = "Solidity"; + keys = [{|solidity|}; {|sol|}]; + exts = [{|.sol|}]; + maturity = Alpha; + example_ext = None; + excluded_exts = []; + reverse_exts = None; + shebangs = []; + tags = []; +}; +{ + id = Swift; + id_string = "swift"; + name = "Swift"; + keys = [{|swift|}]; + exts = [{|.swift|}]; + maturity = Alpha; + example_ext = None; + excluded_exts = []; + reverse_exts = None; + shebangs = []; + tags = []; +}; +{ + id = Terraform; + id_string = "terraform"; + name = "Terraform"; + keys = [{|tf|}; {|hcl|}; {|terraform|}]; + exts = [{|.tf|}; {|.hcl|}; {|.tfvars|}]; + maturity = Ga; + 
example_ext = None; + excluded_exts = []; + reverse_exts = None; + shebangs = []; + tags = []; +}; +{ + id = Ts; + id_string = "ts"; + name = "TypeScript"; + keys = [{|ts|}; {|typescript|}]; + exts = [{|.ts|}; {|.tsx|}]; + maturity = Ga; + example_ext = Some {|.tsx|}; + excluded_exts = [{|.d.ts|}]; + reverse_exts = None; + shebangs = [{|ts-node|}]; + tags = [{|is_js|}]; +}; +{ + id = Vue; + id_string = "vue"; + name = "Vue"; + keys = [{|vue|}]; + exts = [{|.vue|}]; + maturity = Develop; + example_ext = None; + excluded_exts = []; + reverse_exts = None; + shebangs = []; + tags = []; +}; +{ + id = Xml; + id_string = "xml"; + name = "XML"; + keys = [{|xml|}]; + exts = [{|.xml|}; {|.plist|}]; + maturity = Alpha; + example_ext = Some {|.xml|}; + excluded_exts = []; + reverse_exts = None; + shebangs = []; + tags = []; +}; +{ + id = Yaml; + id_string = "yaml"; + name = "YAML"; + keys = [{|yaml|}]; + exts = [{|.yml|}; {|.yaml|}]; + maturity = Alpha; + example_ext = Some {|.yaml|}; + excluded_exts = []; + reverse_exts = None; + shebangs = []; + tags = []; +}; +] diff --git a/Language.mli b/Language.mli new file mode 100644 index 00000000..092366f4 --- /dev/null +++ b/Language.mli @@ -0,0 +1,86 @@ +(* Generated file. Do not edit. *) + +(* All the programming languages for which Semgrep has dedicated support. *) +type t = +| Apex +| Bash +| C +| Cairo +| Circom +| Clojure +| Cpp +| Csharp +| Dart +| Dockerfile +| Elixir +| Go +| Gosu +| Hack +| Html +| Java +| Js +| Json +| Jsonnet +| Julia +| Kotlin +| Lisp +| Lua +| Move_on_sui +| Move_on_aptos +| Ocaml +| Php +| Promql +| Protobuf +| Python2 +| Python3 +| Python +| Ql +| R +| Ruby +| Rust +| Scala +| Scheme +| Solidity +| Swift +| Terraform +| Ts +| Vue +| Xml +| Yaml + +(* + Maturity of the support for the programming language as shown to the + public. The constructors are sorted by increasing maturity, allowing + meaningful sorting using the default 'compare'. 
+*) +type maturity = +| Develop +| Alpha +| Beta +| Ga + +(* + Information about a supported programming language for which we have + a dedicated parser (target analyzer). Some of this information can also be + used for the purpose of target selection. +*) +type info = { + id: t; + id_string: string; + name: string; + keys: string list; + exts: string list; + maturity: maturity; + example_ext: string option; + excluded_exts: string list; + reverse_exts: string list option; + shebangs: string list; + tags: string list; +} + +(* + List of all the programming languages for which Semgrep has dedicated + support. This list is sufficient to produce fast lookup tables implementing + to_string, of_string, etc. +*) +val list : info list diff --git a/README.md b/README.md index eef26c84..3831a8ab 100644 --- a/README.md +++ b/README.md @@ -339,7 +339,7 @@ hatch run contract-test-full - πŸ’¬ **Questions?** [GitHub Discussions](https://github.com/nold-ai/specfact-cli/discussions) - πŸ› **Found a bug?** [GitHub Issues](https://github.com/nold-ai/specfact-cli/issues) -- πŸ” **Debugging I/O or API issues?** Run with `--debug`; logs are written to `~/.specfact/logs/specfact-debug.log`. See [Debug Logging](docs/reference/debug-logging.md). +- πŸ” **Debugging I/O or API issues?** Run with `--debug`; logs are written to `~/.specfact/logs/specfact-debug.log`. With `--debug`, ADO API errors include response snippet and patch paths in the log. See [Debug Logging](docs/reference/debug-logging.md). - πŸ“§ **Need help?** [hello@noldai.com](mailto:hello@noldai.com) - 🌐 **Learn more:** [specfact.com](https://specfact.com) β€’ [specfact.io](https://specfact.io) β€’ [specfact.dev](https://specfact.dev) diff --git a/docs/adapters/azuredevops.md b/docs/adapters/azuredevops.md index 1c6421d9..7c476e5c 100644 --- a/docs/adapters/azuredevops.md +++ b/docs/adapters/azuredevops.md @@ -182,6 +182,10 @@ adapter = AdoAdapter( 3. Copy the token (it's only shown once) 4. 
Set as environment variable: `export AZURE_DEVOPS_TOKEN='your-token'` +### Error diagnostics (PATCH failures) + +When a work item PATCH fails (e.g. HTTP 400 during backlog refine or status update), the CLI shows the ADO error message and a hint in the console. With `--debug`, the log includes the ADO response snippet and the JSON Patch paths attempted so you can identify the failing field. See [Debug Logging – Examining ADO API Errors](../reference/debug-logging.md#examining-ado-api-errors) and [Troubleshooting – Backlog refine or work item PATCH fails (400/422)](../guides/troubleshooting.md#backlog-refine-or-work-item-patch-fails-400422). + ## Usage Examples ### Export Change Proposal to Azure DevOps diff --git a/docs/guides/troubleshooting.md b/docs/guides/troubleshooting.md index 1307abff..dee2869f 100644 --- a/docs/guides/troubleshooting.md +++ b/docs/guides/troubleshooting.md @@ -765,6 +765,28 @@ The command automatically uses tokens in this order: 4. **Validate schema**: Ensure the file matches `FieldMappingConfig` schema 5. **Automatic detection**: Custom mappings are automatically detected - no restart needed. If not working, check file path and syntax. +### Backlog refine or work item PATCH fails (400/422) + +**Issue**: `specfact backlog refine ado ... --write` or work item update fails with HTTP 400/422 (e.g. "400 Client Error: Bad Request") or an ADO message like "TF51535: Cannot find field System.AcceptanceCriteria." + +**Cause**: The Azure DevOps project may use a custom process template where field names or paths differ from defaults (e.g. no `System.AcceptanceCriteria`). The JSON Patch sent by the CLI targets a field that does not exist or is not writable in that project. + +**Solutions**: + +1. **Read the console message** – The CLI prints the ADO error text and a hint, e.g. "Check custom field mapping; see ado_custom.yaml or documentation." If a field is named (e.g. 
"Field 'System.AcceptanceCriteria' not found"), that is the one to fix in mapping or template. + +2. **Run with `--debug` and inspect the log** – This gives you the exact ADO response and the patch paths that were sent: + - Run: `specfact --debug backlog refine ado --ado-org <org> --ado-project <project> ...` (or the failing command). + - Open `~/.specfact/logs/specfact-debug.log` and search for `"operation": "ado_patch"` and `"status": "failed"`. + - In that line, `extra.response_body` is a redacted snippet of the ADO error payload; `extra.patch_paths` lists the JSON Patch paths (e.g. `["/fields/System.AcceptanceCriteria", ...]`). Use these to see which field path failed. + +3. **Fix field mapping** – If the error is about a missing or wrong field: + - Ensure `.specfact/templates/backlog/field_mappings/ado_custom.yaml` exists and maps your canonical fields to the field names/paths that exist in your ADO project. + - Use `specfact backlog map-fields --ado-org <org> --ado-project <project>` to discover available fields in the project. + - See [Custom Field Mapping](custom-field-mapping.md) and [Debug Logging – Examining ADO API Errors](../reference/debug-logging.md#examining-ado-api-errors). + +4. **Check project process template** – Custom ADO process templates can rename or remove fields. Align your mapping with the actual work item type and process in the project. + --- ## Getting Help diff --git a/docs/reference/debug-logging.md b/docs/reference/debug-logging.md index 8b3f93c0..e99044c8 100644 --- a/docs/reference/debug-logging.md +++ b/docs/reference/debug-logging.md @@ -61,7 +61,7 @@ When `--debug` is on, the CLI logs: | **auth azure-devops** | Start, success (PAT or OAuth), or error; key steps (OAuth flow, device code) when `--debug` is on. | | **init** | Template resolution: paths tried, success/failure, fallbacks (e.g. development path, package path, `importlib` fallbacks). | | **backlog refine** | File read for import: path, success/error (e.g. `--import-from-tmp`). 
File write for export: path, success/error (e.g. `--export-to-tmp`). | -| **Azure DevOps adapter** | WIQL request (redacted URL, method, status); Work Items GET (redacted URL, status); Work Items PATCH (redacted URL, status); on failure, error snippet. | +| **Azure DevOps adapter** | WIQL request (redacted URL, method, status); Work Items GET (redacted URL, status); Work Items PATCH (redacted URL, status). On PATCH failure: structured log with `operation=ado_patch`, `status=failed`, and `extra` containing `response_body` (redacted snippet of ADO error payload) and `patch_paths` (JSON Patch paths attempted). | | **GitHub adapter** | API request/response (redacted URL, method, status); on failure, redacted error snippet. | ### Example Log Snippets @@ -97,6 +97,27 @@ When `--debug` is on, the CLI logs: See also [Troubleshooting](../guides/troubleshooting.md). +### Examining ADO API Errors + +When an Azure DevOps PATCH fails (e.g. HTTP 400 during `backlog refine ado` or work item update), the CLI does two things: + +1. **Console** – You see the ADO error message and a short hint (e.g. "Check custom field mapping; see ado_custom.yaml or documentation."). If the ADO message names a field (e.g. "Cannot find field System.AcceptanceCriteria"), that field is highlighted so you can fix mapping or template issues. + +2. **Debug log** (only when `--debug` is on) – One structured line is written with: + - **operation**: `ado_patch` + - **status**: `failed` + - **error**: Parsed ADO message or short summary + - **extra.response_body**: Redacted snippet of the ADO response (up to ~1–2 KB). Use this to see the exact server error (e.g. TF51535, field name). + - **extra.patch_paths**: List of JSON Patch paths that were sent (e.g. `["/fields/System.AcceptanceCriteria", "/fields/System.Description"]`). Use this to see which field path failed. + +To analyze an ADO API error: + +1. Run the command with `--debug` and reproduce the failure. +2. 
In the console, read the red error line: it contains the ADO message and the custom-mapping hint. +3. Open `~/.specfact/logs/specfact-debug.log` and search for `"operation": "ado_patch"` and `"status": "failed"`. +4. In that line, use `extra.response_body` to see the server’s error text and `extra.patch_paths` to see which field paths were attempted. +5. If the error is about a missing or invalid field (e.g. custom process template), update [custom field mapping](../guides/custom-field-mapping.md) (e.g. `.specfact/templates/backlog/field_mappings/ado_custom.yaml`) or see [Azure DevOps Issues](../guides/troubleshooting.md#azure-devops-issues) in Troubleshooting. + --- ## For Developers diff --git a/lang.json b/lang.json new file mode 100644 index 00000000..90d56b71 --- /dev/null +++ b/lang.json @@ -0,0 +1,942 @@ +[ + { + "id": "apex", + "name": "Apex", + "keys": [ + "apex" + ], + "maturity": "develop", + "exts": [ + ".cls" + ], + "example_ext": null, + "excluded_exts": [], + "reverse_exts": null, + "shebangs": [], + "is_target_language": true, + "tags": [ + "is_proprietary" + ] + }, + { + "id": "bash", + "name": "Bash", + "keys": [ + "bash", + "sh" + ], + "maturity": "alpha", + "exts": [ + ".bash", + ".sh" + ], + "example_ext": ".sh", + "excluded_exts": [], + "reverse_exts": null, + "shebangs": [ + "bash", + "sh" + ], + "is_target_language": true, + "tags": [] + }, + { + "id": "c", + "name": "C", + "keys": [ + "c" + ], + "maturity": "alpha", + "exts": [ + ".c", + ".h" + ], + "example_ext": null, + "excluded_exts": [], + "reverse_exts": null, + "shebangs": [], + "is_target_language": true, + "tags": [] + }, + { + "id": "cairo", + "name": "Cairo", + "keys": [ + "cairo" + ], + "maturity": "alpha", + "exts": [ + ".cairo" + ], + "example_ext": null, + "excluded_exts": [], + "reverse_exts": null, + "shebangs": [], + "is_target_language": true, + "tags": [] + }, + { + "id": "circom", + "name": "Circom", + "keys": [ + "circom" + ], + "maturity": "develop", + "exts": [ + 
".circom" + ], + "example_ext": null, + "excluded_exts": [], + "reverse_exts": null, + "shebangs": [], + "is_target_language": true, + "tags": [] + }, + { + "id": "clojure", + "name": "Clojure", + "keys": [ + "clojure" + ], + "maturity": "alpha", + "exts": [ + ".clj", + ".cljs", + ".cljc", + ".edn" + ], + "example_ext": null, + "excluded_exts": [], + "reverse_exts": null, + "shebangs": [], + "is_target_language": true, + "tags": [] + }, + { + "id": "cpp", + "name": "C++", + "keys": [ + "cpp", + "c++" + ], + "maturity": "alpha", + "exts": [ + ".cc", + ".cpp", + ".cxx", + ".c++", + ".pcc", + ".tpp", + ".C", + ".h", + ".hh", + ".hpp", + ".hxx", + ".inl", + ".ipp" + ], + "example_ext": ".cpp", + "excluded_exts": [], + "reverse_exts": null, + "shebangs": [], + "is_target_language": true, + "tags": [] + }, + { + "id": "csharp", + "name": "C#", + "keys": [ + "csharp", + "c#" + ], + "maturity": "ga", + "exts": [ + ".cs" + ], + "example_ext": null, + "excluded_exts": [], + "reverse_exts": null, + "shebangs": [], + "is_target_language": true, + "tags": [] + }, + { + "id": "dart", + "name": "Dart", + "keys": [ + "dart" + ], + "maturity": "develop", + "exts": [ + ".dart" + ], + "example_ext": null, + "excluded_exts": [], + "reverse_exts": null, + "shebangs": [], + "is_target_language": true, + "tags": [] + }, + { + "comment": "'Dockerfile' is the only standard name for Dockerfiles.\nThe extension '.Dockerfile' is cited in the official documentation as\na popular extension. 
Whatever naming scheme is used in practice and is\nnot ambiguous is welcome here.\n", + "id": "dockerfile", + "name": "Dockerfile", + "keys": [ + "dockerfile", + "docker" + ], + "maturity": "alpha", + "exts": [ + ".dockerfile", + ".Dockerfile", + "Dockerfile", + "dockerfile" + ], + "example_ext": ".dockerfile", + "excluded_exts": [], + "reverse_exts": null, + "shebangs": [], + "is_target_language": true, + "tags": [] + }, + { + "id": "elixir", + "name": "Elixir", + "keys": [ + "ex", + "elixir" + ], + "maturity": "alpha", + "exts": [ + ".ex", + ".exs" + ], + "example_ext": null, + "excluded_exts": [], + "reverse_exts": null, + "shebangs": [], + "is_target_language": true, + "tags": [ + "is_proprietary" + ] + }, + { + "id": "go", + "name": "Go", + "keys": [ + "go", + "golang" + ], + "maturity": "ga", + "exts": [ + ".go" + ], + "example_ext": null, + "excluded_exts": [], + "reverse_exts": null, + "shebangs": [], + "is_target_language": true, + "tags": [] + }, + { + "id": "gosu", + "name": "Gosu", + "keys": [ + "gosu" + ], + "maturity": "develop", + "exts": [ + ".gs" + ], + "example_ext": null, + "excluded_exts": [], + "reverse_exts": null, + "shebangs": [], + "is_target_language": true, + "tags": [ + "is_proprietary" + ] + }, + { + "id": "hack", + "name": "Hack", + "keys": [ + "hack" + ], + "maturity": "develop", + "exts": [ + ".hack", + ".hck", + ".hh" + ], + "example_ext": ".hack", + "excluded_exts": [], + "reverse_exts": null, + "shebangs": [ + "hhvm" + ], + "is_target_language": true, + "tags": [] + }, + { + "id": "html", + "name": "HTML", + "keys": [ + "html" + ], + "maturity": "alpha", + "exts": [ + ".htm", + ".html" + ], + "example_ext": ".html", + "excluded_exts": [], + "reverse_exts": null, + "shebangs": [], + "is_target_language": true, + "tags": [] + }, + { + "id": "java", + "name": "Java", + "keys": [ + "java" + ], + "maturity": "ga", + "exts": [ + ".java" + ], + "example_ext": null, + "excluded_exts": [], + "reverse_exts": null, + "shebangs": [], + 
"is_target_language": true, + "tags": [] + }, + { + "id": "js", + "name": "JavaScript", + "keys": [ + "js", + "javascript" + ], + "maturity": "ga", + "exts": [ + ".cjs", + ".js", + ".jsx", + ".mjs" + ], + "example_ext": ".jsx", + "excluded_exts": [ + ".min.js" + ], + "reverse_exts": null, + "shebangs": [ + "node", + "js", + "nodejs" + ], + "is_target_language": true, + "tags": [ + "is_js" + ] + }, + { + "id": "json", + "name": "JSON", + "keys": [ + "json" + ], + "maturity": "ga", + "exts": [ + ".json", + ".ipynb" + ], + "example_ext": null, + "excluded_exts": [], + "reverse_exts": null, + "shebangs": [], + "is_target_language": true, + "tags": [] + }, + { + "id": "jsonnet", + "name": "Jsonnet", + "keys": [ + "jsonnet" + ], + "maturity": "alpha", + "exts": [ + ".jsonnet", + ".libsonnet" + ], + "example_ext": null, + "excluded_exts": [], + "reverse_exts": null, + "shebangs": [], + "is_target_language": true, + "tags": [] + }, + { + "id": "julia", + "name": "Julia", + "keys": [ + "julia" + ], + "maturity": "alpha", + "exts": [ + ".jl" + ], + "example_ext": null, + "excluded_exts": [], + "reverse_exts": null, + "shebangs": [], + "is_target_language": true, + "tags": [] + }, + { + "id": "kotlin", + "name": "Kotlin", + "keys": [ + "kt", + "kotlin" + ], + "maturity": "beta", + "exts": [ + ".kt", + ".kts", + ".ktm" + ], + "example_ext": ".kt", + "excluded_exts": [], + "reverse_exts": null, + "shebangs": [], + "is_target_language": true, + "tags": [] + }, + { + "id": "lisp", + "name": "Lisp", + "keys": [ + "lisp" + ], + "maturity": "alpha", + "exts": [ + ".lisp", + ".cl", + ".el" + ], + "example_ext": null, + "excluded_exts": [], + "reverse_exts": null, + "shebangs": [], + "is_target_language": true, + "tags": [] + }, + { + "id": "lua", + "name": "Lua", + "keys": [ + "lua" + ], + "maturity": "alpha", + "exts": [ + ".lua" + ], + "example_ext": null, + "excluded_exts": [], + "reverse_exts": null, + "shebangs": [ + "lua" + ], + "is_target_language": true, + "tags": [] + }, + { 
+ "comment": "Move language with SUI flavor", + "id": "move_on_sui", + "name": "Move on Sui", + "keys": [ + "move_on_sui" + ], + "maturity": "develop", + "exts": [ + ".move" + ], + "example_ext": null, + "excluded_exts": [], + "reverse_exts": null, + "shebangs": [], + "is_target_language": true, + "tags": [] + }, + { + "comment": "Move language with Aptos flavor", + "id": "move_on_aptos", + "name": "Move on Aptos", + "keys": [ + "move_on_aptos" + ], + "maturity": "develop", + "exts": [ + ".move" + ], + "example_ext": null, + "excluded_exts": [], + "reverse_exts": null, + "shebangs": [], + "is_target_language": true, + "tags": [] + }, + { + "id": "ocaml", + "name": "OCaml", + "keys": [ + "ocaml" + ], + "maturity": "alpha", + "exts": [ + ".ml", + ".mli" + ], + "example_ext": ".ml", + "excluded_exts": [], + "reverse_exts": null, + "shebangs": [ + "ocaml", + "ocamlscript" + ], + "is_target_language": true, + "tags": [] + }, + { + "id": "php", + "name": "PHP", + "keys": [ + "php" + ], + "maturity": "ga", + "exts": [ + ".php", + ".tpl", + ".phtml" + ], + "example_ext": null, + "excluded_exts": [], + "reverse_exts": null, + "shebangs": [ + "php" + ], + "is_target_language": true, + "tags": [] + }, + { + "id": "promql", + "name": "Prometheus Query Language", + "keys": [ + "promql" + ], + "maturity": "alpha", + "exts": [ + ".promql" + ], + "example_ext": null, + "excluded_exts": [], + "reverse_exts": null, + "shebangs": [], + "is_target_language": true, + "tags": [] + }, + { + "id": "protobuf", + "name": "Protocol Buffers", + "keys": [ + "proto", + "protobuf", + "proto3" + ], + "maturity": "develop", + "exts": [ + ".proto" + ], + "example_ext": null, + "excluded_exts": [], + "reverse_exts": null, + "shebangs": [], + "is_target_language": true, + "tags": [] + }, + { + "id": "python2", + "name": "Python 2", + "keys": [ + "python2" + ], + "maturity": "develop", + "exts": [ + ".py", + ".pyi" + ], + "example_ext": ".py", + "excluded_exts": [], + "reverse_exts": [], + "shebangs": 
[ + "python", + "python2" + ], + "is_target_language": true, + "tags": [ + "is_python" + ] + }, + { + "id": "python3", + "name": "Python 3", + "keys": [ + "python3" + ], + "maturity": "develop", + "exts": [ + ".py", + ".pyi" + ], + "example_ext": ".py", + "excluded_exts": [], + "reverse_exts": [], + "shebangs": [ + "python", + "python3" + ], + "is_target_language": true, + "tags": [ + "is_python" + ] + }, + { + "id": "python", + "name": "Python", + "keys": [ + "py", + "python" + ], + "maturity": "ga", + "exts": [ + ".py", + ".pyi" + ], + "example_ext": ".py", + "excluded_exts": [], + "reverse_exts": null, + "shebangs": [ + "python", + "python2", + "python3" + ], + "is_target_language": true, + "tags": [ + "is_python" + ] + }, + { + "id": "ql", + "name": "QL", + "keys": [ + "ql" + ], + "maturity": "alpha", + "exts": [ + ".ql", + ".qll" + ], + "example_ext": ".ql", + "excluded_exts": [], + "reverse_exts": null, + "shebangs": [], + "is_target_language": true, + "tags": [] + }, + { + "id": "r", + "name": "R", + "keys": [ + "r" + ], + "maturity": "alpha", + "exts": [ + ".r", + ".R" + ], + "example_ext": ".R", + "excluded_exts": [], + "reverse_exts": null, + "shebangs": [], + "is_target_language": true, + "tags": [] + }, + { + "id": "ruby", + "name": "Ruby", + "keys": [ + "ruby" + ], + "maturity": "ga", + "exts": [ + ".rb" + ], + "example_ext": null, + "excluded_exts": [], + "reverse_exts": null, + "shebangs": [ + "ruby" + ], + "is_target_language": true, + "tags": [] + }, + { + "id": "rust", + "name": "Rust", + "keys": [ + "rust" + ], + "maturity": "alpha", + "exts": [ + ".rs" + ], + "example_ext": null, + "excluded_exts": [], + "reverse_exts": null, + "shebangs": [ + "run-cargo-script" + ], + "is_target_language": true, + "tags": [] + }, + { + "id": "scala", + "name": "Scala", + "keys": [ + "scala" + ], + "maturity": "ga", + "exts": [ + ".scala" + ], + "example_ext": null, + "excluded_exts": [], + "reverse_exts": null, + "shebangs": [ + "scala" + ], + 
"is_target_language": true, + "tags": [] + }, + { + "id": "scheme", + "name": "Scheme", + "keys": [ + "scheme" + ], + "maturity": "alpha", + "exts": [ + ".scm", + ".ss" + ], + "example_ext": null, + "excluded_exts": [], + "reverse_exts": null, + "shebangs": [], + "is_target_language": true, + "tags": [] + }, + { + "id": "solidity", + "name": "Solidity", + "keys": [ + "solidity", + "sol" + ], + "maturity": "alpha", + "exts": [ + ".sol" + ], + "example_ext": null, + "excluded_exts": [], + "reverse_exts": null, + "shebangs": [], + "is_target_language": true, + "tags": [] + }, + { + "id": "swift", + "name": "Swift", + "keys": [ + "swift" + ], + "maturity": "alpha", + "exts": [ + ".swift" + ], + "example_ext": null, + "excluded_exts": [], + "reverse_exts": null, + "shebangs": [], + "is_target_language": true, + "tags": [] + }, + { + "id": "terraform", + "name": "Terraform", + "keys": [ + "tf", + "hcl", + "terraform" + ], + "maturity": "ga", + "exts": [ + ".tf", + ".hcl", + ".tfvars" + ], + "example_ext": null, + "excluded_exts": [], + "reverse_exts": null, + "shebangs": [], + "is_target_language": true, + "tags": [] + }, + { + "id": "ts", + "name": "TypeScript", + "keys": [ + "ts", + "typescript" + ], + "maturity": "ga", + "exts": [ + ".ts", + ".tsx" + ], + "example_ext": ".tsx", + "excluded_exts": [ + ".d.ts" + ], + "reverse_exts": null, + "shebangs": [ + "ts-node" + ], + "is_target_language": true, + "tags": [ + "is_js" + ] + }, + { + "id": "vue", + "name": "Vue", + "keys": [ + "vue" + ], + "maturity": "develop", + "exts": [ + ".vue" + ], + "example_ext": null, + "excluded_exts": [], + "reverse_exts": null, + "shebangs": [], + "is_target_language": true, + "tags": [] + }, + { + "id": "xml", + "name": "XML", + "keys": [ + "xml" + ], + "maturity": "alpha", + "exts": [ + ".xml", + ".plist" + ], + "example_ext": ".xml", + "excluded_exts": [], + "reverse_exts": null, + "shebangs": [], + "is_target_language": true, + "tags": [] + }, + { + "id": "yaml", + "name": "YAML", + 
"keys": [ + "yaml" + ], + "maturity": "alpha", + "exts": [ + ".yml", + ".yaml" + ], + "example_ext": ".yaml", + "excluded_exts": [], + "reverse_exts": null, + "shebangs": [], + "is_target_language": true, + "tags": [] + }, + { + "comment": "This can be used in rules as a target selector that selects\nall the files regardless of their extension or contents.\nWhen no target analyzer is specified, the spacegrep engine shall\nbe used.\n", + "id": "generic", + "name": "Generic", + "keys": [ + "generic", + "spacegrep" + ], + "maturity": "alpha", + "exts": [ + "" + ], + "example_ext": ".generic", + "excluded_exts": [], + "reverse_exts": null, + "shebangs": [], + "is_target_language": false, + "tags": [] + }, + { + "comment": "Alternative engine for generic files", + "id": "aliengrep", + "name": "Aliengrep", + "keys": [ + "aliengrep" + ], + "maturity": "develop", + "exts": [ + "" + ], + "example_ext": null, + "excluded_exts": [], + "reverse_exts": [], + "shebangs": [], + "is_target_language": false, + "tags": [] + }, + { + "comment": "This can be used in rules as a target selector that selects\nall the files regardless of their extension or contents.\nWhen no target analyzer is specified, the regex engine shall be used.\n", + "id": "regex", + "name": "regex", + "keys": [ + "regex", + "none" + ], + "maturity": "develop", + "exts": [ + "" + ], + "example_ext": null, + "excluded_exts": [], + "reverse_exts": [], + "shebangs": [], + "is_target_language": false, + "tags": [] + } +] \ No newline at end of file diff --git a/openspec/changes/add-thorough-codebase-validation/design.md b/openspec/changes/add-thorough-codebase-validation/design.md new file mode 100644 index 00000000..fb2c2fbe --- /dev/null +++ b/openspec/changes/add-thorough-codebase-validation/design.md @@ -0,0 +1,70 @@ +# Design: Thorough Codebase Validation Depth + +## Overview + +This change adds a clear, documented path for thorough in-depth codebase validation in three modes: (a) sidecar for unmodified code, (b) 
contract-decorated codebases (full contract-test stack), (c) dogfooding specfact-cli on itself. No new external systems; integration is with existing repro, sidecar, and contract-test tooling. + +## Architecture + +``` +β”Œβ”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β” +β”‚ Validation modes β”‚ +β”œβ”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€ +β”‚ β”‚ +β”‚ (1) Quick check specfact repro [--repo PATH] β”‚ +β”‚ β†’ ruff, semgrep, basedpyright, CrossHair (budget), pytest β”‚ +β”‚ β†’ Optional: --sidecar --sidecar-bundle NAME β”‚ +β”‚ β”‚ +β”‚ (2) Thorough (contracts) hatch run contract-test-full β”‚ +β”‚ β†’ contract-test-contracts + contract-test-exploration + scenarios β”‚ +β”‚ β†’ Use when repo has @icontract / @beartype β”‚ +β”‚ β”‚ +β”‚ (3) Dogfooding specfact repro --repo . && contract-test-full β”‚ +β”‚ β†’ Same as (1)+(2) on specfact-cli repo; optional --sidecar β”‚ +β”‚ β”‚ +β””β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”˜ +``` + +## Integration Points + +### Repro Checker + +- **Current**: `ReproChecker.run_all_checks()` runs ruff, semgrep, basedpyright, CrossHair (with budget), optional pytest contracts/smoke. CrossHair targets: either modules with "CrossHair property test" marker or all expanded src/tools. +- **Change**: Optional "deep" mode (e.g. `--validation deep` or `--crosshair-timeout N`): when set, pass higher per-path timeout to CrossHair or restrict targets to a configurable list (e.g. critical adapters). 
Implementation: extend repro CLI options and pass through to `_build_crosshair_env` / CrossHair command; optional config file or env for target list. +- **Contract**: No change to default behavior; new options are additive. + +### Sidecar + +- **Current**: `specfact repro --sidecar --sidecar-bundle NAME` runs main checks then `run_sidecar_validation()` (unannotated detection, harness generation, CrossHair/Specmatic). No-edit path. +- **Change**: Document sidecar as the "thorough validation for unmodified code" path; ensure error messages and docs direct users to install CrossHair when sidecar is used. No code change required for sidecar itself unless we add a "deep" CrossHair option for sidecar runs (optional follow-up). + +### Contract-Test Stack + +- **Current**: `hatch run contract-test` (auto), `contract-test-contracts`, `contract-test-exploration`, `contract-test-scenarios`, `contract-test-full`. CrossHair timeout is configurable via STANDARD_CROSSHAIR_TIMEOUT and fast variant. +- **Change**: Document `contract-test-full` as the recommended thorough path for contract-decorated codebases. Optional: add hatch script or doc for "contract-test-exploration-deep" with higher timeout (e.g. 60s per path) for critical modules; can be a one-line script that invokes crosshair check with args. No change to contract_first_smart_test.py unless we add a "deep" exploration variant. + +### CI / Dogfooding + +- **Current**: CI may run repro, tests, lint separately. +- **Change**: Document dogfooding as: (1) `specfact repro --repo .`, (2) `hatch run contract-test-full` (or equivalent), (3) optional `specfact repro --sidecar --sidecar-bundle `. Optionally add a CI job (e.g. `validate-thorough`) that runs (1)+(2) so specfact-cli validates itself on every PR or nightly. Job definition lives in `.github/workflows/`; this change can add the job or only document the commands for manual/periodic runs. 
+ +## Optional Deep CrossHair + +- **Repro**: Add `--crosshair-per-path-timeout N` (default unchanged) so users can increase depth for repro runs. Implement by appending to CrossHair command in ReproChecker when building `crosshair_base`. +- **Config**: Optional `[tool.specfact]` or env (e.g. `SPECFACT_CROSSHAIR_DEEP_MODULES`) listing modules for deep-only runs; if present, repro could run CrossHair twice (normal pass + deep pass on listed modules) or once with higher timeout on listed modules. Prefer simple CLI flag first; config in a follow-up. + +## Contract Enforcement + +- New or modified public APIs (e.g. repro CLI options) must keep `@icontract` and `@beartype` where applicable. ReproChecker and sidecar orchestrator already have contracts; new code paths must follow the same pattern. +- No new adapters or bridge protocols; validation is local to CLI and hatch scripts. + +## Risks and Mitigations + +- **CrossHair timeout increase**: Longer timeouts can make CI slow. Mitigation: deep mode is opt-in; default budget unchanged; document recommended timeouts for CI vs. local. +- **Documentation drift**: Mitigation: single "Thorough codebase validation" section with copy-paste commands; link from README so it stays discoverable. + +## Out of Scope + +- New testing framework or external services. +- Changing default repro or contract-test behavior for users who do not opt in. +- Implementing full "validate thoroughly" as a single new CLI command (documented composition of repro + contract-test-full is sufficient for this change; a single command can be a follow-up). 
diff --git a/openspec/changes/add-thorough-codebase-validation/proposal.md b/openspec/changes/add-thorough-codebase-validation/proposal.md new file mode 100644 index 00000000..0c3378bf --- /dev/null +++ b/openspec/changes/add-thorough-codebase-validation/proposal.md @@ -0,0 +1,32 @@ +# Change: Add thorough in-depth codebase validation for sidecar, contract-decorated codebases, and dogfooding + +## Why + +Runtime bugs often slip past contract decorators and tests because: (a) contracts only fire at decorated boundaries when those paths are executed; (b) CrossHair in `specfact repro` shares a single time budget over all source and may not reach deep paths; (c) Semgrep is pattern-based and does not reason about logic. Users need a reliable way to validate codebases in three modes: (a) **Sidecar**β€”unmodified original code, no edits to target repo; (b) **Existing codebases**β€”when code already uses `@icontract`/`@beartype`, run full contract exploration and scenario tests; (c) **Dogfooding**β€”use SpecFact CLI to validate SpecFact CLI itself so the pipeline is proven on real complexity. Adding clear workflows, CI integration, and optional deeper CrossHair/Semgrep usage makes in-depth validation repeatable and enables production-grade confidence. + +## What Changes + +- **EXTEND**: Document and wire a single "thorough validation" path that supports: (1) sidecar for unmodified code (existing `specfact repro --sidecar --sidecar-bundle`), (2) contract-decorated codebases via `hatch run contract-test-full` (contracts + CrossHair exploration + scenarios), (3) dogfooding by running that path on the specfact-cli repo. +- **EXTEND**: Ensure `specfact repro` (with optional `--sidecar`) and the contract-test layers are clearly documented as the recommended in-depth validation flow; add a repro option or doc section for "deep" CrossHair (e.g. higher per-path timeout or focused modules) so users can target critical paths. +- **NEW**: Add a small validation-mode or preset (e.g. 
`--validation deep` or documented `specfact repro` + `hatch run contract-test-exploration` with increased timeout) so CI or local runs can explicitly request thorough validation without editing target code. + - **EXTEND**: Optional CrossHair target selection: allow repro or config to restrict CrossHair to a list of modules (e.g. critical adapters) with higher per-path timeout so budget is spent where it matters. + - **EXTEND**: Document dogfooding: how to run full validation (repro + contract-test-full or equivalent) on specfact-cli; add or reference a CI job or local checklist so specfact-cli validates itself before release. + +## Capabilities + +- **codebase-validation-depth**: Thorough in-depth validation supporting sidecar (unmodified code), contract-decorated codebases (full contract-test stack), and dogfooding (specfact-cli on itself) with clear workflows and optional deep CrossHair/Semgrep usage. + +## Impact + +- **Affected specs**: New `openspec/specs/codebase-validation-depth/spec.md` (or under existing validation/sidecar specs if preferred). +- **Affected code**: `src/specfact_cli/commands/repro.py` (optional deep-validation mode or flags), `src/specfact_cli/validators/repro_checker.py` (optional CrossHair target list / per-path timeout override), config or docs for contract-test + repro combination; possibly `pyproject.toml` or hatch scripts for a single "validate thoroughly" command. +- **Affected documentation**: Add or extend a reference section for "Thorough codebase validation" covering: (1) sidecar for unmodified code, (2) contract-decorated codebases (contract-test-full), (3) dogfooding specfact-cli; document optional deep CrossHair and Semgrep usage; update README or getting-started if needed. No new top-level pages required if content fits in reference/validation. 
+- **Integration points**: Existing `specfact repro`, `specfact validate sidecar`, `hatch run contract-test-*`, CrossHair, Semgrep; CI workflows (optional new or updated job for thorough validation). +- **Backward compatibility**: Additive only; existing repro and sidecar behavior unchanged unless user opts into deep mode or new flags. + +## Source Tracking + +- **GitHub Issue**: #163 +- **Issue URL**: +- **Repository**: nold-ai/specfact-cli +- **Last Synced Status**: proposed diff --git a/openspec/changes/add-thorough-codebase-validation/specs/codebase-validation-depth/spec.md b/openspec/changes/add-thorough-codebase-validation/specs/codebase-validation-depth/spec.md new file mode 100644 index 00000000..9d5f0940 --- /dev/null +++ b/openspec/changes/add-thorough-codebase-validation/specs/codebase-validation-depth/spec.md @@ -0,0 +1,131 @@ +# Codebase Validation Depth + +## ADDED Requirements + +### Requirement: Sidecar Validation for Unmodified Code + +The CLI SHALL support thorough in-depth validation of a target repository without modifying the target's source (sidecar mode). + +**Rationale**: Users need to validate third-party or legacy codebases where adding contract decorators or changing code is not an option. + +#### Scenario: Run Sidecar Validation on Unmodified Repo + +**Given**: A repository with no contract decorators and a valid sidecar bundle name + +**When**: The user runs `specfact repro --repo --sidecar --sidecar-bundle ` + +**Then**: SpecFact runs main repro checks (lint, semgrep, type-check, CrossHair if available) and then sidecar validation (unannotated detection, harness generation, CrossHair/Specmatic on generated harnesses) without editing the target repo + +**Acceptance Criteria**: + +- Sidecar runs after main repro checks when `--sidecar` and `--sidecar-bundle` are provided +- Unannotated code is detected; harnesses are generated in a no-edit path +- User receives a summary (e.g. 
CrossHair confirmed/not confirmed/violations) for unannotated code +- No files in the target repo are modified by sidecar validation + +#### Scenario: Sidecar Optional When CrossHair Unavailable + +**Given**: CrossHair is not installed in the target repo environment + +**When**: The user runs `specfact repro --sidecar --sidecar-bundle ` + +**Then**: Main repro checks run; sidecar is attempted and reports clearly (e.g. skipped or partial) when CrossHair or dependencies are missing, without failing the entire run if sidecar is advisory + +**Acceptance Criteria**: + +- Clear messaging when sidecar cannot run (tool missing, bundle invalid) +- Non-zero exit only for main check failures; sidecar failure can be advisory per existing repro behavior + +--- + +### Requirement: Thorough Validation for Contract-Decorated Codebases + +The CLI and project tooling SHALL support a documented "thorough" validation path for repositories that already use `@icontract` and `@beartype`. + +**Rationale**: Existing codebases with contracts should be able to run full contract exploration and scenario tests in a single, repeatable flow. + +#### Scenario: Run Full Contract-Stack Validation + +**Given**: A repository with contract decorators on public APIs and a standard layout (src/, tests/) + +**When**: The user runs the full contract-test stack (e.g. `hatch run contract-test-full` or equivalent: contract validation + CrossHair exploration + scenario tests) + +**Then**: All layers run (runtime contract validation, CrossHair exploration, scenario/E2E tests) and results are reported; exit code reflects failures + +**Acceptance Criteria**: + +- `hatch run contract-test-full` (or documented equivalent) runs contracts, exploration, and scenarios +- Exploration layer uses CrossHair with configurable timeout (e.g. 
from `[tool.crosshair]` or env) +- Documentation states that this is the recommended "thorough" path for contract-decorated codebases +- CI can invoke this path for PR validation + +#### Scenario: CrossHair Exploration with Increased Depth + +**Given**: A user or CI wants deeper CrossHair analysis on critical modules + +**When**: The user runs CrossHair with higher per-path timeout (e.g. via `crosshair check --per_path_timeout=60 ` or a documented repro/contract-test option) + +**Then**: Critical modules are analyzed with longer timeout so deeper paths can be explored; results are reported + +**Acceptance Criteria**: + +- Documented way to run CrossHair with higher per-path timeout (CLI flag, config, or hatch script) +- Optional list of modules for "deep" exploration (e.g. config or flag) so budget is spent on critical paths +- No change to default repro budget unless user opts in + +--- + +### Requirement: Dogfooding SpecFact CLI on Itself + +The project SHALL document and support using SpecFact's own validation pipeline to verify the specfact-cli repository (dogfooding). + +**Rationale**: Proves the pipeline on real complexity and catches regressions before release. + +#### Scenario: Run Thorough Validation on SpecFact CLI Repo + +**Given**: The specfact-cli repository with existing contracts, tests, and sidecar capability + +**When**: A maintainer runs the documented dogfooding validation (e.g. 
`specfact repro --repo .` plus `hatch run contract-test-full`, optionally `specfact repro --sidecar --sidecar-bundle NAME`) + +**Then**: All applicable checks run (repro: lint, semgrep, type-check, CrossHair; contract-test-full: contracts, exploration, scenarios); results are reported; exit code reflects pass/fail + +**Acceptance Criteria**: + +- Documentation describes the exact commands and order for dogfooding (repro + contract-test-full; optional sidecar) +- CI or release checklist can include these steps so specfact-cli validates itself before release +- No new repo-specific code required beyond existing repro and contract-test; documentation and optional CI job are sufficient + +#### Scenario: Dogfooding Includes Optional Sidecar + +**Given**: SpecFact CLI repo and a sidecar bundle that includes specfact-cli + +**When**: Maintainer runs `specfact repro --repo . --sidecar --sidecar-bundle NAME` + +**Then**: Main repro checks run; sidecar runs on unannotated code in specfact-cli and reports CrossHair/sidecar results + +**Acceptance Criteria**: + +- Sidecar can target specfact-cli repo when bundle is configured +- Documented as optional step for dogfooding to expand coverage to unannotated code + +--- + +### Requirement: Clear Documentation of Validation Modes + +The documentation SHALL describe four validation modes: (1) quick check (repro), (2) thorough contract-decorated (contract-test-full), (3) sidecar for unmodified code, and (4) dogfooding. + +**Rationale**: Users need to choose the right mode for their context (unmodified repo vs. contract-decorated vs. validating SpecFact itself). 
+ +#### Scenario: User Chooses Validation Mode from Docs + +**Given**: User wants to validate a codebase (own repo with contracts / third-party unmodified / specfact-cli itself) + +**When**: User reads the "Thorough codebase validation" (or equivalent) section in docs + +**Then**: User finds: (a) when to use sidecar (unmodified code), (b) when to use contract-test-full (contract-decorated), (c) how to dogfood specfact-cli; and the exact commands or presets + +**Acceptance Criteria**: + +- Single reference section or guide covering all three use cases +- Commands are copy-pasteable; any required env or config is stated +- Link from README or getting-started to this section where appropriate diff --git a/openspec/changes/add-thorough-codebase-validation/tasks.md b/openspec/changes/add-thorough-codebase-validation/tasks.md new file mode 100644 index 00000000..bdb8f68e --- /dev/null +++ b/openspec/changes/add-thorough-codebase-validation/tasks.md @@ -0,0 +1,51 @@ +# Tasks: Add thorough in-depth codebase validation (sidecar, contract-decorated, dogfooding) + +## 1. Create git branch from dev + +- [ ] 1.1 Ensure we're on dev and up to date: `git checkout dev && git pull origin dev` +- [ ] 1.2 Create branch: `git checkout -b feature/add-thorough-codebase-validation` (or `gh issue develop --repo nold-ai/specfact-cli --name feature/add-thorough-codebase-validation --checkout` if issue exists) +- [ ] 1.3 Verify branch was created: `git branch --show-current` + +## 2. Verify spec deltas (SDD: specs first) + +- [ ] 2.1 Confirm `specs/codebase-validation-depth/spec.md` exists and is complete (ADDED requirements, Given/When/Then scenarios). +- [ ] 2.2 Map scenarios to implementation: Sidecar unmodified, Sidecar optional when CrossHair missing, Full contract-stack, CrossHair deep, Dogfooding commands, Dogfooding optional sidecar, Documentation of validation modes. + +## 3. 
Optional: Deep CrossHair / repro options + +- [ ] 3.1 In `src/specfact_cli/commands/repro.py`: add optional `--crosshair-per-path-timeout N` (default: use existing budget behavior) so users can increase CrossHair depth for repro runs. +- [ ] 3.2 In `src/specfact_cli/validators/repro_checker.py`: when building CrossHair command, append `--per_path_timeout N` when repro option is set; keep default unchanged. +- [ ] 3.3 Add unit or integration test that repro with `--crosshair-per-path-timeout` passes through to CrossHair command (or skip if deferred to docs-only). +- [ ] 3.4 Run format and type-check: `hatch run format`, `hatch run type-check`. + +## 4. Documentation: Thorough codebase validation + +- [ ] 4.1 Add or extend a reference section "Thorough codebase validation" (e.g. in `docs/reference/` or under existing validation doc) covering: (1) quick check (`specfact repro`), (2) thorough contract-decorated (`hatch run contract-test-full`), (3) sidecar for unmodified code (`specfact repro --sidecar --sidecar-bundle `), (4) dogfooding (repro + contract-test-full on specfact-cli; optional sidecar). +- [ ] 4.2 Document optional deep CrossHair: how to run CrossHair with higher per-path timeout (repro flag or `crosshair check --per_path_timeout=60 `); optional module list for critical paths. +- [ ] 4.3 Add dogfooding checklist or CI note: exact commands and order for validating specfact-cli (repro + contract-test-full; optional sidecar); link from README or contributing guide if appropriate. +- [ ] 4.4 Ensure docs are copy-pasteable; state any required env or config (e.g. `[tool.crosshair]`, sidecar bundle). +- [ ] 4.5 If adding a new doc page: set front-matter (layout, title, permalink, description) and update `docs/_layouts/default.html` sidebar if needed. + +## 5. Optional: CI job for thorough validation (dogfooding) + +- [ ] 5.1 Add or update a CI job (e.g. 
in `.github/workflows/`) that runs `specfact repro --repo .` and `hatch run contract-test-full` (or equivalent) so specfact-cli validates itself on PR or nightly. Use reasonable timeouts to avoid flakiness. +- [ ] 5.2 Document the job in the "Thorough codebase validation" section; mark as optional if job is added in a follow-up. + +## 6. Quality gates + +- [ ] 6.1 Run format and type-check: `hatch run format`, `hatch run type-check`. +- [ ] 6.2 Run contract test: `hatch run contract-test`. +- [ ] 6.3 Run full test suite: `hatch run smart-test-full` (or `hatch test --cover -v`). +- [ ] 6.4 Ensure any new or modified public APIs have `@icontract` and `@beartype` where applicable. + +## 7. Documentation research and review (per openspec/config.yaml) + +- [ ] 7.1 Identify affected documentation: new or extended "Thorough codebase validation" section; README or contributing link if added; no new top-level pages unless created in task 4. +- [ ] 7.2 Verify front-matter and sidebar if a new page was added; confirm no broken links. + +## 8. Create Pull Request to dev + +- [ ] 8.1 Ensure all changes are committed: `git add .` and `git commit -m "feat: add thorough codebase validation (sidecar, contract-decorated, dogfooding)"` +- [ ] 8.2 Push to remote: `git push origin feature/add-thorough-codebase-validation` +- [ ] 8.3 Create PR: `gh pr create --repo nold-ai/specfact-cli --base dev --head feature/add-thorough-codebase-validation --title "feat: add thorough codebase validation (sidecar, contract-decorated, dogfooding)" --body-file ` (use repo PR template; add OpenSpec change ID `add-thorough-codebase-validation` and summary). +- [ ] 8.4 Verify PR and branch are linked to issue (if issue was created) in Development section. 
diff --git a/openspec/changes/archive/2026-01-29-improve-ado-backlog-refine-error-logging/CHANGE_VALIDATION.md b/openspec/changes/archive/2026-01-29-improve-ado-backlog-refine-error-logging/CHANGE_VALIDATION.md new file mode 100644 index 00000000..e8ca5d67 --- /dev/null +++ b/openspec/changes/archive/2026-01-29-improve-ado-backlog-refine-error-logging/CHANGE_VALIDATION.md @@ -0,0 +1,85 @@ +# Change Validation Report: improve-ado-backlog-refine-error-logging + +**Validation Date**: 2026-01-29 +**Change Proposal**: [proposal.md](./proposal.md) +**Validation Method**: Dry-run simulation and production-grade UX review +**Source**: [GitHub Issue #162](https://github.com/nold-ai/specfact-cli/issues/162) + +## Executive Summary + +- **Breaking Changes**: 0 detected +- **Dependent Files**: Limited to `src/specfact_cli/adapters/ado.py` and optionally `runtime.py`; no public API changes +- **Impact Level**: Low +- **Validation Result**: Pass +- **Production-grade UX**: Reviewed; recommendations below for implementation phase + +## Breaking Changes Detected + +None. Change only extends failure paths (capture response, log in debug, surface message + hint). Success paths and public method signatures unchanged. + +## Dependencies Affected + +- **Critical**: None +- **Recommended**: Ensure all ADO PATCH call sites (backlog refine body, status update, comment, create work item) use the same helper so behavior is consistent +- **Optional**: Extend `debug-logging` main spec with a short β€œAPI failure logging” requirement so future adapters follow the same pattern + +## Impact Assessment + +- **Code Impact**: ADO adapter only; internal helper and extended exception handling +- **Test Impact**: New unit tests for helper and for user message content; optional integration test for debug log content +- **Documentation Impact**: Optional: add a doc link in the user-facing hint (e.g. 
ADO custom mapping) and/or one line β€œRun with --debug and check ~/.specfact/logs for full response and patch paths” +- **Release Impact**: Patch (fix + improved UX) + +## Production-Grade UX Review + +### What the change gets right (strong) + +1. **User sees cause without --debug**: ADO message (e.g. β€œCannot find field System.AcceptanceCriteria”) and mapping hint are required in the console message, so users can act without enabling debug. +2. **Debug log has full context**: Response body snippet (redacted, truncated) and list of patch paths make the failing field obvious and support custom templates and future improvements. +3. **Consistency**: Same behavior at all ADO PATCH sites (refine body, status, comment, create) avoids inconsistent UX. +4. **Safe logging**: Truncation and redaction avoid leaking secrets and huge payloads. + +### Recommendations for implementation (make it β€œreally good”) + +1. **Actionable hint with doc link**: In implementation, include a concrete link in the hint when available (e.g. β€œSee https://github.com/nold-ai/specfact-cli/docs/... or ado_custom.yaml”) so corporate users can resolve mapping issues without searching. +2. **Optional --debug pointer**: Consider appending to the hint: β€œRun with --debug and check ~/.specfact/logs for full response and patch paths.” so users know how to get more detail when needed. +3. **Log every failed attempt**: In the backlog-refine PATCH path there are multiple retries (omit multilineFieldsFormat, replace add with replace, HTML fallback). Call the logging helper before each retry and on final failure so the debug log shows the sequence of attempts and the final response/paths. +4. **Highlight field name in message**: When the ADO message contains a field reference (e.g. β€œSystem.AcceptanceCriteria”), consider quoting or emphasizing it in the user message (e.g. β€œField β€˜System.AcceptanceCriteria’ not found. Check custom field mapping…”) so the failing field is obvious at a glance. 
+ +### Production readiness + +The change is sufficient to fix issue #162 and is suitable for production. Applying the four recommendations above will make error UX and debug usefulness β€œreally good” for all customer sizes and complex infrastructures, and will make future bug reports (e.g. other custom templates) easier to diagnose. + +## Format Validation + +- **proposal.md Format**: Pass + - Title: `# Change: ...` + - Sections: Why, What Changes, Capabilities, Impact, Source Tracking + - Capabilities: api-error-diagnostics with spec file +- **tasks.md Format**: Pass + - Branch creation first (task 1), PR creation last (task 5) + - Numbered tasks and sub-tasks; quality gates (format, type-check, tests) +- **specs/api-error-diagnostics/spec.md Format**: Pass + - Delta header: `## ADDED Requirements` + - Requirements with `#### Scenario:` blocks +- **design.md Format**: Pass + - Bridge adapter integration, error handling strategy, sequence diagram + +## OpenSpec Validation + +- **Status**: Pass +- **Validation Command**: `openspec validate improve-ado-backlog-refine-error-logging --strict` +- **Issues Found**: 0 (after adding `## ADDED Requirements` to spec) +- **Re-validated**: Yes + +## Validation Artifacts + +- Change directory: `openspec/changes/improve-ado-backlog-refine-error-logging/` +- Plan source: `specfact-cli-internal/docs/internal/implementation/2026-01-29-ado-backlog-refine-error-logging-plan.md` + +## Next Steps + +1. Review this validation report and the production-grade UX recommendations. +2. Implement the change (branch `bugfix/improve-ado-backlog-refine-error-logging`, then code and tests). +3. During implementation, apply the four β€œreally good” recommendations where feasible (doc link, --debug pointer, log each attempt, highlight field name). +4. Run full test suite and create PR to `dev` with Fixes nold-ai/specfact-cli#162. 
diff --git a/openspec/changes/archive/2026-01-29-improve-ado-backlog-refine-error-logging/design.md b/openspec/changes/archive/2026-01-29-improve-ado-backlog-refine-error-logging/design.md new file mode 100644 index 00000000..9e4d4ecc --- /dev/null +++ b/openspec/changes/archive/2026-01-29-improve-ado-backlog-refine-error-logging/design.md @@ -0,0 +1,55 @@ +# Design: Improve ADO backlog refine error logging and user-facing error UX + +## Bridge adapter integration + +- **Scope**: ADO adapter (`src/specfact_cli/adapters/ado.py`) only for this change. The pattern (capture response + operations, log in debug, surface message + hint) can be reused for GitHub or other adapters in a follow-up. +- **Existing hooks**: `debug_log_operation(operation, target, status, error=..., extra=...)` already supports `extra`; we will pass `response_body` (truncated/redacted) and `patch_paths` (list of strings). No change to `runtime.py` contract unless we need a shared truncation helper. +- **Redaction**: Use existing `LoggerSetup.redact_secrets` for any string or dict passed into `extra` so tokens and URLs are redacted before writing to the debug log file. + +## Error handling strategy + +1. **Capture**: On `requests.HTTPError` (or any PATCH failure), obtain `e.response`; parse `e.response.json()` for `message`; fallback to `e.response.text[:500]`. Extract patch paths from the `operations` list (the document we sent): `[op.get("path") for op in operations]`. +2. **Debug log**: If `is_debug_mode()`, call `debug_log_operation("ado_patch", url_redacted, "failed", error=message_or_str(e), extra={"response_body": snippet, "patch_paths": paths})`. Truncate snippet to ~1–2 KB; redact via `LoggerSetup.redact_secrets(snippet)` or redact the whole `extra` dict. +3. **User message**: Before re-raise, build a user-facing string: `f"{ado_message} Check custom field mapping; see ado_custom.yaml or documentation."` (or similar). Either set this as the exception message (e.g. 
wrap in a custom exception or replace `e.args`) or ensure `console.print(...)` is called with this message so the user always sees it. +4. **Re-raise**: Re-raise the original exception (or a wrapper that preserves cause) so callers and tests still get an exception; the console message is already printed so the user has context. + +## Sequence (backlog refine PATCH failure) + +``` +User CLI ado.py ADO API + | | | | + | refine | | | + | --write | PATCH body | PATCH /workitems/:id | + | ----------> | -----------------> | ----------------------> | + | | | 400 | + | | | <----------------------| + | | | parse response.message | + | | | extract patch_paths | + | | | if debug: log_operation| + | | | console.print(msg+hint)| + | | | raise | + | | <----------------- | | + | [red] msg | print already | | + | + hint | done in ado.py | | + | <----------- | | | +``` + +## Contract enforcement + +- No new public API surface; only internal helper (e.g. `_log_ado_patch_failure`) and extended behavior of existing PATCH paths. Existing `@icontract` and `@beartype` on public methods remain; the helper can be private and optionally typed. +- Tests: unit tests for the helper (given mock response and operations, assert debug_log_operation called with correct extra; assert user message contains ADO text and hint). Integration test: simulate 400 with JSON body, run with `--debug`, assert debug log file contains patch paths and response snippet. + +## Risks and mitigations + +- **Large response body**: Truncate to 1–2 KB and redact; avoid logging huge HTML or JSON. +- **Non-JSON response**: Use `response.text[:N]` and do not fail; log the truncated text. +- **Regression on success path**: No change to success path; only failure branches are extended. + +## Production-grade UX (implementation guidance) + +To make error UX and debug usefulness β€œreally good” for all customer sizes and complex infrastructures: + +1. 
**Actionable hint with doc link**: Include a concrete doc link in the user message when available (e.g. ADO custom mapping docs) so users can resolve mapping issues without searching. +2. **Optional --debug pointer**: Append to the hint: β€œRun with --debug and check ~/.specfact/logs for full response and patch paths.” so users know how to get more detail. +3. **Log every failed attempt**: In paths with retries (e.g. backlog-refine PATCH), call the logging helper before each retry and on final failure so the debug log shows the sequence of attempts and the final response/paths. +4. **Highlight field name**: When the ADO message contains a field reference (e.g. β€œSystem.AcceptanceCriteria”), quote or emphasize it in the user message (e.g. β€œField β€˜System.AcceptanceCriteria’ not found. Check custom field mapping…”) so the failing field is obvious at a glance. diff --git a/openspec/changes/archive/2026-01-29-improve-ado-backlog-refine-error-logging/proposal.md b/openspec/changes/archive/2026-01-29-improve-ado-backlog-refine-error-logging/proposal.md new file mode 100644 index 00000000..c889bd67 --- /dev/null +++ b/openspec/changes/archive/2026-01-29-improve-ado-backlog-refine-error-logging/proposal.md @@ -0,0 +1,33 @@ +# Change: Improve ADO backlog refine error logging and user-facing error UX + +## Why + +When running SpecFact backlog refinement writeback against Azure DevOps (`specfact backlog refine ado ... --import-from-tmp --write`), API failures (e.g. HTTP 400 due to custom ADO process templates missing fields like `System.AcceptanceCriteria`) produce a generic "400 Client Error: Bad Request" with no indication of which field or patch operation failed. Even with `--debug`, logs do not include the ADO response body or the JSON Patch paths, so root-cause diagnosis required local instrumentation. This blocks production use in enterprises with custom ADO templates and prevents users from self-serving (e.g. applying custom mapping) without maintainer help. 
Improving error diagnostics and user-facing messages makes the CLI production-grade for all customer sizes and enables faster feedback loops for future improvements. + +## What Changes + +- **EXTEND**: On ADO PATCH failure (backlog refine body update, status update, comment, create work item), capture response status, parsed ADO message (e.g. from `response.json().get("message", response.text[:500])`), and the list of JSON Patch operation paths; in debug mode log via `debug_log_operation(..., extra={"response_body": safe_snippet, "patch_paths": [...]})` so the failing field is identifiable without code changes. +- **EXTEND**: When re-raising or surfacing the error to the user, include the ADO error message (e.g. "TF51535: Cannot find field System.AcceptanceCriteria") and a short, actionable hint (e.g. "Check custom field mapping; see ado_custom.yaml or docs."). +- **NEW**: Add a small helper (e.g. in `ado.py`) that, given `response` (or `HTTPError.response`) and the patch `operations`, builds a structured error summary (status_code, message, patch_paths) and optionally logs it via `debug_log_operation`; use this helper at all ADO PATCH failure sites for consistency. +- **EXTEND**: Ensure `debug_log_operation` (or callers) safely truncate and redact response body in `extra` (e.g. 1–2 KB max, redact via `LoggerSetup.redact_secrets`) so ADO error payloads are safe to log. +- **EXTEND**: Apply the same error-handling and debug-logging pattern to other ADO PATCH call sites (status update, comment, create work item) so behavior is consistent across the adapter. +- **OPTIONAL (follow-up)**: Document custom mapping and `ado_custom.yaml` in user-facing docs; consider pre-flight field existence validation in a later change. + +## Capabilities + +- **api-error-diagnostics**: Structured API error capture, debug log content (response body snippet, patch paths), and user-facing error messages for ADO (and consistent pattern for other adapters). 
+ +## Impact + +- **Affected specs**: `openspec/specs/debug-logging/spec.md` (extend with API failure logging requirements), new `openspec/specs/api-error-diagnostics/spec.md`. +- **Affected code**: `src/specfact_cli/adapters/ado.py` (PATCH failure handling, helper, debug logging), optionally `src/specfact_cli/runtime.py` (truncation/redaction for large `extra`); `src/specfact_cli/commands/backlog_commands.py` and `src/specfact_cli/sync/bridge_sync.py` (temp dir: use `tempfile.gettempdir()` instead of hard-coded `/tmp` for export/import paths). +- **Affected documentation**: `docs/reference/debug-logging.md` (ADO PATCH failure content, "Examining ADO API Errors"); `docs/guides/troubleshooting.md` (subsection "Backlog refine or work item PATCH fails (400/422)"); `docs/adapters/azuredevops.md` (error diagnostics / troubleshooting link). No new pages; no change to `docs/index.md` or `docs/_layouts/default.html`. +- **Integration points**: Existing `debug_log_operation`, `LoggerSetup.redact_secrets`, console/exception handling in backlog refine and ADO adapter. +- **Backward compatibility**: No change to success paths; only failure paths gain richer logging and messages.
+ +## Source Tracking + +- **GitHub Issue**: #162 +- **Issue URL**: https://github.com/nold-ai/specfact-cli/issues/162 +- **Repository**: nold-ai/specfact-cli +- **Last Synced Status**: proposed diff --git a/openspec/changes/archive/2026-01-29-improve-ado-backlog-refine-error-logging/specs/api-error-diagnostics/spec.md b/openspec/changes/archive/2026-01-29-improve-ado-backlog-refine-error-logging/specs/api-error-diagnostics/spec.md new file mode 100644 index 00000000..aa4103b9 --- /dev/null +++ b/openspec/changes/archive/2026-01-29-improve-ado-backlog-refine-error-logging/specs/api-error-diagnostics/spec.md @@ -0,0 +1,80 @@ +# api-error-diagnostics Specification + +## Purpose + +Define how SpecFact CLI captures, logs, and surfaces API errors (especially ADO PATCH failures) so that with `--debug` users see response body and patch paths, and without debug users see clear, actionable messages (e.g. missing field name and custom-mapping hint). + +## ADDED Requirements + +### Requirement: ADO PATCH failure debug logging + +When an ADO PATCH request fails (HTTP 4xx/5xx), the system SHALL log structured diagnostic data in debug mode so the failing field and server message are identifiable. + +#### Scenario: Debug log contains response and patch paths on PATCH failure + +- **GIVEN** debug mode is enabled (`--debug`) +- **AND** an ADO PATCH request fails (e.g. 400 Bad Request) +- **WHEN** the failure is handled (before re-raise or user message) +- **THEN** `debug_log_operation` is called with operation (e.g. `ado_patch`), target (URL redacted), status (e.g. `failed`), error (exception or message string) +- **AND** `extra` includes a safe snippet of the response body (e.g. parsed `message` or truncated `response.text`, redacted, max ~1–2 KB) +- **AND** `extra` includes the list of JSON Patch operation paths attempted (e.g. `["/fields/System.AcceptanceCriteria", "/fields/System.Description"]`) +- **AND** sensitive values in response body and extra are redacted (e.g.
via `LoggerSetup.redact_secrets`) + +#### Scenario: No sensitive data in debug log + +- **GIVEN** debug mode is enabled and ADO returns an error body containing tokens or secrets +- **WHEN** the error is logged to the debug log +- **THEN** the logged snippet is redacted so that tokens, keys, and known secret patterns are not written in plain text + +### Requirement: User-facing error message on ADO PATCH failure + +When an ADO PATCH request fails, the user SHALL see the server error message and an actionable hint without requiring `--debug`. + +#### Scenario: Console shows ADO message and mapping hint on 400 + +- **GIVEN** an ADO PATCH request fails with HTTP 400 and a body containing a message (e.g. "TF51535: Cannot find field System.AcceptanceCriteria") +- **WHEN** the error is surfaced to the user (console or exception) +- **THEN** the visible message includes the ADO error message (e.g. "Cannot find field System.AcceptanceCriteria") +- **AND** the visible message includes a short hint that custom field mapping may be required (e.g. "Check custom field mapping; see ado_custom.yaml or documentation.") +- **AND** the message is concise and actionable (no raw stack trace unless debug) + +#### Scenario: Re-raised exception carries ADO context + +- **GIVEN** the implementation re-raises an exception after handling ADO PATCH failure +- **WHEN** the exception is raised +- **THEN** the exception message (or attached attribute) includes the ADO error message and mapping hint so that upstream handlers or tests can display or assert on it + +#### Scenario: User message highlights failing field when present + +- **GIVEN** the ADO response message contains a field reference (e.g. β€œCannot find field System.AcceptanceCriteria”) +- **WHEN** the error is surfaced to the user +- **THEN** the visible message quotes or emphasizes the field reference (e.g. 
β€œField β€˜System.AcceptanceCriteria’ not found”) so the failing field is obvious at a glance +- **AND** the hint about custom mapping follows + +### Requirement: Consistent behavior across ADO PATCH call sites + +The same error capture, debug logging, and user-facing message behavior SHALL apply to all ADO PATCH operations (backlog refine body update, work item status update, add comment, create work item). + +#### Scenario: Backlog refine body PATCH failure + +- **GIVEN** `_update_work_item_body` or the backlog-refine PATCH path fails with 400/422 +- **WHEN** the failure is handled +- **THEN** debug log (if enabled) contains response snippet and patch paths +- **AND** user sees ADO message and mapping hint + +#### Scenario: Status update or comment PATCH failure + +- **GIVEN** status update or add-comment PATCH fails with 4xx/5xx +- **WHEN** the failure is handled +- **THEN** the same debug logging and user-facing message pattern is applied (response snippet, patch paths in debug; ADO message and hint to user) + +### Requirement: Safe response body handling + +The system SHALL safely parse and truncate response bodies to avoid large logs and parsing errors. + +#### Scenario: Non-JSON or oversized response body + +- **GIVEN** the ADO response body is non-JSON or very large +- **WHEN** building the error summary for debug log or user message +- **THEN** the implementation uses `response.text[:N]` (e.g. 
500–2000 chars) as fallback for message extraction +- **AND** JSON parsing failures do not suppress logging; a safe string is used instead diff --git a/openspec/changes/archive/2026-01-29-improve-ado-backlog-refine-error-logging/tasks.md b/openspec/changes/archive/2026-01-29-improve-ado-backlog-refine-error-logging/tasks.md new file mode 100644 index 00000000..90887229 --- /dev/null +++ b/openspec/changes/archive/2026-01-29-improve-ado-backlog-refine-error-logging/tasks.md @@ -0,0 +1,73 @@ +# Tasks: Improve ADO backlog refine error logging and user-facing error UX + +## 1. Create git branch from dev + +- [x] 1.1 Ensure we're on dev and up to date: `git checkout dev && git pull origin dev` +- [x] 1.2 Create branch with Development link to issue: `gh issue develop 162 --repo nold-ai/specfact-cli --name bugfix/improve-ado-backlog-refine-error-logging --checkout` +- [x] 1.3 Or create branch without issue link: `git checkout -b bugfix/improve-ado-backlog-refine-error-logging` (if no issue) +- [x] 1.4 Verify branch was created: `git branch --show-current` + +## 2. Verify spec deltas (SDD: specs first) + +- [x] 2.1 Confirm `specs/api-error-diagnostics/spec.md` exists and is complete (ADDED requirements, Given/When/Then scenarios). +- [x] 2.2 Map scenarios to test cases: Debug log (response + patch paths), No sensitive data, Console message + hint on 400, Exception carries context, User message highlights failing field, Backlog refine / status / comment PATCH consistency, Non-JSON or oversized body handling. + +## 3. Write tests from spec scenarios (TDD: tests second, expect failure) + +- [x] 3.1 Add unit tests for the helper (from spec "Debug log contains response and patch paths"): given mock response (400, JSON body with `message`) and operations list, assert `debug_log_operation` is called with operation `ado_patch`, status `failed`, and `extra` containing `response_body` snippet and `patch_paths` when debug on; assert no call when debug off. 
+- [x] 3.2 Add unit test (from spec "No sensitive data in debug log"): response body containing token-like strings; assert logged snippet is redacted (e.g. via `LoggerSetup.redact_secrets` or equivalent check). +- [x] 3.3 Add unit test (from spec "Console shows ADO message and mapping hint on 400"): simulate 400 with body e.g. "TF51535: Cannot find field System.AcceptanceCriteria."; assert user-facing message contains "Cannot find field" (or equivalent) and hint text (e.g. "custom field mapping", "ado_custom.yaml" or documentation). +- [x] 3.4 Add unit test (from spec "Re-raised exception carries ADO context"): on PATCH failure, assert re-raised exception message or attached attribute includes ADO error message and mapping hint. +- [x] 3.5 Add unit test (from spec "User message highlights failing field when present"): ADO message contains field reference (e.g. "Cannot find field System.AcceptanceCriteria"); assert visible message quotes or emphasizes the field (e.g. "Field 'System.AcceptanceCriteria' not found") and hint follows. +- [x] 3.6 Add unit test (from spec "Non-JSON or oversized response body"): non-JSON body or very large body; assert no crash, response truncated (e.g. ~500–2000 chars), safe string used in log/user message. +- [x] 3.7 Add tests for consistency (from spec "Backlog refine body PATCH failure" / "Status update or comment PATCH failure"): assert same helper and user-message pattern is used in backlog-refine PATCH path, `_update_work_item_status`, `_update_work_item_body`, add-comment, and create-work-item PATCH paths (e.g. helper called with response and operations; console message before re-raise). +- [x] 3.8 Run tests and expect failure (no implementation yet): `hatch run smart-test-unit` (target tests for ado adapter); confirm failures are due to missing helper/behavior, not syntax. + +## 4. Implement until tests pass (TDD: code last) + +- [x] 4.1 Add helper in `src/specfact_cli/adapters/ado.py` (e.g. 
`_log_ado_patch_failure(response, operations, url, context="")`) that: parses response body (JSON `message` or `response.text[:500]`), extracts patch paths from `operations`, truncates/redacts snippet via `LoggerSetup.redact_secrets`, and when `is_debug_mode()` calls `debug_log_operation("ado_patch", url_redacted, "failed", error=message, extra={"response_body": snippet, "patch_paths": paths})`. +- [x] 4.2 Ensure `extra` values are safe: truncate response_body to ~1–2 KB; redact via `LoggerSetup.redact_secrets` before passing to `debug_log_operation`. +- [x] 4.3 Build user-facing string from ADO response: prefer `response.json().get("message", "")` or `response.text[:500]`; append hint "Check custom field mapping; see ado_custom.yaml or documentation."; when ADO message contains a field reference (e.g. "Cannot find field X"), quote or emphasize it (e.g. "Field 'X' not found") then append hint. +- [x] 4.4 In backlog-refine PATCH path (~line 3200): on `requests.HTTPError`, before any retry or re-raise, call the helper with `e.response`, `operations`, and URL; then print user message (`console.print("[bold red]βœ—[/bold red] ...")`) and optionally attach to exception; re-raise. +- [x] 4.5 In `_update_work_item_status` PATCH path: on `requests.RequestException`, call the helper (with response if available, patch_document paths) and surface ADO message + hint; `console.print` before re-raise. +- [x] 4.6 In `_update_work_item_body` PATCH path: same pattern (helper + user message + re-raise). +- [x] 4.7 In add-comment and create-work-item PATCH paths: same pattern for consistency. +- [x] 4.8 Optionally attach user message to exception (e.g. custom exception or `e.args`) so tests and upstream handlers can assert on it. +- [x] 4.9 Run unit tests until all pass: `hatch run smart-test-unit`; then `hatch run smart-test-folder`; fix implementation until green. + +## 4b. 
OS-specific temp dir for exports (backlog refine and sync) + +- [x] 4b.1 In `src/specfact_cli/commands/backlog_commands.py`: add `import tempfile`; replace hard-coded `/tmp` with `Path(tempfile.gettempdir())` for export and import default paths (lines ~716, ~767). +- [x] 4b.2 In `src/specfact_cli/sync/bridge_sync.py`: add `import tempfile`; replace all `Path(f"/tmp/specfact-proposal-...")` with `Path(tempfile.gettempdir()) / "specfact-proposal-..."` (export, sanitized, cleanup paths). +- [x] 4b.3 Update help strings in `backlog_commands.py` and `sync.py` to describe "system temporary directory" (or "/...") instead of `/tmp`. +- [x] 4b.4 Update docstring in `bridge_sync.py` for `tmp_file` default to mention system temp directory. + +## 5. Quality gates + +- [x] 5.1 Run format and type-check: `hatch run format`, `hatch run type-check`. +- [x] 5.2 Run contract test: `hatch run contract-test`. +- [x] 5.3 Run full test suite: `hatch run smart-test-full` (or `hatch test --cover -v`). +- [x] 5.4 Ensure all public APIs added or modified have `@icontract` and `@beartype` where applicable. + +## 6. Documentation research and review (per openspec/config.yaml) + +- [x] 6.1 Identify affected documentation: `docs/reference/debug-logging.md`, `docs/guides/troubleshooting.md`, `docs/adapters/azuredevops.md`; README.md (debug pointer); no new pages β†’ no `docs/index.md` or `docs/_layouts/default.html` changes. +- [x] 6.2 Update debug-logging.md: extend "What Is Logged by Component" (ADO PATCH failure with response_body, patch_paths), add "Examining ADO API Errors" subsection (console + log, steps to analyze, link to custom mapping and troubleshooting). +- [x] 6.3 Update troubleshooting.md: add "Backlog refine or work item PATCH fails (400/422)" under Azure DevOps Issues (cause, read console, run with --debug, fix mapping, link to custom-field-mapping and debug-logging). 
+- [x] 6.4 Update adapters/azuredevops.md: add short "Error diagnostics" or troubleshooting note for PATCH failures linking to [Debug Logging](../reference/debug-logging.md#examining-ado-api-errors) and [Troubleshooting](../guides/troubleshooting.md#backlog-refine-or-work-item-patch-fails-400422). +- [x] 6.5 Verify README.md debug line still accurate (points to Debug Logging); add optional one-line note that with `--debug`, ADO API errors include response snippet and patch paths in the log. +- [x] 6.6 Confirm no new/moved pages β†’ front-matter and `docs/_layouts/default.html` sidebar unchanged. + +## 6b. Version bump, sync, and changelog (before PR) + +- [x] 6b.1 Bump patch version (fix): 0.26.13 β†’ 0.26.14. +- [x] 6b.2 Sync version in: `pyproject.toml`, `setup.py`, `src/__init__.py`, `src/specfact_cli/__init__.py`. +- [x] 6b.3 Add CHANGELOG.md entry: new section `## [0.26.14] - YYYY-MM-DD` with `### Fixed (0.26.14)` describing ADO error logging and user-facing UX improvements (fixes #162). + +## 7. Create Pull Request to dev + +- [x] 7.1 Ensure all changes are committed: `git add .` and `git commit -m "fix: improve ADO backlog refine error logging and user-facing error UX (fixes nold-ai/specfact-cli#162)"` +- [x] 7.2 Push to remote: `git push origin bugfix/improve-ado-backlog-refine-error-logging` +- [x] 7.3 Create PR body with Fixes nold-ai/specfact-cli#162, summary from proposal, and OpenSpec change ID: `improve-ado-backlog-refine-error-logging` +- [x] 7.4 Create PR: `gh pr create --repo nold-ai/specfact-cli --base dev --head bugfix/improve-ado-backlog-refine-error-logging --title "fix: improve ADO backlog refine error logging and user-facing error UX" --body-file ` +- [x] 7.5 Verify PR and branch are linked to issue #162 (Development section). 
diff --git a/openspec/config.yaml b/openspec/config.yaml index bf2c2c8e..d827b4a4 100644 --- a/openspec/config.yaml +++ b/openspec/config.yaml @@ -29,9 +29,22 @@ context: | Logging: Use common.logger_setup.get_logger() (avoid print()) Naming: snake_case (files/modules/functions), PascalCase (classes), UPPER_SNAKE_CASE (constants) + Documentation (critical for every change): User-facing docs are published at https://docs.specfact.io (GitHub Pages). Source: docs/ with Jekyll front-matter (layout, title, permalink, description), docs/index.md as landing, docs/_layouts/default.html for sidebar/menu navigation. README.md is the repo entry point. + - Every change must include documentation research and review: + - (1) Identifies affected documentation: docs/ (reference, guides, adapters, getting-started), README.md, docs/index.md. + - (2) Updates or adds content so docs remain a great resource for new and existing users (learn, adopt, understand). + - (3) If adding or moving pages: ensure front-matter (layout, title, permalink, description) is correct and update docs/_layouts/default.html sidebar navigation so the new or moved page appears in the menu. + - Docs are published at https://docs.specfact.io (GitHub Pages). + Contract requirements: ALL public APIs MUST have @icontract (@require/@ensure) and @beartype decorators Testing: Contract-first (primary), minimum 80% coverage, unit/integration/E2E tests required for all changes + Development discipline (SDD + TDD): SpecFact CLI adds the validation layer; we develop SpecFact itself using the same discipline to prove it works. Order is strict: + - (1) Specs firstβ€”spec deltas define behavior (Given/When/Then). + - (2) Tests secondβ€”write unit/integration tests from spec scenarios (one or more tests per scenario); run tests and expect failure. + - (3) Code lastβ€”implement until tests pass and behavior satisfies the spec. Code must batch (satisfy) both (a) spec scenarios and (b) tests. 
+ - If the pattern does not work in practice, adjust the process until it does. + # Per-artifact rules (only injected into matching artifacts) rules: proposal: @@ -42,15 +55,19 @@ rules: - Address offline-first constraint (no cloud dependencies) - Include rollback plan for risky changes - Check for conflicts with existing bridge adapters or plugin registry - - For public-facing changes: Include Source Tracking section with GitHub issue reference (format: `## Source Tracking` with `- **GitHub Issue**: #`, `- **Issue URL**: `, `- **Repository**: /`, `- **Last Synced Status**: `) + - "Documentation impact (required for every change): Consider effect on docs published at https://docs.specfact.io. Identify affected areas: docs/ (reference, guides, adapters, getting-started), docs/index.md, README.md, docs/_layouts/default.html (sidebar/navigation). + - If the change is user-facing or alters API/CLI behavior, state in Impact which docs will be updated or added so new and existing users can learn, adopt, and understand the change." + - "For public-facing changes, include Source Tracking section with GitHub issue reference (format: ## Source Tracking with - **GitHub Issue**: #, - **Issue URL**: , - **Repository**: /, - **Last Synced Status**: ). + - After creation, update proposal.md Source Tracking section with issue number, URL, repository, and status." - Source tracking: Only track public repos (specfact-cli, platform-frontend). 
Skip for internal repos (specfact-cli-internal) specs: - - Use Given/When/Then format for scenarios + - Use Given/When/Then format for scenarios (each scenario is the source of truth for one or more test cases) - Reference existing patterns in openspec/specs/ before inventing new ones - Ensure specs support multi-repository workflows where applicable - Document contract requirements (@icontract, @beartype) for new APIs - Include offline-first validation scenarios + - Specs are implemented first in the change; tests are derived from spec scenarios; then code is written to satisfy both design: - Document bridge adapter integration if external tools involved @@ -60,16 +77,34 @@ rules: - Include fallback strategies for offline scenarios tasks: + - Enforce SDD+TDD order: + - (1) Branch creation (first). + - (2) Write/add spec deltas if not already done. + - (3) Write tests from spec scenariosβ€”translate each Given/When/Then scenario into test cases; run tests and expect failure (no implementation yet). + - (4) Implement code until tests pass and behavior satisfies the spec; code must batch (satisfy) both (a) spec scenarios and (b) tests. + - (5) Quality gates (format, lint, type-check). + - (6) Documentation research and review (see below). + - (7) PR creation (last). + - "Documentation research and review (required for every change): Include a task that: + - (1) Identifies affected documentation: docs/ (reference, guides, adapters, getting-started), README.md, docs/index.md. + - (2) Updates or adds content so docs remain a great resource for new and existing users (learn, adopt, understand). + - (3) If adding or moving pages: ensure front-matter (layout, title, permalink, description) is correct and update docs/_layouts/default.html sidebar navigation so the new or moved page appears in the menu. + - Docs are published at https://docs.specfact.io (GitHub Pages)." 
- Break into 2-hour maximum chunks - Include contract decorator tasks (@icontract, @beartype) for all public APIs - - Include test tasks: unit, integration, and E2E + - Test tasks MUST come before implementation tasks: write tests derived from specs first, then implement. Do not implement before tests exist for the changed behavior. - Include quality gate tasks: format, lint, type-check, test coverage - Reference existing test patterns in tests/unit/, tests/integration/, tests/e2e/ + - "Version and changelog (required before PR): Include a task that + - (1) bumps patch version when the change is a fix, or minor/major per semver when adding features or breaking; + - (2) syncs version in pyproject.toml, setup.py, src/__init__.py, src/specfact_cli/__init__.py; + - (3) adds a CHANGELOG.md entry under a new [X.Y.Z] - YYYY-MM-DD section with Fixed/Added/Changed as appropriate. + - Place this task after quality gates and documentation, before PR creation." - Include git workflow tasks: branch creation (first task), PR creation (last task) - For public-facing changes in public repos (specfact-cli, platform-frontend): - - Include GitHub issue creation task with format: - - title `[Change] ` - - labels `enhancement` and `change-proposal` - - body following `.github/ISSUE_TEMPLATE/change_proposal.md` template (Why, What Changes sections from proposal) - - footer `*OpenSpec Change Proposal: ``*` - - After creation, update proposal.md Source Tracking section with issue number, URL, repository, and status. + - Include GitHub issue creation task with format: + - title `[Change] ` + - labels `enhancement` and `change-proposal` + - body following `.github/ISSUE_TEMPLATE/change_proposal.md` template (Why, What Changes sections from proposal) + - footer `*OpenSpec Change Proposal: *` + - After creation, update proposal.md Source Tracking section with issue number, URL, repository, and status. 
diff --git a/openspec/specs/api-error-diagnostics/spec.md b/openspec/specs/api-error-diagnostics/spec.md new file mode 100644 index 00000000..d0579b02 --- /dev/null +++ b/openspec/specs/api-error-diagnostics/spec.md @@ -0,0 +1,78 @@ +# api-error-diagnostics Specification + +## Purpose +TBD - created by archiving change improve-ado-backlog-refine-error-logging. Update Purpose after archive. +## Requirements +### Requirement: ADO PATCH failure debug logging + +When an ADO PATCH request fails (HTTP 4xx/5xx), the system SHALL log structured diagnostic data in debug mode so the failing field and server message are identifiable. + +#### Scenario: Debug log contains response and patch paths on PATCH failure + +- **GIVEN** debug mode is enabled (`--debug`) +- **AND** an ADO PATCH request fails (e.g. 400 Bad Request) +- **WHEN** the failure is handled (before re-raise or user message) +- **THEN** `debug_log_operation` is called with operation (e.g. `ado_patch`), target (URL redacted), status (e.g. `failed`), error (exception or message string) +- **AND** `extra` includes a safe snippet of the response body (e.g. parsed `message` or truncated `response.text`, redacted, max ~1–2 KB) +- **AND** `extra` includes the list of JSON Patch operation paths attempted (e.g. `["/fields/System.AcceptanceCriteria", "/fields/System.Description"]`) +- **AND** sensitive values in response body and extra are redacted (e.g. via `LoggerSetup.redact_secrets`) + +#### Scenario: No sensitive data in debug log + +- **GIVEN** debug mode is enabled and ADO returns an error body containing tokens or secrets +- **WHEN** the error is logged to the debug log +- **THEN** the logged snippet is redacted so that tokens, keys, and known secret patterns are not written in plain text + +### Requirement: User-facing error message on ADO PATCH failure + +When an ADO PATCH request fails, the user SHALL see the server error message and an actionable hint without requiring `--debug`. 
+ +#### Scenario: Console shows ADO message and mapping hint on 400 + +- **GIVEN** an ADO PATCH request fails with HTTP 400 and a body containing a message (e.g. "TF51535: Cannot find field System.AcceptanceCriteria") +- **WHEN** the error is surfaced to the user (console or exception) +- **THEN** the visible message includes the ADO error message (e.g. "Cannot find field System.AcceptanceCriteria") +- **AND** the visible message includes a short hint that custom field mapping may be required (e.g. "Check custom field mapping; see ado_custom.yaml or documentation.") +- **AND** the message is concise and actionable (no raw stack trace unless debug) + +#### Scenario: Re-raised exception carries ADO context + +- **GIVEN** the implementation re-raises an exception after handling ADO PATCH failure +- **WHEN** the exception is raised +- **THEN** the exception message (or attached attribute) includes the ADO error message and mapping hint so that upstream handlers or tests can display or assert on it + +#### Scenario: User message highlights failing field when present + +- **GIVEN** the ADO response message contains a field reference (e.g. β€œCannot find field System.AcceptanceCriteria”) +- **WHEN** the error is surfaced to the user +- **THEN** the visible message quotes or emphasizes the field reference (e.g. β€œField β€˜System.AcceptanceCriteria’ not found”) so the failing field is obvious at a glance +- **AND** the hint about custom mapping follows + +### Requirement: Consistent behavior across ADO PATCH call sites + +The same error capture, debug logging, and user-facing message behavior SHALL apply to all ADO PATCH operations (backlog refine body update, work item status update, add comment, create work item). 
+ +#### Scenario: Backlog refine body PATCH failure + +- **GIVEN** `_update_work_item_body` or the backlog-refine PATCH path fails with 400/422 +- **WHEN** the failure is handled +- **THEN** debug log (if enabled) contains response snippet and patch paths +- **AND** user sees ADO message and mapping hint + +#### Scenario: Status update or comment PATCH failure + +- **GIVEN** status update or add-comment PATCH fails with 4xx/5xx +- **WHEN** the failure is handled +- **THEN** the same debug logging and user-facing message pattern is applied (response snippet, patch paths in debug; ADO message and hint to user) + +### Requirement: Safe response body handling + +The system SHALL safely parse and truncate response bodies to avoid large logs and parsing errors. + +#### Scenario: Non-JSON or oversized response body + +- **GIVEN** the ADO response body is non-JSON or very large +- **WHEN** building the error summary for debug log or user message +- **THEN** the implementation uses `response.text[:N]` (e.g. 500–2000 chars) as fallback for message extraction +- **AND** JSON parsing failures do not suppress logging; a safe string is used instead + diff --git a/pyproject.toml b/pyproject.toml index 2a262283..e7fc64b2 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -4,7 +4,7 @@ build-backend = "hatchling.build" [project] name = "specfact-cli" -version = "0.26.13" +version = "0.26.14" description = "Brownfield-first CLI: Reverse engineer legacy Python β†’ specs β†’ enforced contracts. Automate legacy code documentation and prevent modernization regressions." 
readme = "README.md" requires-python = ">=3.11" diff --git a/setup.py b/setup.py index 8f2973f2..e03e30d1 100644 --- a/setup.py +++ b/setup.py @@ -7,7 +7,7 @@ if __name__ == "__main__": _setup = setup( name="specfact-cli", - version="0.26.13", + version="0.26.14", description="SpecFact CLI - Spec -> Contract -> Sentinel tool for contract-driven development", packages=find_packages(where="src"), package_dir={"": "src"}, diff --git a/src/__init__.py b/src/__init__.py index 33f642b5..aa8d6a59 100644 --- a/src/__init__.py +++ b/src/__init__.py @@ -3,4 +3,4 @@ """ # Package version: keep in sync with pyproject.toml, setup.py, src/specfact_cli/__init__.py -__version__ = "0.26.13" +__version__ = "0.26.14" diff --git a/src/specfact_cli/__init__.py b/src/specfact_cli/__init__.py index 7dab7940..3bc2eb64 100644 --- a/src/specfact_cli/__init__.py +++ b/src/specfact_cli/__init__.py @@ -9,6 +9,6 @@ - Validating reproducibility """ -__version__ = "0.26.13" +__version__ = "0.26.14" __all__ = ["__version__"] diff --git a/src/specfact_cli/adapters/ado.py b/src/specfact_cli/adapters/ado.py index 8ccce72a..a850a891 100644 --- a/src/specfact_cli/adapters/ado.py +++ b/src/specfact_cli/adapters/ado.py @@ -11,8 +11,7 @@ from __future__ import annotations import os - -# import re +import re from datetime import UTC, datetime from pathlib import Path from typing import Any @@ -28,6 +27,7 @@ from specfact_cli.backlog.adapters.base import BacklogAdapter from specfact_cli.backlog.filters import BacklogFilters from specfact_cli.backlog.mappers.ado_mapper import AdoFieldMapper +from specfact_cli.common.logger_setup import LoggerSetup from specfact_cli.models.backlog_item import BacklogItem from specfact_cli.models.bridge import BridgeConfig from specfact_cli.models.capabilities import ToolCapabilities @@ -36,9 +36,68 @@ from specfact_cli.utils.auth_tokens import get_token, set_token +_MAX_RESPONSE_BODY_LOG = 2048 + console = Console() +def _log_ado_patch_failure( + response: requests.Response | 
None, + operations: list[dict[str, Any]], + url: str, + context: str = "", +) -> str: + """ + Log ADO PATCH failure to debug log (when debug on) and return user-facing message. + + Parses response body (JSON message or truncated text), extracts patch paths, + redacts/truncates for debug log, and builds a user message with ADO text and hint. + """ + paths = [op.get("path", "") for op in operations if isinstance(op, dict)] + snippet = "" + if response is not None: + try: + body = response.json() + snippet = str(body.get("message", response.text[:500])) + except Exception: + snippet = (response.text or "")[:_MAX_RESPONSE_BODY_LOG] + snippet = snippet[:_MAX_RESPONSE_BODY_LOG] + snippet = str(LoggerSetup.redact_secrets(snippet)) + + if is_debug_mode(): + debug_log_operation( + "ado_patch", + url, + "failed", + error=context or snippet[:500], + extra={"response_body": snippet, "patch_paths": paths}, + ) + + return _build_ado_user_message(response) + + +def _build_ado_user_message(response: requests.Response | None) -> str: + """Build user-facing error message from ADO response and append mapping hint.""" + hint = " Check custom field mapping; see ado_custom.yaml or documentation." + if response is None: + return f"Azure DevOps request failed.{hint}" + try: + body = response.json() + msg = body.get("message", "") or (response.text or "")[:500] + except Exception: + msg = (response.text or "")[:500] + if not msg: + return f"Azure DevOps request failed (HTTP {getattr(response, 'status_code', '')}).{hint}" + + m = re.search(r"Cannot find field\s+([^\s]+)", msg, re.IGNORECASE) + if m: + field = m.group(1).strip().rstrip(".") + user_msg = f"Field '{field}' not found.{hint}" + else: + user_msg = f"{msg}{hint}" + return user_msg + + class AdoAdapter(BridgeAdapter, BacklogAdapterMixin, BacklogAdapter): """ Azure DevOps bridge adapter implementing BridgeAdapter interface. 
@@ -1642,10 +1701,10 @@ def _create_work_item_from_proposal( "state": ado_state, } except requests.RequestException as e: - if is_debug_mode(): - debug_log_operation("ado_patch", url, "error", error=str(e)) - msg = f"Failed to create Azure DevOps work item: {e}" - console.print(f"[bold red]βœ—[/bold red] {msg}") + resp = getattr(e, "response", None) + user_msg = _log_ado_patch_failure(resp, patch_document, url) + e.ado_user_message = user_msg + console.print(f"[bold red]βœ—[/bold red] {user_msg}") raise def _update_work_item_status( @@ -1744,8 +1803,9 @@ def _update_work_item_status( "state": ado_state, } except requests.RequestException as e: - msg = f"Failed to update Azure DevOps work item #{work_item_id}: {e}" - console.print(f"[bold red]βœ—[/bold red] {msg}") + resp = getattr(e, "response", None) + user_msg = _log_ado_patch_failure(resp, patch_document, url) + console.print(f"[bold red]βœ—[/bold red] {user_msg}") raise def _update_work_item_body( @@ -1876,8 +1936,9 @@ def _update_work_item_body( "state": ado_state, } except requests.RequestException as e: - msg = f"Failed to update Azure DevOps work item #{work_item_id}: {e}" - console.print(f"[bold red]βœ—[/bold red] {msg}") + resp = getattr(e, "response", None) + user_msg = _log_ado_patch_failure(resp, patch_document, url) + console.print(f"[bold red]βœ—[/bold red] {user_msg}") raise @beartype @@ -1983,8 +2044,10 @@ def sync_status_to_ado( "new_state": ado_state, } except requests.RequestException as e: - msg = f"Failed to sync status to Azure DevOps work item #{work_item_id}: {e}" - console.print(f"[bold red]βœ—[/bold red] {msg}") + resp = getattr(e, "response", None) + user_msg = _log_ado_patch_failure(resp, patch_document, url) + e.ado_user_message = user_msg + console.print(f"[bold red]βœ—[/bold red] {user_msg}") raise @beartype @@ -2361,8 +2424,10 @@ def _add_work_item_comment( "comment_added": True, } except requests.RequestException as e: - msg = f"Failed to add comment to Azure DevOps work item 
#{work_item_id}: {e}" - console.print(f"[bold red]βœ—[/bold red] {msg}") + resp = getattr(e, "response", None) + user_msg = _log_ado_patch_failure(resp, [], url) + e.ado_user_message = user_msg + console.print(f"[bold red]βœ—[/bold red] {user_msg}") raise @beartype @@ -3200,7 +3265,8 @@ def update_backlog_item(self, item: BacklogItem, update_fields: list[str] | None response = requests.patch(url, headers=headers, json=operations, timeout=30) response.raise_for_status() except requests.HTTPError as e: - # Handle 400/422: often caused by /multilineFieldsFormat/ not being supported by ADO API + user_msg = _log_ado_patch_failure(e.response, operations, url) + e.ado_user_message = user_msg response = None if e.response and e.response.status_code in (400, 422): error_message = "" @@ -3221,14 +3287,12 @@ def update_backlog_item(self, item: BacklogItem, update_fields: list[str] | None resp.raise_for_status() response = resp except requests.HTTPError as retry_error: - # Retry with operations_no_format failed; continue to next fallback strategy. 
- if is_debug_mode(): - debug_log_operation( - "ado_patch", - url, - "failed", - error=str(retry_error), - ) + _log_ado_patch_failure( + retry_error.response, + operations_no_format, + url, + context=str(retry_error), + ) if response is None and ( "already exists" in error_message.lower() or "cannot add" in error_message.lower() @@ -3280,9 +3344,11 @@ def update_backlog_item(self, item: BacklogItem, update_fields: list[str] | None resp.raise_for_status() response = resp except requests.HTTPError: + console.print(f"[bold red]βœ—[/bold red] {user_msg}") raise if response is None: + console.print(f"[bold red]βœ—[/bold red] {user_msg}") raise updated_work_item = response.json() diff --git a/src/specfact_cli/commands/backlog_commands.py b/src/specfact_cli/commands/backlog_commands.py index cb020f1d..f288b36f 100644 --- a/src/specfact_cli/commands/backlog_commands.py +++ b/src/specfact_cli/commands/backlog_commands.py @@ -16,6 +16,7 @@ import os import re import sys +import tempfile from datetime import datetime from pathlib import Path from typing import Any @@ -445,12 +446,12 @@ def refine( export_to_tmp: bool = typer.Option( False, "--export-to-tmp", - help="Export backlog items to temporary file for copilot processing (default: /tmp/specfact-backlog-refine-.md)", + help="Export backlog items to temporary file for copilot processing (default: /specfact-backlog-refine-.md)", ), import_from_tmp: bool = typer.Option( False, "--import-from-tmp", - help="Import refined content from temporary file after copilot processing (default: /tmp/specfact-backlog-refine--refined.md)", + help="Import refined content from temporary file after copilot processing (default: /specfact-backlog-refine--refined.md)", ), tmp_file: Path | None = typer.Option( None, @@ -713,7 +714,7 @@ def refine( # Handle export mode if export_to_tmp: timestamp = datetime.now().strftime("%Y%m%d-%H%M%S") - export_file = tmp_file or Path(f"/tmp/specfact-backlog-refine-{timestamp}.md") + export_file = tmp_file or 
(Path(tempfile.gettempdir()) / f"specfact-backlog-refine-{timestamp}.md") console.print(f"[bold cyan]Exporting {len(items)} backlog item(s) to: {export_file}[/bold cyan]") @@ -764,7 +765,7 @@ def refine( # Handle import mode if import_from_tmp: timestamp = datetime.now().strftime("%Y%m%d-%H%M%S") - import_file = tmp_file or Path(f"/tmp/specfact-backlog-refine-{timestamp}-refined.md") + import_file = tmp_file or (Path(tempfile.gettempdir()) / f"specfact-backlog-refine-{timestamp}-refined.md") if not import_file.exists(): console.print(f"[bold red]βœ—[/bold red] Import file not found: {import_file}") diff --git a/src/specfact_cli/commands/sync.py b/src/specfact_cli/commands/sync.py index 49ef6496..e47052e7 100644 --- a/src/specfact_cli/commands/sync.py +++ b/src/specfact_cli/commands/sync.py @@ -1099,19 +1099,19 @@ def sync_bridge( export_to_tmp: bool = typer.Option( False, "--export-to-tmp", - help="Export proposal content to temporary file for LLM review (default: /tmp/specfact-proposal-.md).", + help="Export proposal content to temporary file for LLM review (default: /specfact-proposal-.md).", hidden=True, ), import_from_tmp: bool = typer.Option( False, "--import-from-tmp", - help="Import sanitized content from temporary file after LLM review (default: /tmp/specfact-proposal--sanitized.md).", + help="Import sanitized content from temporary file after LLM review (default: /specfact-proposal--sanitized.md).", hidden=True, ), tmp_file: Path | None = typer.Option( None, "--tmp-file", - help="Custom temporary file path (default: /tmp/specfact-proposal-.md).", + help="Custom temporary file path (default: /specfact-proposal-.md).", hidden=True, ), update_existing: bool = typer.Option( diff --git a/src/specfact_cli/sync/bridge_sync.py b/src/specfact_cli/sync/bridge_sync.py index 28407b61..4e1ef828 100644 --- a/src/specfact_cli/sync/bridge_sync.py +++ b/src/specfact_cli/sync/bridge_sync.py @@ -12,6 +12,7 @@ import hashlib import re import subprocess +import tempfile from 
dataclasses import dataclass from urllib.parse import urlparse @@ -557,7 +558,7 @@ def export_change_proposals_to_devops( change_ids: Optional list of change proposal IDs to filter. If None, exports all active proposals. export_to_tmp: If True, export proposal content to temporary file for LLM review. import_from_tmp: If True, import sanitized content from temporary file after LLM review. - tmp_file: Optional custom temporary file path. Default: `/tmp/specfact-proposal-.md`. + tmp_file: Optional custom temporary file path. Default: /specfact-proposal-.md. Returns: SyncResult with operation details @@ -904,7 +905,7 @@ def export_change_proposals_to_devops( # Handle temporary file workflow if requested if export_to_tmp: # Export proposal content to temporary file for LLM review - tmp_file_path = tmp_file or Path(f"/tmp/specfact-proposal-{change_id}.md") + tmp_file_path = tmp_file or (Path(tempfile.gettempdir()) / f"specfact-proposal-{change_id}.md") try: # Create markdown content from proposal proposal_content = self._format_proposal_for_export(proposal) @@ -919,7 +920,9 @@ def export_change_proposals_to_devops( if import_from_tmp: # Import sanitized content from temporary file - sanitized_file_path = tmp_file or Path(f"/tmp/specfact-proposal-{change_id}-sanitized.md") + sanitized_file_path = tmp_file or ( + Path(tempfile.gettempdir()) / f"specfact-proposal-{change_id}-sanitized.md" + ) try: if not sanitized_file_path.exists(): errors.append( @@ -933,7 +936,7 @@ def export_change_proposals_to_devops( proposal_to_export = self._parse_sanitized_proposal(sanitized_content, proposal) # Cleanup temporary files after import try: - original_tmp = Path(f"/tmp/specfact-proposal-{change_id}.md") + original_tmp = Path(tempfile.gettempdir()) / f"specfact-proposal-{change_id}.md" if original_tmp.exists(): original_tmp.unlink() if sanitized_file_path.exists(): @@ -2504,7 +2507,7 @@ def _update_issue_content_if_needed( # Handle sanitized content updates (when import_from_tmp is 
used) if import_from_tmp: change_id = proposal.get("change_id", "unknown") - sanitized_file = tmp_file or Path(f"/tmp/specfact-proposal-{change_id}-sanitized.md") + sanitized_file = tmp_file or (Path(tempfile.gettempdir()) / f"specfact-proposal-{change_id}-sanitized.md") if sanitized_file.exists(): sanitized_content = sanitized_file.read_text(encoding="utf-8") proposal_for_hash = { @@ -2603,7 +2606,9 @@ def _update_issue_content_if_needed( try: if import_from_tmp: change_id = proposal.get("change_id", "unknown") - sanitized_file = tmp_file or Path(f"/tmp/specfact-proposal-{change_id}-sanitized.md") + sanitized_file = tmp_file or ( + Path(tempfile.gettempdir()) / f"specfact-proposal-{change_id}-sanitized.md" + ) if sanitized_file.exists(): sanitized_content = sanitized_file.read_text(encoding="utf-8") proposal_for_update = { diff --git a/tests/unit/specfact_cli/adapters/__init__.py b/tests/unit/specfact_cli/adapters/__init__.py new file mode 100644 index 00000000..6e90fee8 --- /dev/null +++ b/tests/unit/specfact_cli/adapters/__init__.py @@ -0,0 +1 @@ +# Unit tests for specfact_cli.adapters diff --git a/tests/unit/specfact_cli/adapters/test_ado_patch_error_logging.py b/tests/unit/specfact_cli/adapters/test_ado_patch_error_logging.py new file mode 100644 index 00000000..3d63784b --- /dev/null +++ b/tests/unit/specfact_cli/adapters/test_ado_patch_error_logging.py @@ -0,0 +1,224 @@ +""" +Unit tests for ADO PATCH failure debug logging and user-facing error messages. 
+ +Spec: openspec/changes/improve-ado-backlog-refine-error-logging/specs/api-error-diagnostics/spec.md +""" + +from __future__ import annotations + +import json +from unittest.mock import MagicMock, patch + +import pytest +import requests + +from specfact_cli.adapters.ado import AdoAdapter + + +def _make_response( + status_code: int = 400, + body_json: dict | None = None, + body_text: str | None = None, +) -> requests.Response: + resp = MagicMock(spec=requests.Response) + resp.status_code = status_code + if body_json is not None: + resp.text = json.dumps(body_json) + resp.json.return_value = body_json + elif body_text is not None: + resp.text = body_text + resp.json.side_effect = ValueError("not JSON") + else: + resp.text = "" + resp.json.side_effect = ValueError("not JSON") + return resp + + +def _make_http_error(response: requests.Response) -> requests.HTTPError: + err = requests.HTTPError() + err.response = response + return err + + +class TestAdoPatchFailureDebugLogging: + """Debug log contains response and patch paths on PATCH failure (spec scenario).""" + + @pytest.fixture + def adapter(self) -> AdoAdapter: + return AdoAdapter(org="myorg", project="myproj", api_token="dummy") + + def test_debug_log_operation_called_with_extra_when_debug_on(self, adapter: AdoAdapter) -> None: + """When debug on and PATCH fails, debug_log_operation is called with response_body and patch_paths.""" + response = _make_response( + 400, + body_json={"message": "TF51535: Cannot find field System.AcceptanceCriteria."}, + ) + with ( + patch("specfact_cli.adapters.ado.requests.patch") as mock_patch, + patch("specfact_cli.adapters.ado.is_debug_mode", return_value=True), + patch("specfact_cli.adapters.ado.debug_log_operation") as mock_debug_log, + patch("specfact_cli.adapters.ado.console.print"), + ): + mock_patch.return_value = response + response.raise_for_status = lambda: (_ for _ in ()).throw(_make_http_error(response)) + with pytest.raises(requests.RequestException): + 
adapter.sync_status_to_ado( + {"status": "in_progress", "source_tracking": {"source_id": 123}}, + "myorg", + "myproj", + ) + mock_debug_log.assert_called() + call_args = mock_debug_log.call_args + assert call_args[0][0] == "ado_patch" + assert call_args[0][2] == "failed" + call_kw = call_args[1] or {} + extra = call_kw.get("extra") or {} + assert "response_body" in extra + assert "patch_paths" in extra + assert extra["patch_paths"] == ["/fields/System.State"] + assert "Cannot find field" in str(extra["response_body"]) + + def test_debug_log_operation_not_called_when_debug_off(self, adapter: AdoAdapter) -> None: + """When debug off and PATCH fails, debug_log_operation is not called.""" + response = _make_response(400, body_json={"message": "Bad request"}) + with ( + patch("specfact_cli.adapters.ado.requests.patch") as mock_patch, + patch("specfact_cli.adapters.ado.is_debug_mode", return_value=False), + patch("specfact_cli.adapters.ado.debug_log_operation") as mock_debug_log, + patch("specfact_cli.adapters.ado.console.print"), + ): + mock_patch.return_value = response + response.raise_for_status = lambda: (_ for _ in ()).throw(_make_http_error(response)) + with pytest.raises(requests.RequestException): + adapter.sync_status_to_ado( + {"status": "in_progress", "source_tracking": {"source_id": 123}}, + "myorg", + "myproj", + ) + mock_debug_log.assert_not_called() + + +class TestAdoPatchUserMessage: + """Console shows ADO message and mapping hint on 400 (spec scenario).""" + + @pytest.fixture + def adapter(self) -> AdoAdapter: + return AdoAdapter(org="myorg", project="myproj", api_token="dummy") + + def test_console_print_contains_ado_message_and_hint(self, adapter: AdoAdapter) -> None: + """User-facing message includes ADO error text and custom field mapping hint.""" + response = _make_response( + 400, + body_json={"message": "TF51535: Cannot find field System.AcceptanceCriteria."}, + ) + with ( + patch("specfact_cli.adapters.ado.requests.patch") as mock_patch, + 
patch("specfact_cli.adapters.ado.is_debug_mode", return_value=False), + patch("specfact_cli.adapters.ado.debug_log_operation"), + patch("specfact_cli.adapters.ado.console.print") as mock_console_print, + ): + mock_patch.return_value = response + response.raise_for_status = lambda: (_ for _ in ()).throw(_make_http_error(response)) + with pytest.raises(requests.RequestException): + adapter.sync_status_to_ado( + {"status": "in_progress", "source_tracking": {"source_id": 123}}, + "myorg", + "myproj", + ) + mock_console_print.assert_called() + printed = " ".join(str(c) for c in mock_console_print.call_args[0]) + assert "Field '" in printed and "System.AcceptanceCriteria" in printed + assert "custom field mapping" in printed or "ado_custom" in printed.lower() + + def test_reraised_exception_carries_ado_context(self, adapter: AdoAdapter) -> None: + """Re-raised exception has ado_user_message with ADO error and mapping hint (spec).""" + response = _make_response( + 400, + body_json={"message": "TF51535: Cannot find field System.AcceptanceCriteria."}, + ) + with ( + patch("specfact_cli.adapters.ado.requests.patch") as mock_patch, + patch("specfact_cli.adapters.ado.is_debug_mode", return_value=False), + patch("specfact_cli.adapters.ado.debug_log_operation"), + patch("specfact_cli.adapters.ado.console.print"), + ): + mock_patch.return_value = response + response.raise_for_status = lambda: (_ for _ in ()).throw(_make_http_error(response)) + with pytest.raises(requests.RequestException) as exc_info: + adapter.sync_status_to_ado( + {"status": "in_progress", "source_tracking": {"source_id": 123}}, + "myorg", + "myproj", + ) + exc = exc_info.value + ado_msg = getattr(exc, "ado_user_message", "") + assert "System.AcceptanceCriteria" in ado_msg or "Cannot find field" in ado_msg + assert "custom field mapping" in ado_msg or "ado_custom" in ado_msg.lower() + + +class TestAdoPatchSensitiveDataRedaction: + """No sensitive data in debug log (spec scenario).""" + + @pytest.fixture + def 
adapter(self) -> AdoAdapter: + return AdoAdapter(org="myorg", project="myproj", api_token="dummy") + + def test_debug_log_redacts_api_key_in_response_body(self, adapter: AdoAdapter) -> None: + """Response body containing sk-... pattern is redacted in debug log.""" + secret = "sk-" + "x" * 24 + response = _make_response( + 400, + body_json={"message": f"Invalid token: {secret}"}, + ) + with ( + patch("specfact_cli.adapters.ado.requests.patch") as mock_patch, + patch("specfact_cli.adapters.ado.is_debug_mode", return_value=True), + patch("specfact_cli.adapters.ado.debug_log_operation") as mock_debug_log, + patch("specfact_cli.adapters.ado.console.print"), + ): + mock_patch.return_value = response + response.raise_for_status = lambda: (_ for _ in ()).throw(_make_http_error(response)) + with pytest.raises(requests.RequestException): + adapter.sync_status_to_ado( + {"status": "in_progress", "source_tracking": {"source_id": 123}}, + "myorg", + "myproj", + ) + mock_debug_log.assert_called() + call_kw = mock_debug_log.call_args[1] or {} + extra = call_kw.get("extra") or {} + logged = str(extra.get("response_body", "")) + assert "*** MASKED" in logged + assert secret not in logged + + +class TestAdoPatchNonJsonBody: + """Non-JSON or oversized response body (spec scenario).""" + + @pytest.fixture + def adapter(self) -> AdoAdapter: + return AdoAdapter(org="myorg", project="myproj", api_token="dummy") + + def test_non_json_response_no_crash(self, adapter: AdoAdapter) -> None: + """Non-JSON response body does not crash; safe string used.""" + response = _make_response(body_text="Error 400") + response.status_code = 400 + with ( + patch("specfact_cli.adapters.ado.requests.patch") as mock_patch, + patch("specfact_cli.adapters.ado.is_debug_mode", return_value=True), + patch("specfact_cli.adapters.ado.debug_log_operation") as mock_debug_log, + patch("specfact_cli.adapters.ado.console.print"), + ): + mock_patch.return_value = response + response.raise_for_status = lambda: (_ for _ in 
()).throw(_make_http_error(response)) + with pytest.raises(requests.RequestException): + adapter.sync_status_to_ado( + {"status": "in_progress", "source_tracking": {"source_id": 123}}, + "myorg", + "myproj", + ) + mock_debug_log.assert_called() + call_kw = mock_debug_log.call_args[1] + extra = call_kw.get("extra") or {} + assert "response_body" in extra + assert "Error 400" in str(extra["response_body"]) or "html" in str(extra["response_body"]).lower()