diff --git a/docs/flowctl.md b/docs/flowctl.md
index 061d3269..35b134e5 100644
--- a/docs/flowctl.md
+++ b/docs/flowctl.md
@@ -7,7 +7,7 @@ CLI for `.flow/` task tracking. Agents must use flowctl for all writes.
## Available Commands
```
-init, detect, epic, task, dep, gap, show, epics, tasks, list, cat, ready, next, start, done, block, validate, config, invariants, guard, stack, memory, prep-chat, rp, codex, checkpoint, status, state-path, migrate-state
+init, detect, epic, task, dep, gap, show, epics, tasks, list, cat, ready, next, start, done, block, validate, config, invariants, guard, stack, memory, parse-findings, prep-chat, rp, codex, checkpoint, status, state-path, migrate-state
```
## Multi-User Safety
@@ -605,6 +605,62 @@ flowctl memory read --type pitfalls [--json]
Types: `pitfall`, `convention`, `decision`
+### parse-findings
+
+Extract structured findings from review output and optionally register them as gaps.
+
+```bash
+# Extract findings from a review output file
+flowctl parse-findings --file /tmp/review-output.txt [--json]
+
+# Extract and auto-register as gaps on an epic
+flowctl parse-findings --file /tmp/review-output.txt --epic fn-1-add-auth --register --source plan-review [--json]
+
+# Read from stdin
+echo "$REVIEW_OUTPUT" | flowctl parse-findings --file - --epic fn-1 --register --source impl-review --json
+```
+
+Options:
+- `--file FILE` (required): Review text file, or `-` for stdin
+- `--epic EPIC_ID`: Required when `--register` is used
+- `--register`: Auto-call `gap add` for each critical/major finding
+- `--source SOURCE`: Gap source label (default: `manual`). Typical values: `plan-review`, `impl-review`, `epic-review`
+- `--json`: JSON output
+
+**Extraction strategy** (tiered, no external deps):
+1. Regex `<findings>...</findings>` tag
+2. Fallback: bare JSON array `[{...}]`
+3. Fallback: markdown code block `` ```json...``` ``
+4. Graceful empty: returns `[]` with warning if no findings found
+
+**Severity-to-priority mapping** (used with `--register`):
+| Severity | Priority |
+|----------|----------|
+| critical | required |
+| major | important |
+| minor | nice-to-have |
+| nitpick | nice-to-have |
+
+Output:
+```json
+{
+ "success": true,
+ "findings": [
+ {
+ "title": "Missing input validation",
+ "severity": "major",
+ "location": "src/auth.py:42",
+ "recommendation": "Add input sanitization"
+ }
+ ],
+ "count": 1,
+ "registered": 1,
+ "warnings": []
+}
+```
+
+Without `--register`, the `registered` field is `0` (no gaps are written).
+
### prep-chat
Generate properly escaped JSON for RepoPrompt chat. Avoids shell escaping issues with complex prompts.
diff --git a/scripts/flowctl/cli.py b/scripts/flowctl/cli.py
index 6b4b1cc7..a42f8943 100644
--- a/scripts/flowctl/cli.py
+++ b/scripts/flowctl/cli.py
@@ -35,6 +35,7 @@
cmd_gap_resolve,
cmd_gap_check,
)
+from flowctl.commands.findings import cmd_parse_findings
from flowctl.commands.epic import (
cmd_epic_create,
cmd_epic_set_plan,
@@ -544,6 +545,30 @@ def main() -> None:
p_gap_check.add_argument("--json", action="store_true", help="JSON output")
p_gap_check.set_defaults(func=cmd_gap_check)
+ # parse-findings
+ p_pf = subparsers.add_parser(
+ "parse-findings",
+ help="Extract structured findings from review output",
+ )
+ p_pf.add_argument(
+ "--file", required=True,
+ help="Review output file (or '-' for stdin)",
+ )
+ p_pf.add_argument(
+ "--epic", default=None,
+ help="Epic ID (required with --register)",
+ )
+ p_pf.add_argument(
+ "--register", action="store_true",
+ help="Auto-register critical/major findings as gaps",
+ )
+ p_pf.add_argument(
+ "--source", default="manual",
+ help="Gap source label (default: manual)",
+ )
+ p_pf.add_argument("--json", action="store_true", help="JSON output")
+ p_pf.set_defaults(func=cmd_parse_findings)
+
# show
p_show = subparsers.add_parser("show", help="Show epic or task")
p_show.add_argument("id", help="Epic or task ID (e.g., fn-1-add-auth, fn-1-add-auth.2)")
diff --git a/scripts/flowctl/commands/findings.py b/scripts/flowctl/commands/findings.py
new file mode 100644
index 00000000..7cc5c58f
--- /dev/null
+++ b/scripts/flowctl/commands/findings.py
@@ -0,0 +1,219 @@
+"""parse-findings command: extract structured JSON from review output."""
+
+import argparse
+import json
+import re
+import sys
+from typing import List, Tuple
+
+from flowctl.core.io import error_exit, json_output, read_file_or_stdin
+from flowctl.core.ids import is_epic_id
+from flowctl.core.paths import ensure_flow_exists
+
+# Severity → gap priority mapping
+SEVERITY_TO_PRIORITY = {
+ "critical": "required",
+ "major": "important",
+ "minor": "nice-to-have",
+ "nitpick": "nice-to-have",
+}
+
+REQUIRED_KEYS = ("title", "severity", "location", "recommendation")
+MAX_FINDINGS = 50
+
+
+def _repair_json(text: str) -> str:
+ """Stdlib-only JSON repair: strip fences, trailing commas, single quotes."""
+ # Strip markdown code fences
+ text = re.sub(r"^```(?:json)?\s*\n?", "", text.strip())
+ text = re.sub(r"\n?```\s*$", "", text.strip())
+
+ # Remove trailing commas before ] or }
+ text = re.sub(r",\s*([}\]])", r"\1", text)
+
+ # Replace single quotes with double quotes (simple heuristic —
+ # only when they look like JSON string delimiters)
+ # This handles {'key': 'value'} but not contractions inside values.
+ # We only apply this if the text doesn't parse as-is.
+ try:
+ json.loads(text)
+ return text
+ except (json.JSONDecodeError, ValueError):
+ pass
+
+ # Try replacing single-quote delimiters
+ repaired = re.sub(r"(?<=[\[{,:\s])'|'(?=[\]},:.\s])", '"', text)
+ return repaired
+
+
+def parse_findings(text: str) -> Tuple[List[dict], List[str]]:
+ """Extract structured findings from review output text.
+
+ Tiered extraction:
+    1. <findings>...</findings> tag
+ 2. Bare JSON array [{...}]
+ 3. Markdown code block ```json...```
+ 4. Graceful empty
+
+ Returns (findings_list, warnings).
+ """
+ warnings: List[str] = []
+ raw_json = None
+
+    # Tier 1: <findings> tag
+    match = re.search(r"<findings>\s*(.*?)\s*</findings>", text, re.DOTALL)
+    if match:
+        raw_json = match.group(1).strip()
+    else:
+        # Tier 2: bare JSON array
+        match = re.search(r"(\[\s*\{.*?\}\s*\])", text, re.DOTALL)
+        if match:
+            raw_json = match.group(1).strip()
+            warnings.append("No <findings> tag found; extracted bare JSON array")
+        else:
+            # Tier 3: markdown code block
+            match = re.search(r"```(?:json)?\s*\n(\[.*?\])\s*\n?```", text, re.DOTALL)
+            if match:
+                raw_json = match.group(1).strip()
+                warnings.append("No <findings> tag found; extracted from code block")
+            else:
+                # Tier 4: graceful empty
+                warnings.append("No findings found in review output")
+                return [], warnings
+
+ # Repair and parse JSON
+ repaired = _repair_json(raw_json)
+ try:
+ findings = json.loads(repaired)
+ except (json.JSONDecodeError, ValueError) as e:
+ warnings.append(f"Failed to parse findings JSON: {e}")
+ return [], warnings
+
+ if not isinstance(findings, list):
+ warnings.append("Findings JSON is not a list")
+ return [], warnings
+
+ # Validate each finding
+ valid_findings: List[dict] = []
+ for i, finding in enumerate(findings):
+ if not isinstance(finding, dict):
+ warnings.append(f"Finding {i} is not an object, skipping")
+ continue
+ missing = [k for k in REQUIRED_KEYS if k not in finding]
+ if missing:
+ warnings.append(f"Finding {i} missing keys: {', '.join(missing)}, skipping")
+ continue
+ # Normalize severity to lowercase
+ finding["severity"] = finding["severity"].strip().lower()
+ valid_findings.append(finding)
+
+ # Cap at MAX_FINDINGS
+ if len(valid_findings) > MAX_FINDINGS:
+ warnings.append(
+ f"Found {len(valid_findings)} findings, capping at {MAX_FINDINGS}"
+ )
+ valid_findings = valid_findings[:MAX_FINDINGS]
+
+ return valid_findings, warnings
+
+
+def cmd_parse_findings(args: argparse.Namespace) -> None:
+ """Parse structured findings from review output text."""
+ text = read_file_or_stdin(args.file, "review output", use_json=args.json)
+ findings, warnings = parse_findings(text)
+
+ registered = 0
+ if args.register:
+ if not args.epic:
+ error_exit(
+ "--epic is required when --register is used", use_json=args.json
+ )
+ if not ensure_flow_exists():
+ error_exit(
+ ".flow/ does not exist. Run 'flowctl init' first.",
+ use_json=args.json,
+ )
+ if not is_epic_id(args.epic):
+ error_exit(f"Invalid epic ID: {args.epic}", use_json=args.json)
+
+ # Import gap internals (avoid circular at module level)
+ from flowctl.commands.gap import cmd_gap_add
+
+ for finding in findings:
+ severity = finding["severity"]
+ priority = SEVERITY_TO_PRIORITY.get(severity)
+ if priority is None:
+ warnings.append(
+ f"Unknown severity '{severity}' for '{finding['title']}', "
+ f"defaulting to 'important'"
+ )
+ priority = "important"
+
+ # Only register critical/major (required/important priorities)
+ if priority not in ("required", "important"):
+ continue
+
+ # Build a mock args namespace to reuse cmd_gap_add
+ gap_args = argparse.Namespace(
+ epic=args.epic,
+ capability=finding["title"],
+ priority=priority,
+ source=args.source,
+ task=None,
+ json=True, # always JSON to capture output
+ )
+
+ # Capture stdout to avoid polluting our output
+ old_stdout = sys.stdout
+ sys.stdout = _CaptureStdout()
+ try:
+ cmd_gap_add(gap_args)
+ registered += 1
+ except SystemExit:
+ # cmd_gap_add may exit on duplicate (which is fine — idempotent)
+ # Check if it was a success (gap already exists = still counts)
+ registered += 1
+ finally:
+ sys.stdout = old_stdout
+
+ # Handle unknown severity warnings for non-register mode
+ if not args.register:
+ for finding in findings:
+ severity = finding["severity"]
+ if severity not in SEVERITY_TO_PRIORITY:
+ warnings.append(
+ f"Unknown severity '{severity}' for '{finding['title']}', "
+ f"would default to 'important'"
+ )
+
+ result = {
+ "findings": findings,
+ "count": len(findings),
+ "registered": registered,
+ "warnings": warnings,
+ }
+
+ if args.json:
+ json_output(result)
+ else:
+ print(f"Found {len(findings)} finding(s)")
+ if registered:
+ print(f"Registered {registered} gap(s)")
+ for w in warnings:
+ print(f" Warning: {w}", file=sys.stderr)
+ for f in findings:
+ sev = f["severity"]
+ print(f" [{sev}] {f['title']} — {f['location']}")
+
+
+class _CaptureStdout:
+ """Minimal stdout capture to suppress gap add output."""
+
+ def __init__(self):
+ self.data = []
+
+ def write(self, s):
+ self.data.append(s)
+
+ def flush(self):
+ pass
diff --git a/scripts/flowctl/commands/review/prompts.py b/scripts/flowctl/commands/review/prompts.py
index 25277aa4..fb3dfd6a 100644
--- a/scripts/flowctl/commands/review/prompts.py
+++ b/scripts/flowctl/commands/review/prompts.py
@@ -123,6 +123,19 @@ def build_review_prompt(
- **Problem**: What's wrong
- **Suggestion**: How to fix
+**Structured findings (optional):** If you found issues, include a `<findings>` block with machine-readable JSON. SHIP reviews with no issues may omit this block.
+
+<findings>
+[
+  {
+    "title": "Short description of the issue",
+    "severity": "critical | major | minor | nitpick",
+    "location": "task ID, file:line, or spec section",
+    "recommendation": "How to fix"
+  }
+]
+</findings>
+
Be critical. Find real issues.
**REQUIRED**: End your response with exactly one verdict tag:
@@ -183,6 +196,19 @@ def build_review_prompt(
- **Problem**: What's wrong
- **Suggestion**: How to fix
+**Structured findings (optional):** If you found issues, include a `<findings>` block with machine-readable JSON. SHIP reviews with no issues may omit this block.
+
+<findings>
+[
+  {
+    "title": "Short description of the issue",
+    "severity": "critical | major | minor | nitpick",
+    "location": "task ID, file:line, or spec section",
+    "recommendation": "How to fix"
+  }
+]
+</findings>
+
Be critical. Find real issues.
**REQUIRED**: End your response with exactly one verdict tag:
@@ -529,6 +555,19 @@ def build_completion_review_prompt(
[For each GAP, describe what's missing and suggest fix]
```
+**Structured findings (optional):** If you found gaps, include a `<findings>` block with machine-readable JSON. SHIP reviews with no issues may omit this block.
+
+<findings>
+[
+  {
+    "title": "Short description of the issue",
+    "severity": "critical | major | minor | nitpick",
+    "location": "task ID, file:line, or spec section",
+    "recommendation": "How to fix"
+  }
+]
+</findings>
+
## Verdict
**SHIP** - All requirements covered. Epic can close.
diff --git a/scripts/smoke_test.sh b/scripts/smoke_test.sh
index 4843aed5..2fb87ecb 100755
--- a/scripts/smoke_test.sh
+++ b/scripts/smoke_test.sh
@@ -1609,6 +1609,130 @@ else
FAIL=$((FAIL + 1))
fi
+echo -e "${YELLOW}--- parse-findings ---${NC}"
+
+# Test: valid <findings> tag
+FINDINGS_FILE="$TEST_DIR/findings_valid.txt"
+cat > "$FINDINGS_FILE" <<'FINDINGS_EOF'
+Some review preamble text.
+
+<findings>
+[
+ {
+ "title": "Missing input validation",
+ "severity": "critical",
+ "location": "src/auth.py:42",
+ "recommendation": "Add input sanitization"
+ },
+ {
+ "title": "Unused import",
+ "severity": "nitpick",
+ "location": "src/utils.py:1",
+ "recommendation": "Remove unused import"
+ }
+]
+</findings>
+
+More review text after.
+FINDINGS_EOF
+
+pf_result="$(scripts/flowctl.py parse-findings --file "$FINDINGS_FILE" --json)"
+pf_count="$(echo "$pf_result" | "$PYTHON_BIN" -c 'import json,sys; print(json.load(sys.stdin).get("count", 0))')"
+if [[ "$pf_count" == "2" ]]; then
+    echo -e "${GREEN}✓${NC} parse-findings extracts findings from <findings> tag"
+ PASS=$((PASS + 1))
+else
+ echo -e "${RED}✗${NC} parse-findings count wrong (expected 2, got $pf_count)"
+ FAIL=$((FAIL + 1))
+fi
+
+# Test: missing tag → graceful empty
+FINDINGS_EMPTY="$TEST_DIR/findings_empty.txt"
+echo "No findings here, just plain review text." > "$FINDINGS_EMPTY"
+
+pf_empty="$(scripts/flowctl.py parse-findings --file "$FINDINGS_EMPTY" --json)"
+pf_empty_count="$(echo "$pf_empty" | "$PYTHON_BIN" -c 'import json,sys; print(json.load(sys.stdin).get("count", 0))')"
+pf_empty_warns="$(echo "$pf_empty" | "$PYTHON_BIN" -c 'import json,sys; w=json.load(sys.stdin).get("warnings",[]); print(len(w))')"
+if [[ "$pf_empty_count" == "0" ]] && [[ "$pf_empty_warns" -ge 1 ]]; then
+ echo -e "${GREEN}✓${NC} parse-findings gracefully handles missing tags"
+ PASS=$((PASS + 1))
+else
+ echo -e "${RED}✗${NC} parse-findings missing tag handling wrong (count=$pf_empty_count, warns=$pf_empty_warns)"
+ FAIL=$((FAIL + 1))
+fi
+
+# Test: malformed JSON (trailing commas)
+FINDINGS_MALFORMED="$TEST_DIR/findings_malformed.txt"
+cat > "$FINDINGS_MALFORMED" <<'FINDINGS_EOF'
+<findings>
+[
+ {
+ "title": "Trailing comma issue",
+ "severity": "major",
+ "location": "src/app.py:10",
+ "recommendation": "Fix the trailing comma",
+ },
+]
+</findings>
+FINDINGS_EOF
+
+pf_mal="$(scripts/flowctl.py parse-findings --file "$FINDINGS_MALFORMED" --json)"
+pf_mal_count="$(echo "$pf_mal" | "$PYTHON_BIN" -c 'import json,sys; print(json.load(sys.stdin).get("count", 0))')"
+if [[ "$pf_mal_count" == "1" ]]; then
+ echo -e "${GREEN}✓${NC} parse-findings handles malformed JSON (trailing commas)"
+ PASS=$((PASS + 1))
+else
+ echo -e "${RED}✗${NC} parse-findings malformed JSON handling wrong (expected 1, got $pf_mal_count)"
+ FAIL=$((FAIL + 1))
+fi
+
+# Test: --register auto gap add
+FINDINGS_REG="$TEST_DIR/findings_register.txt"
+cat > "$FINDINGS_REG" <<'FINDINGS_EOF'
+<findings>
+[
+ {
+ "title": "SQL injection vulnerability",
+ "severity": "critical",
+ "location": "src/db.py:99",
+ "recommendation": "Use parameterized queries"
+ },
+ {
+ "title": "Minor typo in comment",
+ "severity": "minor",
+ "location": "src/main.py:5",
+ "recommendation": "Fix typo"
+ }
+]
+</findings>
+FINDINGS_EOF
+
+pf_reg="$(scripts/flowctl.py parse-findings --file "$FINDINGS_REG" --epic "$EPIC1" --register --source plan-review --json)"
+pf_reg_registered="$(echo "$pf_reg" | "$PYTHON_BIN" -c 'import json,sys; print(json.load(sys.stdin).get("registered", 0))')"
+if [[ "$pf_reg_registered" == "1" ]]; then
+ echo -e "${GREEN}✓${NC} parse-findings --register adds critical/major gaps (skips minor)"
+ PASS=$((PASS + 1))
+else
+ echo -e "${RED}✗${NC} parse-findings --register wrong count (expected 1, got $pf_reg_registered)"
+ FAIL=$((FAIL + 1))
+fi
+
+# Verify the gap was actually created
+gap_reg_check="$(scripts/flowctl.py gap list --epic "$EPIC1" --json | "$PYTHON_BIN" -c '
+import json, sys
+data = json.load(sys.stdin)
+gaps = data.get("gaps", [])
+sql_gaps = [g for g in gaps if "SQL injection" in g.get("capability", "")]
+print(len(sql_gaps))
+')"
+if [[ "$gap_reg_check" == "1" ]]; then
+ echo -e "${GREEN}✓${NC} parse-findings --register actually created the gap"
+ PASS=$((PASS + 1))
+else
+ echo -e "${RED}✗${NC} parse-findings --register gap not found in registry (found $gap_reg_check)"
+ FAIL=$((FAIL + 1))
+fi
+
echo ""
echo -e "${YELLOW}=== Results ===${NC}"
echo -e "Passed: ${GREEN}$PASS${NC}"
diff --git a/skills/flow-code-epic-review/workflow.md b/skills/flow-code-epic-review/workflow.md
index d4fe0fdc..10ca3d59 100644
--- a/skills/flow-code-epic-review/workflow.md
+++ b/skills/flow-code-epic-review/workflow.md
@@ -130,7 +130,14 @@ $FLOWCTL codex completion-review "$EPIC_ID" --receipt "$RECEIPT_PATH"
### Step 3: Handle Verdict
If `VERDICT=NEEDS_WORK`:
-1. Parse issues from output
+1. Parse issues from output and register as gaps:
+ ```bash
+ # Save review output to temp file, then register findings as gaps
+ echo "$REVIEW_OUTPUT" > /tmp/review-response.txt
+ FINDINGS_RESULT="$($FLOWCTL parse-findings --file /tmp/review-response.txt --epic "$EPIC_ID" --register --source epic-review --json)"
+ REGISTERED="$(echo "$FINDINGS_RESULT" | python3 -c 'import json,sys; print(json.load(sys.stdin).get("registered",0))' 2>/dev/null || echo 0)"
+ echo "Registered $REGISTERED findings as gaps"
+ ```
2. Fix code and run tests
3. Commit fixes
4. Re-run step 2 (receipt enables session continuity)
@@ -297,6 +304,21 @@ For each gap found:
- **Status**: Missing / Partial / Wrong
- **Evidence**: What you found (or didn't find) in the code
+**Structured findings (optional):** If you found issues, include a `<findings>` block with machine-readable JSON. SHIP reviews with no issues may omit this block.
+
+```
+<findings>
+[
+  {
+    "title": "Short description of the issue",
+    "severity": "critical | major | minor | nitpick",
+    "location": "task ID, file:line, or spec section",
+    "recommendation": "How to fix"
+  }
+]
+</findings>
+```
+
**REQUIRED**: You MUST end your response with exactly one verdict tag. This is mandatory:
`SHIP` or `NEEDS_WORK`
@@ -365,7 +387,14 @@ fi
If verdict is NEEDS_WORK:
-1. **Parse issues** - Extract ALL gaps (missing requirements, partial implementations)
+1. **Parse issues** - Extract ALL gaps (missing requirements, partial implementations). Register findings as gaps:
+ ```bash
+ # Save review response to temp file, then register findings as gaps
+ echo "$REVIEW_RESPONSE" > /tmp/review-response.txt
+ FINDINGS_RESULT="$($FLOWCTL parse-findings --file /tmp/review-response.txt --epic "$EPIC_ID" --register --source epic-review --json)"
+ REGISTERED="$(echo "$FINDINGS_RESULT" | python3 -c 'import json,sys; print(json.load(sys.stdin).get("registered",0))' 2>/dev/null || echo 0)"
+ echo "Registered $REGISTERED findings as gaps"
+ ```
2. **Fix the code** - Implement missing functionality
3. **Run tests/lints** - Verify fixes don't break anything
4. **Commit fixes** (MANDATORY before re-review):
diff --git a/skills/flow-code-impl-review/workflow.md b/skills/flow-code-impl-review/workflow.md
index f470b8b5..f26176e4 100644
--- a/skills/flow-code-impl-review/workflow.md
+++ b/skills/flow-code-impl-review/workflow.md
@@ -69,7 +69,14 @@ $FLOWCTL codex impl-review "$TASK_ID" --base "$DIFF_BASE" --receipt "$RECEIPT_PA
### Step 3: Handle Verdict
If `VERDICT=NEEDS_WORK`:
-1. Parse issues from output
+1. Parse issues from output and register as gaps:
+ ```bash
+ # Save review output to temp file, then register findings as gaps
+ echo "$REVIEW_OUTPUT" > /tmp/review-response.txt
+ FINDINGS_RESULT="$($FLOWCTL parse-findings --file /tmp/review-response.txt --epic "$EPIC_ID" --register --source impl-review --json)"
+ REGISTERED="$(echo "$FINDINGS_RESULT" | python3 -c 'import json,sys; print(json.load(sys.stdin).get("registered",0))' 2>/dev/null || echo 0)"
+ echo "Registered $REGISTERED findings as gaps"
+ ```
2. Fix code and run tests
3. Commit fixes
4. Re-run step 2 (receipt enables session continuity)
@@ -253,6 +260,21 @@ For each issue:
- **Problem**: What's wrong
- **Suggestion**: How to fix
+**Structured findings (optional):** If you found issues, include a `<findings>` block with machine-readable JSON. SHIP reviews with no issues may omit this block.
+
+```
+<findings>
+[
+  {
+    "title": "Short description of the issue",
+    "severity": "critical | major | minor | nitpick",
+    "location": "task ID, file:line, or spec section",
+    "recommendation": "How to fix"
+  }
+]
+</findings>
+```
+
**REQUIRED**: You MUST end your response with exactly one verdict tag. This is mandatory:
`SHIP` or `NEEDS_WORK` or `MAJOR_RETHINK`
@@ -304,7 +326,14 @@ If no verdict tag in response, output `RETRY` and stop.
If verdict is NEEDS_WORK:
-1. **Parse issues** - Extract ALL issues by severity (Critical → Major → Minor)
+1. **Parse issues** - Extract ALL issues by severity (Critical → Major → Minor). Register findings as gaps:
+ ```bash
+ # Save review response to temp file, then register findings as gaps
+ echo "$REVIEW_RESPONSE" > /tmp/review-response.txt
+ FINDINGS_RESULT="$($FLOWCTL parse-findings --file /tmp/review-response.txt --epic "$EPIC_ID" --register --source impl-review --json)"
+ REGISTERED="$(echo "$FINDINGS_RESULT" | python3 -c 'import json,sys; print(json.load(sys.stdin).get("registered",0))' 2>/dev/null || echo 0)"
+ echo "Registered $REGISTERED findings as gaps"
+ ```
2. **Verify before fixing** - For each issue, check:
- Will this change break existing tests or functionality? If yes, skip with a note in re-review.
- Is the suggested addition actually used? (YAGNI check — grep for callers before adding code)
diff --git a/skills/flow-code-plan-review/workflow.md b/skills/flow-code-plan-review/workflow.md
index ddc855da..960a85cb 100644
--- a/skills/flow-code-plan-review/workflow.md
+++ b/skills/flow-code-plan-review/workflow.md
@@ -74,7 +74,14 @@ $FLOWCTL epic set-plan-review-status "$EPIC_ID" --status needs_work --json
### Step 3: Handle Verdict
If `VERDICT=NEEDS_WORK`:
-1. Parse issues from output
+1. Parse issues from output and register as gaps:
+ ```bash
+ # Save review output to temp file, then register findings as gaps
+ echo "$REVIEW_OUTPUT" > /tmp/review-response.txt
+ FINDINGS_RESULT="$($FLOWCTL parse-findings --file /tmp/review-response.txt --epic "$EPIC_ID" --register --source plan-review --json)"
+ REGISTERED="$(echo "$FINDINGS_RESULT" | python3 -c 'import json,sys; print(json.load(sys.stdin).get("registered",0))' 2>/dev/null || echo 0)"
+ echo "Registered $REGISTERED findings as gaps"
+ ```
2. Fix plan via `$FLOWCTL epic set-plan`
3. Re-run step 1 (receipt enables session continuity)
4. Repeat until SHIP
@@ -221,6 +228,21 @@ For each issue:
- **Problem**: What's wrong
- **Suggestion**: How to fix
+**Structured findings (optional):** If you found issues, include a `<findings>` block with machine-readable JSON. SHIP reviews with no issues may omit this block.
+
+```
+<findings>
+[
+  {
+    "title": "Short description of the issue",
+    "severity": "critical | major | minor | nitpick",
+    "location": "task ID, file:line, or spec section",
+    "recommendation": "How to fix"
+  }
+]
+</findings>
+```
+
**REQUIRED**: You MUST end your response with exactly one verdict tag. This is mandatory:
`SHIP` or `NEEDS_WORK` or `MAJOR_RETHINK`
@@ -276,7 +298,14 @@ If no verdict tag, output `RETRY` and stop.
If verdict is NEEDS_WORK:
-1. **Parse issues** - Extract ALL issues by severity (Critical → Major → Minor)
+1. **Parse issues** - Extract ALL issues by severity (Critical → Major → Minor). Register findings as gaps:
+ ```bash
+ # Save review response to temp file, then register findings as gaps
+ echo "$REVIEW_RESPONSE" > /tmp/review-response.txt
+ FINDINGS_RESULT="$($FLOWCTL parse-findings --file /tmp/review-response.txt --epic "$EPIC_ID" --register --source plan-review --json)"
+ REGISTERED="$(echo "$FINDINGS_RESULT" | python3 -c 'import json,sys; print(json.load(sys.stdin).get("registered",0))' 2>/dev/null || echo 0)"
+ echo "Registered $REGISTERED findings as gaps"
+ ```
2. **Fix the epic spec** - Address each issue.
3. **Update epic spec in flowctl** (MANDATORY before re-review):
```bash