diff --git a/.claude/hooks/rtk-rewrite.sh b/.claude/hooks/rtk-rewrite.sh
index e1f8d1e57..6e7524d89 100755
--- a/.claude/hooks/rtk-rewrite.sh
+++ b/.claude/hooks/rtk-rewrite.sh
@@ -1,9 +1,16 @@
#!/bin/bash
+# rtk-hook-version: 3
# RTK auto-rewrite hook for Claude Code PreToolUse:Bash
# Transparently rewrites raw commands to their RTK equivalents.
# Uses `rtk rewrite` as single source of truth — no duplicate mapping logic here.
#
# To add support for new commands, update src/discover/registry.rs (PATTERNS + RULES).
+#
+# Exit code protocol for `rtk rewrite`:
+# 0 + stdout Rewrite found, no deny/ask rule matched → auto-allow
+# 1 No RTK equivalent → pass through unchanged
+# 2 Deny rule matched → pass through (Claude Code native deny handles it)
+# 3 + stdout Ask rule matched → rewrite but let Claude Code prompt the user
# --- Audit logging (opt-in via RTK_HOOK_AUDIT=1) ---
_rtk_audit_log() {
@@ -37,19 +44,37 @@ case "$CMD" in
*'<<'*) _rtk_audit_log "skip:heredoc" "$CMD"; exit 0 ;;
esac
-# Rewrite via rtk — single source of truth for all command mappings.
-# Exit 1 = no RTK equivalent, pass through unchanged.
-# Exit 0 = rewritten command (or already RTK, identical output).
-REWRITTEN=$(rtk rewrite "$CMD" 2>/dev/null) || {
- _rtk_audit_log "skip:no_match" "$CMD"
- exit 0
-}
+# Rewrite via rtk — single source of truth for all command mappings and permission checks.
+# Use "|| EXIT_CODE=$?" to capture non-zero exit codes without triggering set -e.
+EXIT_CODE=0
+REWRITTEN=$(rtk rewrite "$CMD" 2>/dev/null) || EXIT_CODE=$?
-# If output is identical, command was already using RTK — nothing to do.
-if [ "$CMD" = "$REWRITTEN" ]; then
- _rtk_audit_log "skip:already_rtk" "$CMD"
- exit 0
-fi
+case $EXIT_CODE in
+ 0)
+ # Rewrite found, no permission rules matched — safe to auto-allow.
+ if [ "$CMD" = "$REWRITTEN" ]; then
+ _rtk_audit_log "skip:already_rtk" "$CMD"
+ exit 0
+ fi
+ ;;
+ 1)
+ # No RTK equivalent — pass through unchanged.
+ _rtk_audit_log "skip:no_match" "$CMD"
+ exit 0
+ ;;
+ 2)
+ # Deny rule matched — let Claude Code's native deny rule handle it.
+ _rtk_audit_log "skip:deny_rule" "$CMD"
+ exit 0
+ ;;
+ 3)
+ # Ask rule matched — rewrite the command but do NOT auto-allow so that
+ # Claude Code prompts the user for confirmation.
+ ;;
+ *)
+ exit 0
+ ;;
+esac
_rtk_audit_log "rewrite" "$CMD" "$REWRITTEN"
@@ -57,14 +82,26 @@ _rtk_audit_log "rewrite" "$CMD" "$REWRITTEN"
ORIGINAL_INPUT=$(echo "$INPUT" | jq -c '.tool_input')
UPDATED_INPUT=$(echo "$ORIGINAL_INPUT" | jq --arg cmd "$REWRITTEN" '.command = $cmd')
-# Output the rewrite instruction in Claude Code hook format.
-jq -n \
- --argjson updated "$UPDATED_INPUT" \
- '{
- "hookSpecificOutput": {
- "hookEventName": "PreToolUse",
- "permissionDecision": "allow",
- "permissionDecisionReason": "RTK auto-rewrite",
- "updatedInput": $updated
- }
- }'
+if [ "$EXIT_CODE" -eq 3 ]; then
+ # Ask: rewrite the command, omit permissionDecision so Claude Code prompts.
+ jq -n \
+ --argjson updated "$UPDATED_INPUT" \
+ '{
+ "hookSpecificOutput": {
+ "hookEventName": "PreToolUse",
+ "updatedInput": $updated
+ }
+ }'
+else
+ # Allow: output the rewrite instruction in Claude Code hook format.
+ jq -n \
+ --argjson updated "$UPDATED_INPUT" \
+ '{
+ "hookSpecificOutput": {
+ "hookEventName": "PreToolUse",
+ "permissionDecision": "allow",
+ "permissionDecisionReason": "RTK auto-rewrite",
+ "updatedInput": $updated
+ }
+ }'
+fi
diff --git a/.claude/skills/rtk-triage/SKILL.md b/.claude/skills/rtk-triage/SKILL.md
new file mode 100644
index 000000000..9aed21a26
--- /dev/null
+++ b/.claude/skills/rtk-triage/SKILL.md
@@ -0,0 +1,241 @@
+---
+description: >
+ Triage complet RTK : exécute issue-triage + pr-triage en parallèle,
+ puis croise les données pour détecter doubles couvertures, trous sécurité,
+ P0 sans PR, et conflits internes. Sauvegarde dans claudedocs/RTK-YYYY-MM-DD.md.
+ Args: "en"/"fr" pour la langue (défaut: fr), "save" pour forcer la sauvegarde.
+allowed-tools:
+ - Bash
+ - Write
+ - Read
+ - AskUserQuestion
+---
+
+# /rtk-triage
+
+Orchestrateur de triage RTK. Fusionne issue-triage + pr-triage et produit une analyse croisée.
+
+---
+
+## Quand utiliser
+
+- Hebdomadaire ou avant chaque sprint
+- Quand le backlog PR/issues grossit rapidement
+- Pour identifier les doublons avant de reviewer
+
+---
+
+## Workflow en 4 phases
+
+### Phase 0 — Préconditions
+
+```bash
+git rev-parse --is-inside-work-tree
+gh auth status
+```
+
+Vérifier que la date actuelle est connue (utiliser `date +%Y-%m-%d`).
+
+---
+
+### Phase 1 — Data gathering (parallèle)
+
+Lancer les deux collectes simultanément :
+
+**Issues** :
+```bash
+gh repo view --json nameWithOwner -q .nameWithOwner
+
+gh issue list --state open --limit 150 \
+ --json number,title,author,createdAt,updatedAt,labels,assignees,body
+
+gh issue list --state closed --limit 20 \
+ --json number,title,labels,closedAt
+
+gh api "repos/{owner}/{repo}/collaborators" --jq '.[].login'
+```
+
+**PRs** :
+```bash
+# Fetcher toutes les PRs ouvertes — paginer si nécessaire (gh limite à 200 par appel)
+gh pr list --state open --limit 200 \
+ --json number,title,author,createdAt,updatedAt,additions,deletions,changedFiles,isDraft,mergeable,reviewDecision,statusCheckRollup,body
+
+# Si le repo a >200 PRs ouvertes, relancer avec --search pour paginer :
+# gh pr list --state open --limit 200 --search "is:pr is:open sort:updated-desc" ...
+
+# Pour chaque PR, récupérer les fichiers modifiés (nécessaire pour overlap detection)
+# Prioriser les PRs candidates (même domaine, même auteur)
+gh pr view {num} --json files --jq '[.files[].path] | join(",")'
+```
+
+---
+
+### Phase 2 — Triage individuel
+
+Exécuter les analyses de `/issue-triage` et `/pr-triage` séparément (même logique que les skills individuels) pour produire :
+
+**Issues** :
+- Catégorisation (Bug/Feature/Enhancement/Question/Duplicate)
+- Risque (Rouge/Jaune/Vert)
+- Staleness (>30j)
+- Map `issue_number → [PR numbers]` via scan `fixes #N`, `closes #N`, `resolves #N`
+
+**PRs** :
+- Taille (XS/S/M/L/XL)
+- CI status (clean/dirty)
+- Nos PRs vs externes
+- Overlaps (>50% fichiers communs entre 2 PRs)
+- Clusters (auteur avec 3+ PRs)
+
+Afficher les tableaux standards de chaque skill (voir SKILL.md de issue-triage et pr-triage pour le format exact).
+
+---
+
+### Phase 3 — Analyse croisée (cœur de ce skill)
+
+C'est ici que ce skill apporte de la valeur au-delà des deux skills individuels.
+
+#### 3.1 Double couverture — 2 PRs pour 1 issue
+
+Pour chaque issue liée à ≥2 PRs (via scan des bodies + overlap fichiers) :
+
+| Issue | PR1 (infos) | PR2 (infos) | Verdict recommandé |
+|-------|-------------|-------------|-------------------|
+| #N (titre) | PR#X — auteur, taille, CI | PR#Y — auteur, taille, CI | Garder la plus ciblée. Fermer/coordonner l'autre |
+
+Règle de verdict :
+- Préférer la plus petite (XS < S < M) si même scope
+- Préférer CI clean sur CI dirty
+- Préférer "nos PRs" si l'une est interne
+- Si overlap de fichiers >80% → conflit quasi-certain, signaler
+
+#### 3.2 Trous de couverture sécurité
+
+Pour chaque issue rouge (#640-type security review) :
+- Lister les sous-findings mentionnés dans le body
+- Croiser avec les PRs existantes (mots-clés dans titre/body)
+- Identifier les findings sans PR
+
+Format :
+```
+## Issue #N — security review (finding par finding)
+| Finding | PR associée | Status |
+|---------|-------------|--------|
+| Description finding 1 | PR#X | En review |
+| **Description finding critique** | **AUCUNE** | ⚠️ Trou |
+```
+
+#### 3.3 P0/P1 bugs sans PR
+
+Issues labelisées P0 ou P1 (ou mots-clés : "crash", "truncat", "cap", "hardcoded") sans aucune PR liée.
+
+Format :
+```
+## Bugs critiques sans PR
+| Issue | Titre | Pattern commun | Effort estimé |
+|-------|-------|----------------|---------------|
+```
+
+Chercher un pattern commun (ex: "cap hardcodé", "exit code perdu") — si 3+ bugs partagent un pattern, suggérer un sprint groupé.
+
+#### 3.4 Nos PRs dirty — causes probables
+
+Pour chaque PR interne avec CI dirty ou CONFLICTING :
+- Vérifier si un autre PR touche les mêmes fichiers
+- Vérifier si un merge récent sur develop peut expliquer le conflit
+- Recommander : rebase, fermeture, ou attente
+
+Format :
+```
+## Nos PRs dirty
+| PR | Issue(s) | Cause probable | Action |
+|----|----------|----------------|--------|
+```
+
+#### 3.5 PRs sans issue trackée
+
+PRs internes sans `fixes #N` dans le body — signaler pour traçabilité.
+
+---
+
+### Phase 4 — Output final
+
+#### Afficher l'analyse croisée complète (sections 3.1 → 3.5)
+
+Puis afficher le résumé chiffré :
+
+```
+## Résumé chiffré — YYYY-MM-DD
+
+| Catégorie | Count |
+|-----------|-------|
+| PRs prêtes à merger (nos) | N |
+| Quick wins externes | N |
+| Double couverture (conflicts) | N paires |
+| P0/P1 bugs sans PR | N |
+| Security findings sans PR | N |
+| Nos PRs dirty à rebaser | N |
+| PRs à fermer (recommandé) | N |
+```
+
+#### Sauvegarder dans claudedocs
+
+```bash
+date +%Y-%m-%d # Pour construire le nom de fichier
+```
+
+Sauvegarder dans `claudedocs/RTK-YYYY-MM-DD.md` avec :
+- Les tableaux de triage issues + PRs (Phase 2)
+- L'analyse croisée complète (Phase 3)
+- Le résumé chiffré
+
+Confirmer : `Sauvegardé dans claudedocs/RTK-YYYY-MM-DD.md`
+
+---
+
+## Format du fichier sauvegardé
+
+```markdown
+# RTK Triage — YYYY-MM-DD
+
+Croisement issues × PRs. {N} PRs ouvertes, {N} issues ouvertes.
+
+---
+
+## 1. Double couverture
+...
+
+## 2. Trous sécurité
+...
+
+## 3. P0/P1 sans PR
+...
+
+## 4. Nos PRs dirty
+...
+
+## 5. Nos PRs prêtes à merger
+...
+
+## 6. Quick wins externes
+...
+
+## 7. Actions prioritaires
+(liste ordonnée par impact/urgence)
+
+---
+
+## Résumé chiffré
+...
+```
+
+---
+
+## Règles
+
+- Langue : argument `en`/`fr`. Défaut : `fr`. Les commentaires GitHub restent toujours en anglais.
+- Ne jamais poster de commentaires GitHub sans validation utilisateur (AskUserQuestion).
+- Si >200 issues ou >200 PRs : prévenir l'utilisateur et paginer (relancer avec `--search` ou `gh api` avec pagination).
+- L'analyse croisée (Phase 3) est toujours exécutée — c'est la valeur ajoutée de ce skill.
+- Le fichier claudedocs est sauvegardé automatiquement sauf si l'utilisateur dit "no save".
diff --git a/.github/PULL_REQUEST_TEMPLATE.md b/.github/PULL_REQUEST_TEMPLATE.md
new file mode 100644
index 000000000..df3e32a3d
--- /dev/null
+++ b/.github/PULL_REQUEST_TEMPLATE.md
@@ -0,0 +1,13 @@
+## Summary
+
+
+-
+
+## Test plan
+
+
+- [ ] `cargo fmt --all && cargo clippy --all-targets && cargo test`
+- [ ] Manual testing: `rtk <command>` output inspected
+
+> **Important:** All PRs must target the `develop` branch (not `master`).
+> See [CONTRIBUTING.md](../blob/master/CONTRIBUTING.md) for details.
diff --git a/.github/copilot-instructions.md b/.github/copilot-instructions.md
new file mode 100644
index 000000000..7651df48a
--- /dev/null
+++ b/.github/copilot-instructions.md
@@ -0,0 +1,127 @@
+# Copilot Instructions for rtk
+
+**rtk (Rust Token Killer)** is a CLI proxy that filters and compresses command outputs before they reach an LLM context, saving 60–90% of tokens. It wraps common tools (`git`, `cargo`, `grep`, `pnpm`, `go`, etc.) and outputs condensed summaries instead of raw output.
+
+## Using rtk in this session
+
+**Always prefix commands with `rtk` when running shell commands** — this is the entire point of the project and reduces token consumption for every operation you perform.
+
+```bash
+# Instead of: Use:
+git status rtk git status
+git log -10 rtk git log -10
+cargo test rtk cargo test
+cargo clippy --all-targets rtk cargo clippy --all-targets
+grep -r "pattern" src/ rtk grep -r "pattern" src/
+```
+
+**rtk meta-commands** (always use these directly, no prefix needed):
+```bash
+rtk gain # Show token savings analytics for this session
+rtk gain --history # Full command history with per-command savings
+rtk discover # Scan session history for missed rtk opportunities
+rtk proxy # Run a command raw (no filtering) but still track it
+```
+
+**Verify rtk is installed before starting:**
+```bash
+rtk --version # Should print: rtk X.Y.Z
+rtk gain # Should show a dashboard (not "command not found")
+```
+
+> ⚠️ **Name collision**: `rtk gain` failing means you have `reachingforthejack/rtk` (Rust Type Kit) installed instead of this project. Run `which rtk` and check the binary source.
+
+## Build, Test & Lint
+
+```bash
+# Development build
+cargo build
+
+# Run all tests
+cargo test
+
+# Run a single test by name
+cargo test test_filter_git_log
+
+# Run all tests in a module
+cargo test git::tests::
+
+# Run tests with stdout
+cargo test -- --nocapture
+
+# Pre-commit gate (must all pass before any PR)
+cargo fmt --all --check && cargo clippy --all-targets && cargo test
+
+# Smoke tests (requires installed binary)
+bash scripts/test-all.sh
+```
+
+PRs target the **`develop`** branch, not `master`. All commits require a DCO sign-off (`git commit -s`).
+
+## Architecture
+
+```
+main.rs ← Clap Commands enum → specialized module (git.rs, *_cmd.rs, etc.)
+ ↓
+ execute subprocess
+ ↓
+ filter/compress output
+ ↓
+ tracking::TimedExecution → SQLite (~/.local/share/rtk/tracking.db)
+```
+
+Key modules:
+- **`main.rs`** — Clap `Commands` enum routes every subcommand to its module. Each arm calls `tracking::TimedExecution::start()` before running, then `.track(...)` after.
+- **`filter.rs`** — Language-aware filtering with `FilterLevel` (`none` / `minimal` / `aggressive`) and `Language` enum. Used by `read` and `smart` commands.
+- **`tracking.rs`** — SQLite persistence for token savings, scoped per project path. Powers `rtk gain`.
+- **`tee.rs`** — On filter failure, saves raw output to `~/.local/share/rtk/tee/` and prints a one-line hint so the LLM can re-read without re-running the command.
+- **`utils.rs`** — Shared helpers: `truncate`, `strip_ansi`, `execute_command`, package-manager auto-detection (pnpm/yarn/npm/npx).
+
+New commands follow this structure: one file `src/<name>_cmd.rs` with a `pub fn run(...)` entry point, registered in the `Commands` enum in `main.rs`.
+
+## Key Conventions
+
+### Error handling
+- Use `anyhow::Result` throughout (this is a binary, not a library).
+- Always attach context: `operation.context("description")?` — never bare `?` without context.
+- No `unwrap()` in production code; `expect("reason")` is acceptable only in tests.
+- Every filter must fall back to raw command execution on error — never break the user's workflow.
+
+### Regex
+- Compile once with `lazy_static!`, never inside a function body:
+ ```rust
+ lazy_static! {
+ static ref RE: Regex = Regex::new(r"pattern").unwrap();
+ }
+ ```
+
+### Testing
+- Unit tests live **inside the module file** in `#[cfg(test)] mod tests { ... }` — not in `tests/`.
+- Fixtures are real captured command output in `tests/fixtures/<command>_raw.txt`, loaded with `include_str!("../tests/fixtures/...")`.
+- Each test module defines its own local `fn count_tokens(text: &str) -> usize` (word-split approximation) — there is no shared utility for this.
+- Token savings assertions use `assert!(savings >= 60.0, ...)`.
+- Snapshot tests use `assert_snapshot!()` from the `insta` crate; review with `cargo insta review`.
+
+### Adding a new command
+1. Create `src/<name>_cmd.rs` with `pub fn run(...)`.
+2. Add `mod <name>_cmd;` at the top of `main.rs`.
+3. Add a variant to the `Commands` enum with `#[arg(trailing_var_arg = true, allow_hyphen_values = true)]` for pass-through flags.
+4. Route the variant in the `match` block, wrapping execution with `tracking::TimedExecution`.
+5. Write a fixture from real output, then unit tests in the module file.
+6. Update `README.md` (command list + savings %) and `CHANGELOG.md`.
+
+### Exit codes
+Preserve the underlying command's exit code. Use `std::process::exit(code)` when the child process exits non-zero.
+
+### Performance constraints
+- Startup must stay under 10ms — no async runtime (no `tokio`/`async-std`).
+- No blocking I/O at startup; config is loaded on-demand.
+- Binary size target: <5 MB stripped.
+
+### Commit message format
+```
+fix(scope): short-description
+feat(scope): short-description
+chore(scope): short-description
+```
+`scope` is the affected component (e.g. `git`, `filter`, `tracking`).
diff --git a/.github/hooks/rtk-rewrite.json b/.github/hooks/rtk-rewrite.json
new file mode 100644
index 000000000..c488d4349
--- /dev/null
+++ b/.github/hooks/rtk-rewrite.json
@@ -0,0 +1,12 @@
+{
+ "hooks": {
+ "PreToolUse": [
+ {
+ "type": "command",
+ "command": "rtk hook",
+ "cwd": ".",
+ "timeout": 5
+ }
+ ]
+ }
+}
diff --git a/.github/workflows/CICD.md b/.github/workflows/CICD.md
index 071d234af..53776a00d 100644
--- a/.github/workflows/CICD.md
+++ b/.github/workflows/CICD.md
@@ -39,18 +39,19 @@ Trigger: pull_request to develop or master
## Merge to develop — pre-release (cd.yml)
-Trigger: push to develop | Concurrency: cancel-in-progress
+Trigger: push to develop | workflow_dispatch (not master) | Concurrency: cancel-in-progress
```
┌──────────────────┐
│ push to develop │
+ │ OR dispatch │
└────────┬─────────┘
│
┌────────▼──────────────────┐
│ pre-release │
- │ read Cargo.toml version │
- │ tag = v{ver}-rc.{run} │
- │ safety: fail if exists │
+ │ compute next version │
+ │ from conventional commits │
+ │ tag = dev-{next}-rc.{run} │
└────────┬──────────────────┘
│
┌────────▼──────────────────┐
@@ -74,7 +75,7 @@ Trigger: push to develop | Concurrency: cancel-in-progress
## Merge to master — stable release (cd.yml)
-Trigger: push to master | Concurrency: never cancelled
+Trigger: push to master (only) | Concurrency: never cancelled
```
┌──────────────────┐
diff --git a/.github/workflows/cd.yml b/.github/workflows/cd.yml
index 49d52bffb..393f9e500 100644
--- a/.github/workflows/cd.yml
+++ b/.github/workflows/cd.yml
@@ -1,6 +1,7 @@
name: CD
on:
+ workflow_dispatch:
push:
branches: [develop, master]
@@ -18,7 +19,9 @@ jobs:
# ═══════════════════════════════════════════════
pre-release:
- if: github.ref == 'refs/heads/develop'
+ if: >-
+ github.ref == 'refs/heads/develop'
+ || (github.event_name == 'workflow_dispatch' && github.ref != 'refs/heads/master')
runs-on: ubuntu-latest
outputs:
tag: ${{ steps.tag.outputs.tag }}
@@ -26,17 +29,49 @@ jobs:
- uses: actions/checkout@v4
with:
fetch-depth: 0
+ fetch-tags: true
- - name: Compute pre-release tag
+ - name: Compute version from commits like release please
id: tag
run: |
- VERSION=$(grep '^version = ' Cargo.toml | head -1 | cut -d'"' -f2)
- TAG="v${VERSION}-rc.${{ github.run_number }}"
+ LATEST_TAG=$(git tag -l 'v[0-9]*.[0-9]*.[0-9]*' --sort=-version:refname | grep -v '-' | head -1)
+ if [ -z "$LATEST_TAG" ]; then
+ echo "::error::No stable release tag found"
+ exit 1
+ fi
+ LATEST_VERSION="${LATEST_TAG#v}"
+ echo "Latest release: $LATEST_TAG"
+
+ # ── Analyse conventional commits since that tag ──
+ COMMITS=$(git log "${LATEST_TAG}..HEAD" --format="%s")
+ HAS_BREAKING=$(echo "$COMMITS" | grep -cE '^[a-z]+(\(.+\))?!:' || true)
+ HAS_FEAT=$(echo "$COMMITS" | grep -cE '^feat(\(.+\))?:' || true)
+ HAS_FIX=$(echo "$COMMITS" | grep -cE '^fix(\(.+\))?:' || true)
+ echo "Commits since ${LATEST_TAG} — breaking=$HAS_BREAKING feat=$HAS_FEAT fix=$HAS_FIX"
- # Safety: warn if this base version is already released
- if git ls-remote --tags origin "refs/tags/v${VERSION}" | grep -q .; then
- echo "::warning::v${VERSION} already released. Consider bumping Cargo.toml on develop."
+ # ── Compute next version (matches release-please observed behaviour) ──
+ # Pre-1.0 with bump-minor-pre-major: breaking → minor, feat → minor, fix → patch
+ IFS='.' read -r MAJOR MINOR PATCH <<< "$LATEST_VERSION"
+ if [ "$MAJOR" -eq 0 ]; then
+ if [ "$HAS_BREAKING" -gt 0 ] || [ "$HAS_FEAT" -gt 0 ]; then
+ MINOR=$((MINOR + 1)); PATCH=0 # breaking or feat → minor
+ else
+ PATCH=$((PATCH + 1)) # fix only → patch
+ fi
+ else
+ if [ "$HAS_BREAKING" -gt 0 ]; then
+ MAJOR=$((MAJOR + 1)); MINOR=0; PATCH=0 # breaking → major
+ elif [ "$HAS_FEAT" -gt 0 ]; then
+ MINOR=$((MINOR + 1)); PATCH=0 # feat → minor
+ else
+ PATCH=$((PATCH + 1)) # fix → patch
+ fi
fi
+ VERSION="${MAJOR}.${MINOR}.${PATCH}"
+ TAG="dev-${VERSION}-rc.${{ github.run_number }}"
+
+ echo "Next version: $VERSION (from $LATEST_VERSION)"
+ echo "Pre-release tag: $TAG"
# Safety: fail if this exact tag already exists
if git ls-remote --tags origin "refs/tags/${TAG}" | grep -q .; then
@@ -45,7 +80,6 @@ jobs:
fi
echo "tag=$TAG" >> $GITHUB_OUTPUT
- echo "Pre-release tag: $TAG"
build-prerelease:
name: Build pre-release
@@ -64,7 +98,7 @@ jobs:
# ═══════════════════════════════════════════════
release-please:
- if: github.ref == 'refs/heads/master'
+ if: github.ref == 'refs/heads/master' && github.event_name == 'push'
runs-on: ubuntu-latest
outputs:
release_created: ${{ steps.release.outputs.release_created }}
diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml
index eae6866c5..bad4b5d62 100644
--- a/.github/workflows/ci.yml
+++ b/.github/workflows/ci.yml
@@ -201,14 +201,6 @@ jobs:
- name: Run benchmark
run: ./scripts/benchmark.sh
- # ─── DCO: develop PRs only ───
-
- check:
- name: check
- if: github.base_ref == 'develop'
- runs-on: ubuntu-latest
- steps:
- - uses: KineticCafe/actions-dco@v1
# ─── AI Doc Review: develop PRs only ───
diff --git a/.github/workflows/pr-target-check.yml b/.github/workflows/pr-target-check.yml
new file mode 100644
index 000000000..60211f1cd
--- /dev/null
+++ b/.github/workflows/pr-target-check.yml
@@ -0,0 +1,43 @@
+name: PR Target Branch Check
+
+on:
+ pull_request_target:
+ types: [opened, edited]
+
+jobs:
+ check-target:
+ runs-on: ubuntu-latest
+ # Skip develop→master PRs (maintainer releases)
+ if: >-
+ github.event.pull_request.base.ref == 'master' &&
+ github.event.pull_request.head.ref != 'develop'
+ steps:
+ - name: Add wrong-base label
+ uses: actions/github-script@v7
+ with:
+ script: |
+ const pr = context.payload.pull_request;
+
+ // Add label
+ await github.rest.issues.addLabels({
+ owner: context.repo.owner,
+ repo: context.repo.repo,
+ issue_number: pr.number,
+ labels: ['wrong-base']
+ });
+
+ // Post comment
+ const body = `👋 Thanks for the PR! It looks like this targets \`master\`, but all PRs should target the **\`develop\`** branch.
+
+ Please update the base branch:
+ 1. Click **Edit** at the top right of this PR
+ 2. Change the base branch from \`master\` to \`develop\`
+
+ See [CONTRIBUTING.md](https://github.com/${context.repo.owner}/${context.repo.repo}/blob/master/CONTRIBUTING.md) for details.`;
+
+ await github.rest.issues.createComment({
+ owner: context.repo.owner,
+ repo: context.repo.repo,
+ issue_number: pr.number,
+ body: body
+ });
diff --git a/.release-please-manifest.json b/.release-please-manifest.json
index 716d00463..6c3631f39 100644
--- a/.release-please-manifest.json
+++ b/.release-please-manifest.json
@@ -1,3 +1,3 @@
{
- ".": "0.30.0"
+ ".": "0.33.1"
}
diff --git a/ARCHITECTURE.md b/ARCHITECTURE.md
index 3041a7ac8..94f449054 100644
--- a/ARCHITECTURE.md
+++ b/ARCHITECTURE.md
@@ -272,6 +272,10 @@ PYTHON ruff_cmd.rs ruff check/format 80%+ ✓
GO go_cmd.rs go test/build/vet 75-90% ✓
golangci_cmd.rs golangci-lint 85% ✓
+RUBY rake_cmd.rs rake/rails test 85-90% ✓
+ rspec_cmd.rs rspec 60%+ ✓
+ rubocop_cmd.rs rubocop 60%+ ✓
+
NETWORK wget_cmd.rs wget 85-95% ✓
curl_cmd.rs curl 70% ✓
@@ -286,6 +290,7 @@ SYSTEM init.rs init N/A ✓
gain.rs gain N/A ✓
config.rs (internal) N/A ✓
rewrite_cmd.rs rewrite N/A ✓
+ permissions.rs CC permission checks N/A ✓
SHARED utils.rs Helpers N/A ✓
filter.rs Language filters N/A ✓
@@ -293,16 +298,17 @@ SHARED utils.rs Helpers N/A ✓
tee.rs Full output recovery N/A ✓
```
-**Total: 60 modules** (38 command modules + 22 infrastructure modules)
+**Total: 71 modules** (49 command modules + 22 infrastructure modules)
### Module Count Breakdown
-- **Command Modules**: 34 (directly exposed to users)
-- **Infrastructure Modules**: 20 (utils, filter, tracking, tee, config, init, gain, toml_filter, verify_cmd, etc.)
+- **Command Modules**: 45 (directly exposed to users)
+- **Infrastructure Modules**: 22 (utils, filter, tracking, tee, config, init, gain, toml_filter, verify_cmd, trust, etc.)
- **Git Commands**: 7 operations (status, diff, log, add, commit, push, branch/checkout)
- **JS/TS Tooling**: 8 modules (modern frontend/fullstack development)
- **Python Tooling**: 3 modules (ruff, pytest, pip)
- **Go Tooling**: 2 modules (go test/build/vet, golangci-lint)
+- **Ruby Tooling**: 3 modules (rake/minitest, rspec, rubocop) + 1 TOML filter (bundle install)
---
@@ -605,6 +611,37 @@ pub fn run(command: &GoCommand, verbose: u8) -> Result<()> {
- Different output format (JSON API vs text)
- Distinct use case (comprehensive linting vs single-tool diagnostics)
+### Ruby Module Architecture
+
+**Added**: 2026-03-15
+**Motivation**: Ruby on Rails development support (minitest, RSpec, RuboCop, Bundler)
+
+Ruby modules follow the standalone command pattern (like Python) with a shared `ruby_exec()` utility for auto-detecting `bundle exec`.
+
+```
+Module Strategy Output Format Savings
+─────────────────────────────────────────────────────────────────────────
+rake_cmd.rs STATE MACHINE Text parser 85-90%
+ Minitest output (rake test / rails test)
+ → State machine: Header → Running → Failures → Summary
+ → All pass: "ok rake test: 8 runs, 0 failures"
+ → Failures: summary + numbered failure details
+
+rspec_cmd.rs JSON/TEXT DUAL JSON → 60%+ 60%+
+ Injects --format json, parses structured results
+ → Fallback to text state machine when JSON unavailable
+ → Strips Spring, SimpleCov, DEPRECATION, Capybara noise
+
+rubocop_cmd.rs JSON PARSING JSON API 60%+
+ Injects --format json, groups by cop/severity
+ → Skips JSON injection in autocorrect mode (-a, -A)
+
+bundle-install.toml TOML FILTER Text rules 90%+
+ → Strips "Using" lines, short-circuits to "ok bundle: complete"
+```
+
+**Shared**: `ruby_exec(tool)` in utils.rs auto-detects `bundle exec` when `Gemfile` exists. Used by rake_cmd, rspec_cmd, rubocop_cmd.
+
### Format Strategy Decision Tree
```
diff --git a/CHANGELOG.md b/CHANGELOG.md
index 2daaece1d..831120a38 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -5,7 +5,7 @@ All notable changes to rtk (Rust Token Killer) will be documented in this file.
The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).
-## [Unreleased]
+## [Unreleased — Faire fork]
### Features
@@ -22,6 +22,66 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
- Configurable via `[gradle]` section in config.toml: `user_packages`, `extra_drop_patterns`
- Hook integration: `./gradlew` and `gradle` commands auto-rewrite to `rtk gradle`
+## [0.33.1](https://github.com/rtk-ai/rtk/compare/v0.33.0...v0.33.1) (2026-03-25)
+
+
+### Bug Fixes
+
+* **cicd:** dev- prefix for pre-release tags ([522bd64](https://github.com/rtk-ai/rtk/commit/522bd648c8cae41f6cadedcd40a96d879c6ecf0a))
+* **cicd:** use dev- prefix for pre-release tags ([9c21275](https://github.com/rtk-ai/rtk/commit/9c212752fc0401820f8665198f00882684496175))
+* **cicd:** use dev- prefix for pre-release tags to avoid polluting release-please ([32c67e0](https://github.com/rtk-ai/rtk/commit/32c67e01326374f0365602f61542a3639a8f121b))
+* hook security + stderr redirects + version bump ([#807](https://github.com/rtk-ai/rtk/issues/807)) ([0649e97](https://github.com/rtk-ai/rtk/commit/0649e974fb8f27778ef0d22aa97905d9ebc8f03c))
+* **hook:** respect Claude Code deny/ask permission rules on rewrite ([a051a6f](https://github.com/rtk-ai/rtk/commit/a051a6f5e56c7ee59375a365580bced634e29c02))
+* strip trailing stderr redirects before rewrite matching ([#530](https://github.com/rtk-ai/rtk/issues/530)) ([edd9c02](https://github.com/rtk-ai/rtk/commit/edd9c02e892b297a7e349031b61ef971c982b53d))
+* strip trailing stderr redirects before rewrite matching ([#530](https://github.com/rtk-ai/rtk/issues/530)) ([36a6f48](https://github.com/rtk-ai/rtk/commit/36a6f482296d6fc85f8116040a16de2e128733f8))
+
+## [0.33.0-rc.54](https://github.com/rtk-ai/rtk/compare/v0.32.0-rc.54...v0.33.0-rc.54) (2026-03-24)
+
+
+### Features
+
+* **ruby:** add Ruby on Rails support (rspec, rubocop, rake, bundle) ([#724](https://github.com/rtk-ai/rtk/issues/724)) ([15bc0f8](https://github.com/rtk-ai/rtk/commit/15bc0f8d6e135371688d5fd42decc6d8a99454f0))
+
+
+### Bug Fixes
+
+* add telemetry documentation and init notice ([#640](https://github.com/rtk-ai/rtk/issues/640)) ([#788](https://github.com/rtk-ai/rtk/issues/788)) ([0eecee5](https://github.com/rtk-ai/rtk/commit/0eecee5bf35ffd8b13f36a59ec39bd52626948d3))
+* **cargo:** preserve test compile diagnostics ([97b6878](https://github.com/rtk-ai/rtk/commit/97b68783f50d209c2c599ae42cc638520749e668))
+* **cicd:** explicit fetch tag ([3b94b60](https://github.com/rtk-ai/rtk/commit/3b94b602ed24b9ecec597ce001e59f325caaadd4))
+* **cicd:** gete release like tag for pre-release ([53bc81e](https://github.com/rtk-ai/rtk/commit/53bc81e9e6d3d0876fb1a23dbf6f08bc074b68be))
+* **cicd:** issue 668 - pre release tag ([200af43](https://github.com/rtk-ai/rtk/commit/200af436d48dd2539cb00652b082f25c57873c9c))
+* **cicd:** missing doc ([8657494](https://github.com/rtk-ai/rtk/commit/865749438e67f6da7f719d054bf377d857925ad3))
+* **cicd:** pre-release correct tag ([1536667](https://github.com/rtk-ai/rtk/commit/15366678adeece701f38e91204128b070c0e3fc4))
+* **dotnet:** TRX injection for Microsoft.Testing.Platform projects ([8eefef1](https://github.com/rtk-ai/rtk/commit/8eefef1b496035ce898effc5446e6851084d6fa4))
+* **formatter:** show full error message for test failures ([#690](https://github.com/rtk-ai/rtk/issues/690)) ([dc6b026](https://github.com/rtk-ai/rtk/commit/dc6b0260ab4c1bdbccb4b775d879eb473b212c21))
+* **formatter:** show full error message for test failures ([#690](https://github.com/rtk-ai/rtk/issues/690)) ([f7b09fc](https://github.com/rtk-ai/rtk/commit/f7b09fc86a693acf2b52954215ff0c4e6c5d03f9))
+* **gh:** passthrough --comments flag in issue/pr view ([75cd223](https://github.com/rtk-ai/rtk/commit/75cd2232e274f898d8a335ba866fc507ce64b949))
+* **gh:** passthrough --comments flag in issue/pr view ([fdeb09f](https://github.com/rtk-ai/rtk/commit/fdeb09fb93564e795711e9a531d2e2e20187c3a7)), closes [#720](https://github.com/rtk-ai/rtk/issues/720)
+* **gh:** skip compact_diff for --name-only/--stat flags in pr diff ([2ef0690](https://github.com/rtk-ai/rtk/commit/2ef0690767eb733c705e4de56d02c64696a4acc6)), closes [#730](https://github.com/rtk-ai/rtk/issues/730)
+* **gh:** skip compact_diff for --name-only/--stat in pr diff ([c576249](https://github.com/rtk-ai/rtk/commit/c57624931a96181f869645817fdd96bc056da044))
+* **golangci-lint:** add v2 compatibility with runtime version detection ([95a4961](https://github.com/rtk-ai/rtk/commit/95a4961e4aa3ba5307b3dfad246c6168c4caeab8))
+* **golangci:** use resolved_command for version detection, move test fixture to file ([6aa5e90](https://github.com/rtk-ai/rtk/commit/6aa5e90dc466f87c88a2401b4eb2aa0f323379f4))
+* increase signal in git diff, git log, and json filters ([#621](https://github.com/rtk-ai/rtk/issues/621)) ([#708](https://github.com/rtk-ai/rtk/issues/708)) ([4edc3fc](https://github.com/rtk-ai/rtk/commit/4edc3fc0838e25ee6d1754c7e987b5507742f600))
+* **playwright:** add tee_and_hint pass-through on failure ([#690](https://github.com/rtk-ai/rtk/issues/690)) ([b4ccf04](https://github.com/rtk-ai/rtk/commit/b4ccf046f59ce6ed1396e4d8c46f8a35152d6d09))
+* preserve cargo test compile diagnostics ([15d5beb](https://github.com/rtk-ai/rtk/commit/15d5beb9f70caf1f84e9b506faaf840c70c1cf4e))
+* **ruby:** use rails test for positional file args in rtk rake ([ec92c43](https://github.com/rtk-ai/rtk/commit/ec92c43f231eb2321a4b423b0eb8487f98161aac))
+* **ruby:** use rails test for positional file args in rtk rake ([138e914](https://github.com/rtk-ai/rtk/commit/138e91411b4802e445a97429056cca73282d09e1))
+* update Discord invite link ([#711](https://github.com/rtk-ai/rtk/issues/711)) ([#786](https://github.com/rtk-ai/rtk/issues/786)) ([af56573](https://github.com/rtk-ai/rtk/commit/af56573ae2b234123e4685fd945980e644f40fa3))
+
+## [0.31.0](https://github.com/rtk-ai/rtk/compare/v0.30.1...v0.31.0) (2026-03-19)
+
+
+### Features
+
+* 9-tool AI agent support + emoji removal ([#704](https://github.com/rtk-ai/rtk/issues/704)) ([737dada](https://github.com/rtk-ai/rtk/commit/737dada4a56c0d7a482cc438e7280340d634f75d))
+
+## [0.30.1](https://github.com/rtk-ai/rtk/compare/v0.30.0...v0.30.1) (2026-03-18)
+
+
+### Bug Fixes
+
+* remove all decorative emojis from CLI output ([#687](https://github.com/rtk-ai/rtk/issues/687)) ([#686](https://github.com/rtk-ai/rtk/issues/686)) ([4792008](https://github.com/rtk-ai/rtk/commit/4792008fc15553cbb9aeaa602f773a5f8f7f7afe))
+
## [0.30.0](https://github.com/rtk-ai/rtk/compare/v0.29.0...v0.30.0) (2026-03-16)
diff --git a/CLAUDE.md b/CLAUDE.md
index 44aa77944..1553bc627 100644
--- a/CLAUDE.md
+++ b/CLAUDE.md
@@ -231,8 +231,11 @@ rtk gain --history | grep proxy
| gradle/ | Gradle commands | Task-type detection, global noise filters, compile/test/detekt/health/proto/deps/batch (85-99% reduction) |
| go_cmd.rs | Go commands | NDJSON for test, text for build/vet (80-90% reduction) |
| golangci_cmd.rs | golangci-lint | JSON parsing, group by rule (85% reduction) |
+| rake_cmd.rs | Minitest via rake/rails test | State machine text parser, failures only (85-90% reduction) |
+| rspec_cmd.rs | RSpec test runner | JSON injection + text fallback, failures only (60%+ reduction) |
+| rubocop_cmd.rs | RuboCop linter | JSON injection, group by cop/severity (60%+ reduction) |
| tee.rs | Full output recovery | Save raw output to file on failure, print hint for LLM re-read |
-| utils.rs | Shared utilities | Package manager detection, common formatting |
+| utils.rs | Shared utilities | Package manager detection, ruby_exec, common formatting |
| discover/ | Claude Code history analysis | Scan JSONL sessions, classify commands, report missed savings |
## Performance Constraints
@@ -393,6 +396,15 @@ pub fn execute_with_filter(cmd: &str, args: &[&str]) -> Result<()> {
- **Architecture**: Standalone Python commands (mirror lint/prettier), Go sub-enum (mirror git/cargo)
- **Patterns**: JSON for structured output (ruff check, golangci-lint, pip), NDJSON streaming (go test), text state machine (pytest), text filters (go build/vet, ruff format)
+### Ruby on Rails Support (2026-03-15)
+- **Ruby Commands**: 3 modules for Ruby/Rails development
+ - `rtk rspec`: RSpec test runner with JSON injection (`--format json`), text fallback (60%+ reduction)
+ - `rtk rubocop`: RuboCop linter with JSON injection, group by cop/severity (60%+ reduction)
+ - `rtk rake test`: Minitest filter via rake/rails test, state machine parser (85-90% reduction)
+- **TOML Filter**: `bundle-install.toml` for bundle install/update — strips `Using` lines (90%+ reduction)
+- **Shared Infrastructure**: `ruby_exec()` in utils.rs auto-detects `bundle exec` when Gemfile exists
+- **Hook Integration**: Rewrites `rspec`, `rubocop`, `rake test`, `rails test`, `bundle exec` variants
+
## Testing Strategy
### TDD Workflow (mandatory)
diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
index 0ecb18c8f..3221a21ba 100644
--- a/CONTRIBUTING.md
+++ b/CONTRIBUTING.md
@@ -89,15 +89,16 @@ Every change **must** include tests. See [Testing](#testing) below.
Every change **must** include documentation updates. See [Documentation](#documentation) below.
-### Developer Certificate of Origin (DCO)
+### Contributor License Agreement (CLA)
-All contributions must be signed off (git commit -s) to certify
-you have the right to submit the code under the project's license.
+All contributions require signing our [Contributor License Agreement (CLA)](CLA.md) before being merged.
-Expected format: Signed-off-by: Your Name your@email.com
-https://developercertificate.org/
+By signing, you certify that:
+- You have authored 100% of the contribution, or have the necessary rights to submit it.
+- You grant **rtk-ai** and **rtk-ai Labs** a perpetual, worldwide, royalty-free license to use your contribution — including in commercial products such as **rtk Pro** — under the [Apache License 2.0](LICENSE).
+- If your employer has rights over your work, you have obtained their permission.
-By signing off, you agree to the DCO.
+**This is automatic.** When you open a Pull Request, [CLA Assistant](https://cla-assistant.io) will post a comment asking you to sign. Click the link in that comment to sign with your GitHub account. You only need to sign once.
### 5. Merge into `develop`
diff --git a/Cargo.lock b/Cargo.lock
index 5704a5c66..174aea517 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -40,9 +40,9 @@ dependencies = [
[[package]]
name = "anstream"
-version = "1.0.0"
+version = "0.6.21"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "824a212faf96e9acacdbd09febd34438f8f711fb84e09a8916013cd7815ca28d"
+checksum = "43d5b281e737544384e969a5ccad3f1cdd24b48086a0fc1b2a5262a26b8f4f4a"
dependencies = [
"anstyle",
"anstyle-parse",
@@ -55,15 +55,15 @@ dependencies = [
[[package]]
name = "anstyle"
-version = "1.0.14"
+version = "1.0.13"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "940b3a0ca603d1eade50a4846a2afffd5ef57a9feac2c0e2ec2e14f9ead76000"
+checksum = "5192cca8006f1fd4f7237516f40fa183bb07f8fbdfedaa0036de5ea9b0b45e78"
[[package]]
name = "anstyle-parse"
-version = "1.0.0"
+version = "0.2.7"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "52ce7f38b242319f7cabaa6813055467063ecdc9d355bbb4ce0c68908cd8130e"
+checksum = "4e7644824f0aa2c7b9384579234ef10eb7efb6a0deb83f9630a49594dd9c15c2"
dependencies = [
"utf8parse",
]
@@ -139,9 +139,9 @@ checksum = "5d20789868f4b01b2f2caec9f5c4e0213b41e3e5702a50157d699ae31ced2fcb"
[[package]]
name = "cc"
-version = "1.2.57"
+version = "1.2.56"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "7a0dd1ca384932ff3641c8718a02769f1698e7563dc6974ffd03346116310423"
+checksum = "aebf35691d1bfb0ac386a69bac2fde4dd276fb618cf8bf4f5318fe285e821bb2"
dependencies = [
"find-msvc-tools",
"shlex",
@@ -168,9 +168,9 @@ dependencies = [
[[package]]
name = "clap"
-version = "4.6.0"
+version = "4.5.60"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "b193af5b67834b676abd72466a96c1024e6a6ad978a1f484bd90b85c94041351"
+checksum = "2797f34da339ce31042b27d23607e051786132987f595b02ba4f6a6dffb7030a"
dependencies = [
"clap_builder",
"clap_derive",
@@ -178,9 +178,9 @@ dependencies = [
[[package]]
name = "clap_builder"
-version = "4.6.0"
+version = "4.5.60"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "714a53001bf66416adb0e2ef5ac857140e7dc3a0c48fb28b2f10762fc4b5069f"
+checksum = "24a241312cea5059b13574bb9b3861cabf758b879c15190b37b6d6fd63ab6876"
dependencies = [
"anstream",
"anstyle",
@@ -190,9 +190,9 @@ dependencies = [
[[package]]
name = "clap_derive"
-version = "4.6.0"
+version = "4.5.55"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "1110bd8a634a1ab8cb04345d8d878267d57c3cf1b38d91b71af6686408bbca6a"
+checksum = "a92793da1a46a5f2a02a6f4c46c6496b28c43638adea8306fcb0caa1634f24e5"
dependencies = [
"heck",
"proc-macro2",
@@ -202,15 +202,15 @@ dependencies = [
[[package]]
name = "clap_lex"
-version = "1.1.0"
+version = "1.0.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "c8d4a3bb8b1e0c1050499d1815f5ab16d04f0959b233085fb31653fbfc9d98f9"
+checksum = "3a822ea5bc7590f9d40f1ba12c0dc3c2760f3482c6984db1573ad11031420831"
[[package]]
name = "colorchoice"
-version = "1.0.5"
+version = "1.0.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "1d07550c9036bf2ae0c684c4297d503f838287c83c53686d05370d0e139ae570"
+checksum = "b05b61dc5112cbb17e4b6cd61790d9845d13888356391624cbe7e41efeac1e75"
[[package]]
name = "colored"
@@ -341,6 +341,12 @@ version = "1.0.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "34aa73646ffb006b8f5147f3dc182bd4bcb190227ce861fc4a4844bf8e3cb2c0"
+[[package]]
+name = "env_home"
+version = "0.1.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "c7f84e12ccf0a7ddc17a6c41c93326024c42920d7ee630d04950e6926645c0fe"
+
[[package]]
name = "equivalent"
version = "1.0.2"
@@ -711,9 +717,9 @@ checksum = "09edd9e8b54e49e587e4f6295a7d29c3ea94d469cb40ab8ca70b288248a81db2"
[[package]]
name = "libc"
-version = "0.2.183"
+version = "0.2.182"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "b5b646652bf6661599e1da8901b3b9522896f01e736bad5f723fe7a3a27f899d"
+checksum = "6800badb6cb2082ffd7b6a67e6125bb39f18782f793520caee8cb8846be06112"
[[package]]
name = "libredox"
@@ -780,9 +786,9 @@ dependencies = [
[[package]]
name = "once_cell"
-version = "1.21.4"
+version = "1.21.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "9f7c3e4beb33f85d45ae3e3a1792185706c8e16d043238c593331cc7cd313b50"
+checksum = "42f5e15c9953c5e4ccceeb2e7382a716482c34515315f7b03532b8b4e8393d2d"
[[package]]
name = "once_cell_polyfill"
@@ -916,7 +922,7 @@ dependencies = [
[[package]]
name = "rtk"
-version = "0.30.0-faire"
+version = "0.33.1-faire"
dependencies = [
"anyhow",
"chrono",
@@ -1155,9 +1161,9 @@ dependencies = [
[[package]]
name = "tempfile"
-version = "3.27.0"
+version = "3.26.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "32497e9a4c7b38532efcdebeef879707aa9f794296a4f0244f6f69e9bc8574bd"
+checksum = "82a72c767771b47409d2345987fda8628641887d5466101319899796367354a0"
dependencies = [
"fastrand",
"getrandom 0.4.2",
@@ -1446,11 +1452,13 @@ dependencies = [
[[package]]
name = "which"
-version = "8.0.2"
+version = "8.0.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "81995fafaaaf6ae47a7d0cc83c67caf92aeb7e5331650ae6ff856f7c0c60c459"
+checksum = "3a824aeba0fbb27264f815ada4cff43d65b1741b7a4ed7629ff9089148c4a4e0"
dependencies = [
- "libc",
+ "env_home",
+ "rustix",
+ "winsafe",
]
[[package]]
@@ -1687,6 +1695,12 @@ dependencies = [
"memchr",
]
+[[package]]
+name = "winsafe"
+version = "0.0.19"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "d135d17ab770252ad95e9a872d365cf3090e3be864a34ab46f48555993efc904"
+
[[package]]
name = "wit-bindgen"
version = "0.51.0"
@@ -1806,18 +1820,18 @@ dependencies = [
[[package]]
name = "zerocopy"
-version = "0.8.42"
+version = "0.8.40"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "f2578b716f8a7a858b7f02d5bd870c14bf4ddbbcf3a4c05414ba6503640505e3"
+checksum = "a789c6e490b576db9f7e6b6d661bcc9799f7c0ac8352f56ea20193b2681532e5"
dependencies = [
"zerocopy-derive",
]
[[package]]
name = "zerocopy-derive"
-version = "0.8.42"
+version = "0.8.40"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "7e6cc098ea4d3bd6246687de65af3f920c430e236bee1e3bf2e441463f08a02f"
+checksum = "f65c489a7071a749c849713807783f70672b28094011623e200cb86dcb835953"
dependencies = [
"proc-macro2",
"quote",
diff --git a/Cargo.toml b/Cargo.toml
index aa0289239..c1789a847 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -1,6 +1,6 @@
[package]
name = "rtk"
-version = "0.30.0-faire"
+version = "0.33.1-faire"
edition = "2021"
authors = ["Patrick Szymkowiak"]
description = "Rust Token Killer - High-performance CLI proxy to minimize LLM token consumption"
diff --git a/LICENSE b/LICENSE
index 5c5efcd47..0afaf4b97 100644
--- a/LICENSE
+++ b/LICENSE
@@ -1,21 +1,190 @@
-MIT License
-
-Copyright (c) 2024 Patrick Szymkowiak
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in all
-copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-SOFTWARE.
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ Copyright 2024 rtk-ai and rtk-ai Labs
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/README.md b/README.md
index f5378d38b..02db8601e 100644
--- a/README.md
+++ b/README.md
@@ -10,7 +10,7 @@
-
+
@@ -19,7 +19,7 @@
Install •
Troubleshooting •
Architecture •
- Discord
+ Discord
@@ -100,12 +100,15 @@ rtk gain # Should show token savings stats
## Quick Start
```bash
-# 1. Install hook for Claude Code (recommended)
-rtk init --global
-# Follow instructions to register in ~/.claude/settings.json
-# Claude Code only by default (use --opencode for OpenCode)
-
-# 2. Restart Claude Code, then test
+# 1. Install for your AI tool
+rtk init -g # Claude Code / Copilot (default)
+rtk init -g --gemini # Gemini CLI
+rtk init -g --codex # Codex (OpenAI)
+rtk init -g --agent cursor # Cursor
+rtk init --agent windsurf # Windsurf
+rtk init --agent cline # Cline / Roo Code
+
+# 2. Restart your AI tool, then test
git status # Automatically rewritten to rtk git status
```
@@ -172,6 +175,8 @@ rtk playwright test # E2E results (failures only)
rtk pytest # Python tests (-90%)
rtk go test # Go tests (NDJSON, -90%)
rtk cargo test # Cargo tests (-90%)
+rtk rake test # Ruby minitest (-90%)
+rtk rspec # RSpec tests (JSON, -60%+)
```
### Build & Lint
@@ -185,6 +190,7 @@ rtk cargo build # Cargo build (-80%)
rtk cargo clippy # Cargo clippy (-80%)
rtk ruff check # Python linting (JSON, -80%)
rtk golangci-lint run # Go linting (JSON, -85%)
+rtk rubocop # Ruby linting (JSON, -60%+)
```
### Gradle
@@ -210,6 +216,7 @@ Gradle support includes:
rtk pnpm list # Compact dependency tree
rtk pip list # Python packages (auto-detect uv)
rtk pip outdated # Outdated packages
+rtk bundle install # Ruby gems (strip Using lines)
rtk prisma generate # Schema generation (no ASCII art)
```
@@ -306,28 +313,96 @@ rtk init --show # Verify installation
After install, **restart Claude Code**.
-## OpenCode Plugin (Global)
+## Supported AI Tools
+
+RTK supports 9 AI coding tools. Each integration transparently rewrites shell commands to `rtk` equivalents for 60-90% token savings.
+
+| Tool | Install | Method |
+|------|---------|--------|
+| **Claude Code** | `rtk init -g` | PreToolUse hook (bash) |
+| **GitHub Copilot** | `rtk init -g` | PreToolUse hook (`rtk hook copilot`) |
+| **Cursor** | `rtk init -g --agent cursor` | preToolUse hook (hooks.json) |
+| **Gemini CLI** | `rtk init -g --gemini` | BeforeTool hook (`rtk hook gemini`) |
+| **Codex** | `rtk init -g --codex` | AGENTS.md + RTK.md instructions |
+| **Windsurf** | `rtk init --agent windsurf` | .windsurfrules (project-scoped) |
+| **Cline / Roo Code** | `rtk init --agent cline` | .clinerules (project-scoped) |
+| **OpenCode** | `rtk init -g --opencode` | Plugin TS (tool.execute.before) |
+| **OpenClaw** | `openclaw plugins install ./openclaw` | Plugin TS (before_tool_call) |
+
+### Claude Code (default)
+
+```bash
+rtk init -g # Install hook + RTK.md
+rtk init -g --auto-patch # Non-interactive (CI/CD)
+rtk init --show # Verify installation
+rtk init -g --uninstall # Remove
+```
+
+### GitHub Copilot (VS Code + CLI)
+
+```bash
+rtk init -g # Same hook as Claude Code
+```
+
+The hook auto-detects Copilot format (VS Code `runTerminalCommand` or CLI `toolName: bash`) and rewrites commands. Works with both Copilot Chat in VS Code and `copilot` CLI.
+
+### Cursor
+
+```bash
+rtk init -g --agent cursor
+```
+
+Creates `~/.cursor/hooks/rtk-rewrite.sh` + patches `~/.cursor/hooks.json` with preToolUse matcher. Works with both Cursor editor and `cursor-agent` CLI.
+
+### Gemini CLI
+
+```bash
+rtk init -g --gemini
+rtk init -g --gemini --uninstall
+```
-OpenCode supports plugins that can intercept tool execution. RTK provides a global plugin that mirrors the Claude auto-rewrite behavior by rewriting Bash tool commands to `rtk ...` before they execute. This plugin is **not** installed by default.
+Creates `~/.gemini/hooks/rtk-hook-gemini.sh` + patches `~/.gemini/settings.json` with BeforeTool hook.
-> **Note**: This plugin uses OpenCode's `tool.execute.before` hook. Known limitation: plugin hooks do not intercept subagent tool calls ([upstream issue](https://github.com/sst/opencode/issues/5894)). See [OpenCode plugin docs](https://open-code.ai/en/docs/plugins) for API details.
+### Codex (OpenAI)
+
+```bash
+rtk init -g --codex
+```
+
+Creates `~/.codex/RTK.md` + `~/.codex/AGENTS.md` with `@RTK.md` reference. Codex reads these as global instructions.
+
+### Windsurf
+
+```bash
+rtk init --agent windsurf
+```
+
+Creates `.windsurfrules` in the current project. Cascade reads rules and prefixes commands with `rtk`.
+
+### Cline / Roo Code
+
+```bash
+rtk init --agent cline
+```
+
+Creates `.clinerules` in the current project. Cline reads rules and prefixes commands with `rtk`.
+
+### OpenCode
-**Install OpenCode plugin:**
```bash
rtk init -g --opencode
```
-**What it creates:**
-- `~/.config/opencode/plugins/rtk.ts`
+Creates `~/.config/opencode/plugins/rtk.ts`. Uses `tool.execute.before` hook.
-**Restart Required**: Restart OpenCode, then test with `git status` in a session.
+### OpenClaw
-**Manual install (fallback):**
```bash
-mkdir -p ~/.config/opencode/plugins
-cp hooks/opencode-rtk.ts ~/.config/opencode/plugins/rtk.ts
+openclaw plugins install ./openclaw
```
+Plugin in `openclaw/` directory. Uses `before_tool_call` hook, delegates to `rtk rewrite`.
+
### Commands Rewritten
| Raw Command | Rewritten To |
@@ -350,6 +425,10 @@ cp hooks/opencode-rtk.ts ~/.config/opencode/plugins/rtk.ts
| `go test/build/vet` | `rtk go ...` |
| `golangci-lint` | `rtk golangci-lint` |
| `gradle/gradlew ` | `rtk gradle ...` |
+| `rake test` / `rails test` | `rtk rake test` |
+| `rspec` / `bundle exec rspec` | `rtk rspec` |
+| `rubocop` / `bundle exec rubocop` | `rtk rubocop` |
+| `bundle install/update` | `rtk bundle ...` |
| `docker ps/images/logs` | `rtk docker ...` |
| `kubectl get/logs` | `rtk kubectl ...` |
| `curl` | `rtk curl` |
@@ -406,11 +485,33 @@ brew uninstall rtk # If installed via Homebrew
- **[SECURITY.md](SECURITY.md)** - Security policy and PR review process
- **[AUDIT_GUIDE.md](docs/AUDIT_GUIDE.md)** - Token savings analytics guide
+## Privacy & Telemetry
+
+RTK collects **anonymous, aggregate usage metrics** once per day to help prioritize development. This is standard practice for open-source CLI tools.
+
+**What is collected:**
+- Device hash (SHA-256 of hostname+username, not reversible)
+- RTK version, OS, architecture
+- Command count (last 24h) and top command names (e.g. "git", "cargo" — no arguments, no file paths)
+- Token savings percentage
+
+**What is NOT collected:** source code, file paths, command arguments, secrets, environment variables, or any personally identifiable information.
+
+**Opt-out** (any of these):
+```bash
+# Environment variable
+export RTK_TELEMETRY_DISABLED=1
+
+# Or in config file (~/.config/rtk/config.toml)
+[telemetry]
+enabled = false
+```
+
## Contributing
Contributions welcome! Please open an issue or PR on [GitHub](https://github.com/rtk-ai/rtk).
-Join the community on [Discord](https://discord.gg/pvHdzAec).
+Join the community on [Discord](https://discord.gg/RySmvNF5kF).
## License
diff --git a/README_es.md b/README_es.md
index c05da9367..c099d6649 100644
--- a/README_es.md
+++ b/README_es.md
@@ -10,7 +10,7 @@
-
+
@@ -19,7 +19,7 @@
Instalar •
Solucion de problemas •
Arquitectura •
- Discord
+ Discord
@@ -152,7 +152,7 @@ rtk discover # Descubrir ahorros perdidos
Las contribuciones son bienvenidas. Abre un issue o PR en [GitHub](https://github.com/rtk-ai/rtk).
-Unete a la comunidad en [Discord](https://discord.gg/pvHdzAec).
+Unete a la comunidad en [Discord](https://discord.gg/RySmvNF5kF).
## Licencia
diff --git a/README_fr.md b/README_fr.md
index b8c71734d..4c5e749da 100644
--- a/README_fr.md
+++ b/README_fr.md
@@ -10,7 +10,7 @@
-
+
@@ -19,7 +19,7 @@
Installer •
Depannage •
Architecture •
- Discord
+ Discord
@@ -190,7 +190,7 @@ mode = "failures"
Les contributions sont les bienvenues ! Ouvrez une issue ou une PR sur [GitHub](https://github.com/rtk-ai/rtk).
-Rejoignez la communaute sur [Discord](https://discord.gg/pvHdzAec).
+Rejoignez la communaute sur [Discord](https://discord.gg/RySmvNF5kF).
## Licence
diff --git a/README_ja.md b/README_ja.md
index a6e7dc227..6c690affa 100644
--- a/README_ja.md
+++ b/README_ja.md
@@ -10,7 +10,7 @@
-
+
@@ -19,7 +19,7 @@
インストール •
トラブルシューティング •
アーキテクチャ •
- Discord
+ Discord
@@ -152,7 +152,7 @@ rtk discover # 見逃した節約機会を発見
コントリビューション歓迎 で issue または PR を作成してください。
-[Discord](https://discord.gg/pvHdzAec) コミュニティに参加。
+[Discord](https://discord.gg/RySmvNF5kF) コミュニティに参加。
## ライセンス
diff --git a/README_ko.md b/README_ko.md
index b9eca7246..5d3b1a0b2 100644
--- a/README_ko.md
+++ b/README_ko.md
@@ -10,7 +10,7 @@
-
+
@@ -19,7 +19,7 @@
설치 •
문제 해결 •
아키텍처 •
- Discord
+ Discord
@@ -152,7 +152,7 @@ rtk discover # 놓친 절약 기회 발견
기여를 환영합니다! [GitHub](https://github.com/rtk-ai/rtk)에서 issue 또는 PR을 생성해 주세요.
-[Discord](https://discord.gg/pvHdzAec) 커뮤니티에 참여하세요.
+[Discord](https://discord.gg/RySmvNF5kF) 커뮤니티에 참여하세요.
## 라이선스
diff --git a/README_zh.md b/README_zh.md
index bd7fce8d5..00b9c001f 100644
--- a/README_zh.md
+++ b/README_zh.md
@@ -10,7 +10,7 @@
-
+
@@ -19,7 +19,7 @@
安装 •
故障排除 •
架构 •
- Discord
+ Discord
@@ -160,7 +160,7 @@ rtk discover # 发现遗漏的节省机会
欢迎贡献!请在 [GitHub](https://github.com/rtk-ai/rtk) 上提交 issue 或 PR。
-加入 [Discord](https://discord.gg/pvHdzAec) 社区。
+加入 [Discord](https://discord.gg/RySmvNF5kF) 社区。
## 许可证
diff --git a/hooks/cline-rtk-rules.md b/hooks/cline-rtk-rules.md
new file mode 100644
index 000000000..7d6c8aff5
--- /dev/null
+++ b/hooks/cline-rtk-rules.md
@@ -0,0 +1,32 @@
+# RTK - Rust Token Killer (Cline)
+
+**Usage**: Token-optimized CLI proxy for shell commands.
+
+## Rule
+
+Always prefix shell commands with `rtk` to minimize token consumption.
+
+Examples:
+
+```bash
+rtk git status
+rtk cargo test
+rtk ls src/
+rtk grep "pattern" src/
+rtk find "*.rs" .
+rtk docker ps
+rtk gh pr list
+```
+
+## Meta Commands
+
+```bash
+rtk gain # Show token savings
+rtk gain --history # Command history with savings
+rtk discover # Find missed RTK opportunities
+rtk proxy # Run raw (no filtering, for debugging)
+```
+
+## Why
+
+RTK filters and compresses command output before it reaches the LLM context, saving 60-90% tokens on common operations. Always prefix raw commands with `rtk`.
diff --git a/hooks/copilot-rtk-awareness.md b/hooks/copilot-rtk-awareness.md
new file mode 100644
index 000000000..185f460c5
--- /dev/null
+++ b/hooks/copilot-rtk-awareness.md
@@ -0,0 +1,60 @@
+# RTK — Copilot Integration (VS Code Copilot Chat + Copilot CLI)
+
+**Usage**: Token-optimized CLI proxy (60-90% savings on dev operations)
+
+## What's automatic
+
+The `.github/copilot-instructions.md` file is loaded at session start by both Copilot CLI and VS Code Copilot Chat.
+It instructs Copilot to prefix commands with `rtk` automatically.
+
+The `.github/hooks/rtk-rewrite.json` hook adds a `PreToolUse` safety net via `rtk hook` —
+a cross-platform Rust binary that intercepts raw bash tool calls and rewrites them.
+No shell scripts, no `jq` dependency, works on Windows natively.
+
+## Meta commands (always use directly)
+
+```bash
+rtk gain # Token savings dashboard for this session
+rtk gain --history # Per-command history with savings %
+rtk discover # Scan session history for missed rtk opportunities
+rtk proxy # Run raw (no filtering) but still track it
+```
+
+## Installation verification
+
+```bash
+rtk --version # Should print: rtk X.Y.Z
+rtk gain # Should show a dashboard (not "command not found")
+which rtk # Verify correct binary path
+```
+
+> ⚠️ **Name collision**: If `rtk gain` fails, you may have `reachingforthejack/rtk`
+> (Rust Type Kit) installed instead. Check `which rtk` and reinstall from rtk-ai/rtk.
+
+## How the hook works
+
+`rtk hook` reads `PreToolUse` JSON from stdin, detects the agent format, and responds appropriately:
+
+**VS Code Copilot Chat** (supports `updatedInput` — transparent rewrite, no denial):
+1. Agent runs `git status` → `rtk hook` intercepts via `PreToolUse`
+2. `rtk hook` detects VS Code format (`tool_name`/`tool_input` keys)
+3. Returns `hookSpecificOutput.updatedInput.command = "rtk git status"`
+4. Agent runs the rewritten command silently — no denial, no retry
+
+**GitHub Copilot CLI** (deny-with-suggestion — CLI ignores `updatedInput` today, see [issue #2013](https://github.com/github/copilot-cli/issues/2013)):
+1. Agent runs `git status` → `rtk hook` intercepts via `PreToolUse`
+2. `rtk hook` detects Copilot CLI format (`toolName`/`toolArgs` keys)
+3. Returns `permissionDecision: deny` with reason: `"Token savings: use 'rtk git status' instead"`
+4. Copilot reads the reason and re-runs `rtk git status`
+
+When Copilot CLI adds `updatedInput` support, only `rtk hook` needs updating — no config changes.
+
+## Integration comparison
+
+| Tool | Mechanism | Hook output | File |
+|-----------------------|-----------------------------------------|--------------------------|------------------------------------|
+| Claude Code | `PreToolUse` hook with `updatedInput` | Transparent rewrite | `hooks/rtk-rewrite.sh` |
+| VS Code Copilot Chat | `PreToolUse` hook with `updatedInput` | Transparent rewrite | `.github/hooks/rtk-rewrite.json` |
+| GitHub Copilot CLI | `PreToolUse` deny-with-suggestion | Denial + retry | `.github/hooks/rtk-rewrite.json` |
+| OpenCode | Plugin `tool.execute.before` | Transparent rewrite | `hooks/opencode-rtk.ts` |
+| (any) | Custom instructions | Prompt-level guidance | `.github/copilot-instructions.md` |
diff --git a/hooks/cursor-rtk-rewrite.sh b/hooks/cursor-rtk-rewrite.sh
new file mode 100755
index 000000000..4b80b260c
--- /dev/null
+++ b/hooks/cursor-rtk-rewrite.sh
@@ -0,0 +1,54 @@
+#!/usr/bin/env bash
+# rtk-hook-version: 1
+# RTK Cursor Agent hook — rewrites shell commands to use rtk for token savings.
+# Works with both Cursor editor and cursor-cli (they share ~/.cursor/hooks.json).
+# Cursor preToolUse hook format: receives JSON on stdin, returns JSON on stdout.
+# Requires: rtk >= 0.23.0, jq
+#
+# This is a thin delegating hook: all rewrite logic lives in `rtk rewrite`,
+# which is the single source of truth (src/discover/registry.rs).
+# To add or change rewrite rules, edit the Rust registry — not this file.
+
+if ! command -v jq &>/dev/null; then
+ echo "[rtk] WARNING: jq is not installed. Hook cannot rewrite commands. Install jq: https://jqlang.github.io/jq/download/" >&2
+ exit 0
+fi
+
+if ! command -v rtk &>/dev/null; then
+ echo "[rtk] WARNING: rtk is not installed or not in PATH. Hook cannot rewrite commands. Install: https://github.com/rtk-ai/rtk#installation" >&2
+ exit 0
+fi
+
+# Version guard: rtk rewrite was added in 0.23.0.
+RTK_VERSION=$(rtk --version 2>/dev/null | grep -oE '[0-9]+\.[0-9]+\.[0-9]+' | head -1)
+if [ -n "$RTK_VERSION" ]; then
+ MAJOR=$(echo "$RTK_VERSION" | cut -d. -f1)
+ MINOR=$(echo "$RTK_VERSION" | cut -d. -f2)
+ if [ "$MAJOR" -eq 0 ] && [ "$MINOR" -lt 23 ]; then
+ echo "[rtk] WARNING: rtk $RTK_VERSION is too old (need >= 0.23.0). Upgrade: cargo install rtk" >&2
+ exit 0
+ fi
+fi
+
+INPUT=$(cat)
+CMD=$(echo "$INPUT" | jq -r '.tool_input.command // empty')
+
+if [ -z "$CMD" ]; then
+ echo '{}'
+ exit 0
+fi
+
+# Delegate all rewrite logic to the Rust binary.
+# rtk rewrite exits 1 when there's no rewrite — hook passes through silently.
+REWRITTEN=$(rtk rewrite "$CMD" 2>/dev/null) || { echo '{}'; exit 0; }
+
+# No change — nothing to do.
+if [ "$CMD" = "$REWRITTEN" ]; then
+ echo '{}'
+ exit 0
+fi
+
+jq -n --arg cmd "$REWRITTEN" '{
+ "permission": "allow",
+ "updated_input": { "command": $cmd }
+}'
diff --git a/hooks/rtk-awareness-codex.md b/hooks/rtk-awareness-codex.md
new file mode 100644
index 000000000..7ae285e1a
--- /dev/null
+++ b/hooks/rtk-awareness-codex.md
@@ -0,0 +1,32 @@
+# RTK - Rust Token Killer (Codex CLI)
+
+**Usage**: Token-optimized CLI proxy for shell commands.
+
+## Rule
+
+Always prefix shell commands with `rtk`.
+
+Examples:
+
+```bash
+rtk git status
+rtk cargo test
+rtk npm run build
+rtk pytest -q
+```
+
+## Meta Commands
+
+```bash
+rtk gain # Token savings analytics
+rtk gain --history # Recent command savings history
+rtk proxy # Run raw command without filtering
+```
+
+## Verification
+
+```bash
+rtk --version
+rtk gain
+which rtk
+```
diff --git a/hooks/rtk-rewrite.sh b/hooks/rtk-rewrite.sh
index c9c00f47c..f7a42b5d4 100644
--- a/hooks/rtk-rewrite.sh
+++ b/hooks/rtk-rewrite.sh
@@ -1,11 +1,17 @@
#!/usr/bin/env bash
-# rtk-hook-version: 2
+# rtk-hook-version: 3
# RTK Claude Code hook — rewrites commands to use rtk for token savings.
# Requires: rtk >= 0.23.0, jq
#
# This is a thin delegating hook: all rewrite logic lives in `rtk rewrite`,
# which is the single source of truth (src/discover/registry.rs).
# To add or change rewrite rules, edit the Rust registry — not this file.
+#
+# Exit code protocol for `rtk rewrite`:
+# 0 + stdout Rewrite found, no deny/ask rule matched → auto-allow
+# 1 No RTK equivalent → pass through unchanged
+# 2 Deny rule matched → pass through (Claude Code native deny handles it)
+# 3 + stdout Ask rule matched → rewrite but let Claude Code prompt the user
if ! command -v jq &>/dev/null; then
echo "[rtk] WARNING: jq is not installed. Hook cannot rewrite commands. Install jq: https://jqlang.github.io/jq/download/" >&2
@@ -37,25 +43,56 @@ if [ -z "$CMD" ]; then
exit 0
fi
-# Delegate all rewrite logic to the Rust binary.
-# rtk rewrite exits 1 when there's no rewrite — hook passes through silently.
-REWRITTEN=$(rtk rewrite "$CMD" 2>/dev/null) || exit 0
+# Delegate all rewrite + permission logic to the Rust binary.
+REWRITTEN=$(rtk rewrite "$CMD" 2>/dev/null)
+EXIT_CODE=$?
-# No change — nothing to do.
-if [ "$CMD" = "$REWRITTEN" ]; then
- exit 0
-fi
+case $EXIT_CODE in
+ 0)
+ # Rewrite found, no permission rules matched — safe to auto-allow.
+ # If the output is identical, the command was already using RTK.
+ [ "$CMD" = "$REWRITTEN" ] && exit 0
+ ;;
+ 1)
+ # No RTK equivalent — pass through unchanged.
+ exit 0
+ ;;
+ 2)
+ # Deny rule matched — let Claude Code's native deny rule handle it.
+ exit 0
+ ;;
+ 3)
+ # Ask rule matched — rewrite the command but do NOT auto-allow so that
+ # Claude Code prompts the user for confirmation.
+ ;;
+ *)
+ exit 0
+ ;;
+esac
ORIGINAL_INPUT=$(echo "$INPUT" | jq -c '.tool_input')
UPDATED_INPUT=$(echo "$ORIGINAL_INPUT" | jq --arg cmd "$REWRITTEN" '.command = $cmd')
-jq -n \
- --argjson updated "$UPDATED_INPUT" \
- '{
- "hookSpecificOutput": {
- "hookEventName": "PreToolUse",
- "permissionDecision": "allow",
- "permissionDecisionReason": "RTK auto-rewrite",
- "updatedInput": $updated
- }
- }'
+if [ "$EXIT_CODE" -eq 3 ]; then
+ # Ask: rewrite the command, omit permissionDecision so Claude Code prompts.
+ jq -n \
+ --argjson updated "$UPDATED_INPUT" \
+ '{
+ "hookSpecificOutput": {
+ "hookEventName": "PreToolUse",
+ "updatedInput": $updated
+ }
+ }'
+else
+ # Allow: rewrite the command and auto-allow.
+ jq -n \
+ --argjson updated "$UPDATED_INPUT" \
+ '{
+ "hookSpecificOutput": {
+ "hookEventName": "PreToolUse",
+ "permissionDecision": "allow",
+ "permissionDecisionReason": "RTK auto-rewrite",
+ "updatedInput": $updated
+ }
+ }'
+fi
diff --git a/hooks/test-copilot-rtk-rewrite.sh b/hooks/test-copilot-rtk-rewrite.sh
new file mode 100755
index 000000000..f1cca9497
--- /dev/null
+++ b/hooks/test-copilot-rtk-rewrite.sh
@@ -0,0 +1,293 @@
+#!/usr/bin/env bash
+# Test suite for rtk hook (cross-platform preToolUse handler).
+# Feeds mock preToolUse JSON through `rtk hook` and verifies allow/deny decisions.
+#
+# Usage: bash hooks/test-copilot-rtk-rewrite.sh
+#
+# Copilot CLI input format:
+# {"toolName":"bash","toolArgs":"{\"command\":\"...\"}"}
+# Output on intercept: {"permissionDecision":"deny","permissionDecisionReason":"..."}
+#
+# VS Code Copilot Chat input format:
+# {"tool_name":"Bash","tool_input":{"command":"..."}}
+# Output on intercept: {"hookSpecificOutput":{"permissionDecision":"allow","updatedInput":{...}}}
+#
+# Output on pass-through: empty (exit 0)
+
+RTK="${RTK:-rtk}"
+PASS=0
+FAIL=0
+TOTAL=0
+
+# Colors
+GREEN='\033[32m'
+RED='\033[31m'
+DIM='\033[2m'
+RESET='\033[0m'
+
+# Build a Copilot CLI preToolUse input JSON
+copilot_bash_input() {
+ local cmd="$1"
+ local tool_args
+ tool_args=$(jq -cn --arg cmd "$cmd" '{"command":$cmd}')
+ jq -cn --arg ta "$tool_args" '{"toolName":"bash","toolArgs":$ta}'
+}
+
+# Build a VS Code Copilot Chat preToolUse input JSON
+vscode_bash_input() {
+ local cmd="$1"
+ jq -cn --arg cmd "$cmd" '{"tool_name":"Bash","tool_input":{"command":$cmd}}'
+}
+
+# Build a non-bash tool input
+tool_input() {
+ local tool_name="$1"
+ jq -cn --arg t "$tool_name" '{"toolName":$t,"toolArgs":"{}"}'
+}
+
+# Assert Copilot CLI: hook denies and reason contains the expected rtk command
+test_deny() {
+ local description="$1"
+ local input_cmd="$2"
+ local expected_rtk="$3"
+ TOTAL=$((TOTAL + 1))
+
+ local output
+ output=$(copilot_bash_input "$input_cmd" | "$RTK" hook 2>/dev/null) || true
+
+ local decision reason
+ decision=$(echo "$output" | jq -r '.permissionDecision // empty' 2>/dev/null)
+ reason=$(echo "$output" | jq -r '.permissionDecisionReason // empty' 2>/dev/null)
+
+ if [ "$decision" = "deny" ] && echo "$reason" | grep -qF "$expected_rtk"; then
+ printf " ${GREEN}DENY${RESET} %s ${DIM}→ %s${RESET}\n" "$description" "$expected_rtk"
+ PASS=$((PASS + 1))
+ else
+ printf " ${RED}FAIL${RESET} %s\n" "$description"
+ printf " expected decision: deny, reason containing: %s\n" "$expected_rtk"
+ printf " actual decision: %s\n" "$decision"
+ printf " actual reason: %s\n" "$reason"
+ FAIL=$((FAIL + 1))
+ fi
+}
+
+# Assert VS Code Copilot Chat: hook returns updatedInput (allow) with rewritten command
+test_vscode_rewrite() {
+ local description="$1"
+ local input_cmd="$2"
+ local expected_rtk="$3"
+ TOTAL=$((TOTAL + 1))
+
+ local output
+ output=$(vscode_bash_input "$input_cmd" | "$RTK" hook 2>/dev/null) || true
+
+ local decision updated_cmd
+ decision=$(echo "$output" | jq -r '.hookSpecificOutput.permissionDecision // empty' 2>/dev/null)
+ updated_cmd=$(echo "$output" | jq -r '.hookSpecificOutput.updatedInput.command // empty' 2>/dev/null)
+
+ if [ "$decision" = "allow" ] && echo "$updated_cmd" | grep -qF "$expected_rtk"; then
+ printf " ${GREEN}REWRITE${RESET} %s ${DIM}→ %s${RESET}\n" "$description" "$updated_cmd"
+ PASS=$((PASS + 1))
+ else
+ printf " ${RED}FAIL${RESET} %s\n" "$description"
+ printf " expected decision: allow, updatedInput containing: %s\n" "$expected_rtk"
+ printf " actual decision: %s\n" "$decision"
+ printf " actual updatedInput: %s\n" "$updated_cmd"
+ FAIL=$((FAIL + 1))
+ fi
+}
+
+# Assert the hook emits no output (pass-through)
+test_allow() {
+ local description="$1"
+ local input="$2"
+ TOTAL=$((TOTAL + 1))
+
+ local output
+ output=$(echo "$input" | "$RTK" hook 2>/dev/null) || true
+
+ if [ -z "$output" ]; then
+ printf " ${GREEN}PASS${RESET} %s ${DIM}→ (allow)${RESET}\n" "$description"
+ PASS=$((PASS + 1))
+ else
+ local decision
+ decision=$(echo "$output" | jq -r '.permissionDecision // .hookSpecificOutput.permissionDecision // empty' 2>/dev/null)
+ printf " ${RED}FAIL${RESET} %s\n" "$description"
+ printf " expected: (no output)\n"
+ printf " actual: permissionDecision=%s\n" "$decision"
+ FAIL=$((FAIL + 1))
+ fi
+}
+
+echo "============================================"
+echo " RTK Hook Test Suite (rtk hook)"
+echo "============================================"
+echo ""
+
+# ---- SECTION 1: Copilot CLI — commands that should be denied ----
+echo "--- Copilot CLI: intercepted (deny with rtk suggestion) ---"
+
+test_deny "git status" \
+ "git status" \
+ "rtk git status"
+
+test_deny "git log --oneline -10" \
+ "git log --oneline -10" \
+ "rtk git log"
+
+test_deny "git diff HEAD" \
+ "git diff HEAD" \
+ "rtk git diff"
+
+test_deny "cargo test" \
+ "cargo test" \
+ "rtk cargo test"
+
+test_deny "cargo clippy --all-targets" \
+ "cargo clippy --all-targets" \
+ "rtk cargo clippy"
+
+test_deny "cargo build" \
+ "cargo build" \
+ "rtk cargo build"
+
+test_deny "grep -rn pattern src/" \
+ "grep -rn pattern src/" \
+ "rtk grep"
+
+test_deny "gh pr list" \
+ "gh pr list" \
+ "rtk gh"
+
+echo ""
+
+# ---- SECTION 2: VS Code Copilot Chat — commands that should be rewritten via updatedInput ----
+echo "--- VS Code Copilot Chat: intercepted (updatedInput rewrite) ---"
+
+test_vscode_rewrite "git status" \
+ "git status" \
+ "rtk git status"
+
+test_vscode_rewrite "cargo test" \
+ "cargo test" \
+ "rtk cargo test"
+
+test_vscode_rewrite "gh pr list" \
+ "gh pr list" \
+ "rtk gh"
+
+echo ""
+
+# ---- SECTION 3: Pass-through cases ----
+echo "--- Pass-through (allow silently) ---"
+
+test_allow "Copilot CLI: already rtk: rtk git status" \
+ "$(copilot_bash_input "rtk git status")"
+
+test_allow "Copilot CLI: already rtk: rtk cargo test" \
+ "$(copilot_bash_input "rtk cargo test")"
+
+test_allow "Copilot CLI: heredoc" \
+ "$(copilot_bash_input "cat <<'EOF'
+hello
+EOF")"
+
+test_allow "Copilot CLI: unknown command: htop" \
+ "$(copilot_bash_input "htop")"
+
+test_allow "Copilot CLI: unknown command: echo" \
+ "$(copilot_bash_input "echo hello world")"
+
+test_allow "Copilot CLI: non-bash tool: view" \
+ "$(tool_input "view")"
+
+test_allow "Copilot CLI: non-bash tool: edit" \
+ "$(tool_input "edit")"
+
+test_allow "VS Code: already rtk" \
+ "$(vscode_bash_input "rtk git status")"
+
+test_allow "VS Code: non-bash tool: editFiles" \
+ "$(jq -cn '{"tool_name":"editFiles"}')"
+
+echo ""
+
+# ---- SECTION 4: Output format assertions ----
+echo "--- Output format ---"
+
+# Copilot CLI output format
+TOTAL=$((TOTAL + 1))
+raw_output=$(copilot_bash_input "git status" | "$RTK" hook 2>/dev/null)
+
+if echo "$raw_output" | jq . >/dev/null 2>&1; then
+ printf " ${GREEN}PASS${RESET} Copilot CLI: output is valid JSON\n"
+ PASS=$((PASS + 1))
+else
+ printf " ${RED}FAIL${RESET} Copilot CLI: output is not valid JSON: %s\n" "$raw_output"
+ FAIL=$((FAIL + 1))
+fi
+
+TOTAL=$((TOTAL + 1))
+decision=$(echo "$raw_output" | jq -r '.permissionDecision')
+if [ "$decision" = "deny" ]; then
+ printf " ${GREEN}PASS${RESET} Copilot CLI: permissionDecision == \"deny\"\n"
+ PASS=$((PASS + 1))
+else
+ printf " ${RED}FAIL${RESET} Copilot CLI: expected \"deny\", got \"%s\"\n" "$decision"
+ FAIL=$((FAIL + 1))
+fi
+
+TOTAL=$((TOTAL + 1))
+reason=$(echo "$raw_output" | jq -r '.permissionDecisionReason')
+if echo "$reason" | grep -qE '`rtk [^`]+`'; then
+ printf " ${GREEN}PASS${RESET} Copilot CLI: reason contains backtick-quoted rtk command ${DIM}→ %s${RESET}\n" "$reason"
+ PASS=$((PASS + 1))
+else
+ printf " ${RED}FAIL${RESET} Copilot CLI: reason missing backtick-quoted command: %s\n" "$reason"
+ FAIL=$((FAIL + 1))
+fi
+
+# VS Code output format
+TOTAL=$((TOTAL + 1))
+vscode_output=$(vscode_bash_input "git status" | "$RTK" hook 2>/dev/null)
+
+if echo "$vscode_output" | jq . >/dev/null 2>&1; then
+ printf " ${GREEN}PASS${RESET} VS Code: output is valid JSON\n"
+ PASS=$((PASS + 1))
+else
+ printf " ${RED}FAIL${RESET} VS Code: output is not valid JSON: %s\n" "$vscode_output"
+ FAIL=$((FAIL + 1))
+fi
+
+TOTAL=$((TOTAL + 1))
+vscode_decision=$(echo "$vscode_output" | jq -r '.hookSpecificOutput.permissionDecision')
+if [ "$vscode_decision" = "allow" ]; then
+ printf " ${GREEN}PASS${RESET} VS Code: hookSpecificOutput.permissionDecision == \"allow\"\n"
+ PASS=$((PASS + 1))
+else
+ printf " ${RED}FAIL${RESET} VS Code: expected \"allow\", got \"%s\"\n" "$vscode_decision"
+ FAIL=$((FAIL + 1))
+fi
+
+TOTAL=$((TOTAL + 1))
+vscode_updated=$(echo "$vscode_output" | jq -r '.hookSpecificOutput.updatedInput.command')
+if echo "$vscode_updated" | grep -q "^rtk "; then
+ printf " ${GREEN}PASS${RESET} VS Code: updatedInput.command starts with rtk ${DIM}→ %s${RESET}\n" "$vscode_updated"
+ PASS=$((PASS + 1))
+else
+ printf " ${RED}FAIL${RESET} VS Code: updatedInput.command should start with rtk: %s\n" "$vscode_updated"
+ FAIL=$((FAIL + 1))
+fi
+
+echo ""
+
+# ---- SUMMARY ----
+echo "============================================"
+if [ $FAIL -eq 0 ]; then
+ printf " ${GREEN}ALL $TOTAL TESTS PASSED${RESET}\n"
+else
+ printf " ${RED}$FAIL FAILED${RESET} / $TOTAL total ($PASS passed)\n"
+fi
+echo "============================================"
+
+exit $FAIL
diff --git a/hooks/windsurf-rtk-rules.md b/hooks/windsurf-rtk-rules.md
new file mode 100644
index 000000000..8491f5a78
--- /dev/null
+++ b/hooks/windsurf-rtk-rules.md
@@ -0,0 +1,32 @@
+# RTK - Rust Token Killer (Windsurf)
+
+**Usage**: Token-optimized CLI proxy for shell commands.
+
+## Rule
+
+Always prefix shell commands with `rtk` to minimize token consumption.
+
+Examples:
+
+```bash
+rtk git status
+rtk cargo test
+rtk ls src/
+rtk grep "pattern" src/
+rtk find "*.rs" .
+rtk docker ps
+rtk gh pr list
+```
+
+## Meta Commands
+
+```bash
+rtk gain # Show token savings
+rtk gain --history # Command history with savings
+rtk discover # Find missed RTK opportunities
+rtk proxy # Run raw (no filtering, for debugging)
+```
+
+## Why
+
+RTK filters and compresses command output before it reaches the LLM context, saving 60-90% tokens on common operations. Always use `rtk ` instead of raw commands.
diff --git a/openclaw/README.md b/openclaw/README.md
new file mode 100644
index 000000000..301d7c0fa
--- /dev/null
+++ b/openclaw/README.md
@@ -0,0 +1,86 @@
+# RTK Plugin for OpenClaw
+
+Transparently rewrites shell commands executed via OpenClaw's `exec` tool to their RTK equivalents, achieving 60-90% LLM token savings.
+
+This is the OpenClaw equivalent of the Claude Code hooks in `hooks/rtk-rewrite.sh`.
+
+## How it works
+
+The plugin registers a `before_tool_call` hook that intercepts `exec` tool calls. When the agent runs a command like `git status`, the plugin delegates to `rtk rewrite` which returns the optimized command (e.g. `rtk git status`). The compressed output enters the agent's context window, saving tokens.
+
+All rewrite logic lives in RTK itself (`rtk rewrite`). This plugin is a thin delegate -- when new filters are added to RTK, the plugin picks them up automatically with zero changes.
+
+## Installation
+
+### Prerequisites
+
+RTK must be installed and available in `$PATH`:
+
+```bash
+brew install rtk
+# or
+curl -fsSL https://raw.githubusercontent.com/rtk-ai/rtk/refs/heads/master/install.sh | sh
+```
+
+### Install the plugin
+
+```bash
+# Copy the plugin to OpenClaw's extensions directory
+mkdir -p ~/.openclaw/extensions/rtk-rewrite
+cp openclaw/index.ts openclaw/openclaw.plugin.json ~/.openclaw/extensions/rtk-rewrite/
+
+# Restart the gateway
+openclaw gateway restart
+```
+
+### Or install via OpenClaw CLI
+
+```bash
+openclaw plugins install ./openclaw
+```
+
+## Configuration
+
+In `openclaw.json`:
+
+```json5
+{
+ plugins: {
+ entries: {
+ "rtk-rewrite": {
+ enabled: true,
+ config: {
+ enabled: true, // Toggle rewriting on/off
+ verbose: false // Log rewrites to console
+ }
+ }
+ }
+ }
+}
+```
+
+## What gets rewritten
+
+Everything that `rtk rewrite` supports (30+ commands). See the [full command list](https://github.com/rtk-ai/rtk#commands).
+
+## What's NOT rewritten
+
+Handled by `rtk rewrite` guards:
+- Commands already using `rtk`
+- Piped commands (`|`, `&&`, `;`)
+- Heredocs (`<<`)
+- Commands without an RTK filter
+
+## Measured savings
+
+| Command | Token savings |
+|---------|--------------|
+| `git log --stat` | 87% |
+| `ls -la` | 78% |
+| `git status` | 66% |
+| `grep` (single file) | 52% |
+| `find -name` | 48% |
+
+## License
+
+MIT -- same as RTK.
diff --git a/openclaw/index.ts b/openclaw/index.ts
new file mode 100644
index 000000000..17ea4ec93
--- /dev/null
+++ b/openclaw/index.ts
@@ -0,0 +1,74 @@
+/**
+ * RTK Rewrite Plugin for OpenClaw
+ *
+ * Transparently rewrites exec tool commands to RTK equivalents
+ * before execution, achieving 60-90% LLM token savings.
+ *
+ * All rewrite logic lives in `rtk rewrite` (src/discover/registry.rs).
+ * This plugin is a thin delegate — to add or change rules, edit the
+ * Rust registry, not this file.
+ */
+
+import { execFileSync, execSync } from "node:child_process";
+
+let rtkAvailable: boolean | null = null;
+
+function checkRtk(): boolean {
+  if (rtkAvailable !== null) return rtkAvailable;
+  try {
+    execSync("which rtk", { stdio: "ignore" });
+    rtkAvailable = true;
+  } catch {
+    rtkAvailable = false;
+  }
+  return rtkAvailable;
+}
+
+function tryRewrite(command: string): string | null {
+  try {
+    const result = execFileSync("rtk", ["rewrite", command], {
+      encoding: "utf-8",
+      timeout: 2000,
+    }).trim();
+    return result && result !== command ? result : null;
+  } catch {
+    return null;
+  }
+}
+
+export default function register(api: any) {
+ const pluginConfig = api.config ?? {};
+ const enabled = pluginConfig.enabled !== false;
+ const verbose = pluginConfig.verbose === true;
+
+ if (!enabled) return;
+
+ if (!checkRtk()) {
+ console.warn("[rtk] rtk binary not found in PATH — plugin disabled");
+ return;
+ }
+
+ api.on(
+ "before_tool_call",
+    (event: { toolName: string; params: Record<string, unknown> }) => {
+ if (event.toolName !== "exec") return;
+
+ const command = event.params?.command;
+ if (typeof command !== "string") return;
+
+ const rewritten = tryRewrite(command);
+ if (!rewritten) return;
+
+ if (verbose) {
+ console.log(`[rtk] ${command} -> ${rewritten}`);
+ }
+
+ return { params: { ...event.params, command: rewritten } };
+ },
+ { priority: 10 }
+ );
+
+ if (verbose) {
+ console.log("[rtk] OpenClaw plugin registered");
+ }
+}
diff --git a/openclaw/openclaw.plugin.json b/openclaw/openclaw.plugin.json
new file mode 100644
index 000000000..3fce418d7
--- /dev/null
+++ b/openclaw/openclaw.plugin.json
@@ -0,0 +1,28 @@
+{
+ "id": "rtk-rewrite",
+ "name": "RTK Token Optimizer",
+ "version": "1.0.0",
+ "description": "Transparently rewrites shell commands to their RTK equivalents for 60-90% LLM token savings",
+ "homepage": "https://github.com/rtk-ai/rtk",
+ "license": "MIT",
+ "configSchema": {
+ "type": "object",
+ "additionalProperties": false,
+ "properties": {
+ "enabled": {
+ "type": "boolean",
+ "default": true,
+ "description": "Enable automatic command rewriting to RTK equivalents"
+ },
+ "verbose": {
+ "type": "boolean",
+ "default": false,
+ "description": "Log rewrite decisions to console for debugging"
+ }
+ }
+ },
+ "uiHints": {
+ "enabled": { "label": "Enable RTK rewriting" },
+ "verbose": { "label": "Verbose logging" }
+ }
+}
diff --git a/openclaw/package.json b/openclaw/package.json
new file mode 100644
index 000000000..18d359ff4
--- /dev/null
+++ b/openclaw/package.json
@@ -0,0 +1,29 @@
+{
+ "name": "@rtk-ai/rtk-rewrite",
+ "version": "1.0.0",
+ "description": "RTK plugin for OpenClaw — rewrites shell commands for 60-90% LLM token savings",
+ "main": "index.ts",
+ "license": "MIT",
+ "repository": {
+ "type": "git",
+ "url": "https://github.com/rtk-ai/rtk",
+ "directory": "openclaw"
+ },
+ "homepage": "https://github.com/rtk-ai/rtk",
+ "keywords": [
+ "rtk",
+ "openclaw",
+ "openclaw-plugin",
+ "token-savings",
+ "llm",
+ "cli-proxy"
+ ],
+ "files": [
+ "index.ts",
+ "openclaw.plugin.json",
+ "README.md"
+ ],
+ "peerDependencies": {
+ "rtk": ">=0.28.0"
+ }
+}
diff --git a/scripts/test-all.sh b/scripts/test-all.sh
index 4cbbef02e..f0e2c06b1 100755
--- a/scripts/test-all.sh
+++ b/scripts/test-all.sh
@@ -437,20 +437,42 @@ else
skip_test "rtk gt" "gt not installed"
fi
-# ── 30. Global flags ────────────────────────────────
+# ── 30. Ruby (conditional) ──────────────────────────
+
+section "Ruby (conditional)"
+
+if command -v rspec &>/dev/null; then
+ assert_help "rtk rspec" rtk rspec --help
+else
+ skip_test "rtk rspec" "rspec not installed"
+fi
+
+if command -v rubocop &>/dev/null; then
+ assert_help "rtk rubocop" rtk rubocop --help
+else
+ skip_test "rtk rubocop" "rubocop not installed"
+fi
+
+if command -v rake &>/dev/null; then
+ assert_help "rtk rake" rtk rake --help
+else
+ skip_test "rtk rake" "rake not installed"
+fi
+
+# ── 31. Global flags ────────────────────────────────
section "Global flags"
assert_ok "rtk -u ls ." rtk -u ls .
assert_ok "rtk --skip-env npm --help" rtk --skip-env npm --help
-# ── 31. CcEconomics ─────────────────────────────────
+# ── 32. CcEconomics ─────────────────────────────────
section "CcEconomics"
assert_ok "rtk cc-economics" rtk cc-economics
-# ── 32. Learn ───────────────────────────────────────
+# ── 33. Learn ───────────────────────────────────────
section "Learn"
diff --git a/scripts/test-ruby.sh b/scripts/test-ruby.sh
new file mode 100755
index 000000000..3b3008b97
--- /dev/null
+++ b/scripts/test-ruby.sh
@@ -0,0 +1,463 @@
+#!/usr/bin/env bash
+#
+# RTK Smoke Tests — Ruby (RSpec, RuboCop, Minitest, Bundle)
+# Creates a minimal Rails app, exercises all Ruby RTK filters, then cleans up.
+# Usage: bash scripts/test-ruby.sh
+#
+# Prerequisites: rtk (installed), ruby, bundler, rails gem
+# Duration: ~60-120s (rails new + bundle install dominate)
+#
+set -euo pipefail
+
+PASS=0
+FAIL=0
+SKIP=0
+FAILURES=()
+
+RED='\033[0;31m'
+GREEN='\033[0;32m'
+YELLOW='\033[0;33m'
+CYAN='\033[0;36m'
+BOLD='\033[1m'
+NC='\033[0m'
+
+# ── Helpers ──────────────────────────────────────────
+
+assert_ok() {
+ local name="$1"; shift
+ local output
+ if output=$("$@" 2>&1); then
+ PASS=$((PASS + 1))
+ printf " ${GREEN}PASS${NC} %s\n" "$name"
+ else
+ FAIL=$((FAIL + 1))
+ FAILURES+=("$name")
+ printf " ${RED}FAIL${NC} %s\n" "$name"
+ printf " cmd: %s\n" "$*"
+ printf " out: %s\n" "$(echo "$output" | head -3)"
+ fi
+}
+
+assert_contains() {
+ local name="$1"; local needle="$2"; shift 2
+ local output
+ if output=$("$@" 2>&1) && echo "$output" | grep -q "$needle"; then
+ PASS=$((PASS + 1))
+ printf " ${GREEN}PASS${NC} %s\n" "$name"
+ else
+ FAIL=$((FAIL + 1))
+ FAILURES+=("$name")
+ printf " ${RED}FAIL${NC} %s\n" "$name"
+ printf " expected: '%s'\n" "$needle"
+ printf " got: %s\n" "$(echo "$output" | head -3)"
+ fi
+}
+
+# Allow non-zero exit but check output
+assert_output() {
+ local name="$1"; local needle="$2"; shift 2
+ local output
+ output=$("$@" 2>&1) || true
+ if echo "$output" | grep -qi "$needle"; then
+ PASS=$((PASS + 1))
+ printf " ${GREEN}PASS${NC} %s\n" "$name"
+ else
+ FAIL=$((FAIL + 1))
+ FAILURES+=("$name")
+ printf " ${RED}FAIL${NC} %s\n" "$name"
+ printf " expected: '%s'\n" "$needle"
+ printf " got: %s\n" "$(echo "$output" | head -3)"
+ fi
+}
+
+skip_test() {
+ local name="$1"; local reason="$2"
+ SKIP=$((SKIP + 1))
+ printf " ${YELLOW}SKIP${NC} %s (%s)\n" "$name" "$reason"
+}
+
+# Assert command exits with non-zero and output matches needle
+assert_exit_nonzero() {
+ local name="$1"; local needle="$2"; shift 2
+ local output
+ local rc=0
+ output=$("$@" 2>&1) || rc=$?
+ if [[ $rc -ne 0 ]] && echo "$output" | grep -qi "$needle"; then
+ PASS=$((PASS + 1))
+ printf " ${GREEN}PASS${NC} %s (exit=%d)\n" "$name" "$rc"
+ else
+ FAIL=$((FAIL + 1))
+ FAILURES+=("$name")
+ printf " ${RED}FAIL${NC} %s (exit=%d)\n" "$name" "$rc"
+ if [[ $rc -eq 0 ]]; then
+ printf " expected non-zero exit, got 0\n"
+ else
+ printf " expected: '%s'\n" "$needle"
+ fi
+ printf " out: %s\n" "$(echo "$output" | head -3)"
+ fi
+}
+
+section() {
+ printf "\n${BOLD}${CYAN}── %s ──${NC}\n" "$1"
+}
+
+# ── Prerequisite checks ─────────────────────────────
+
+RTK=$(command -v rtk || echo "")
+if [[ -z "$RTK" ]]; then
+ echo "rtk not found in PATH. Run: cargo install --path ."
+ exit 1
+fi
+
+if ! command -v ruby >/dev/null 2>&1; then
+ echo "ruby not found in PATH. Install Ruby first."
+ exit 1
+fi
+
+if ! command -v bundle >/dev/null 2>&1; then
+ echo "bundler not found in PATH. Run: gem install bundler"
+ exit 1
+fi
+
+if ! command -v rails >/dev/null 2>&1; then
+ echo "rails not found in PATH. Run: gem install rails"
+ exit 1
+fi
+
+# ── Preamble ─────────────────────────────────────────
+
+printf "${BOLD}RTK Smoke Tests — Ruby (RSpec, RuboCop, Minitest, Bundle)${NC}\n"
+printf "Binary: %s (%s)\n" "$RTK" "$(rtk --version)"
+printf "Ruby: %s\n" "$(ruby --version)"
+printf "Rails: %s\n" "$(rails --version)"
+printf "Bundler: %s\n" "$(bundle --version)"
+printf "Date: %s\n\n" "$(date '+%Y-%m-%d %H:%M')"
+
+# ── Temp dir + cleanup trap ──────────────────────────
+
+TMPDIR=$(mktemp -d /tmp/rtk-ruby-smoke-XXXXXX)
+trap 'rm -rf "$TMPDIR"' EXIT
+
+printf "${BOLD}Setting up temporary Rails app in %s ...${NC}\n" "$TMPDIR"
+
+# ── Setup phase (not counted in assertions) ──────────
+
+cd "$TMPDIR"
+
+# 1. Create minimal Rails app
+printf " → rails new (--minimal --skip-git --skip-docker) ...\n"
+rails new rtk_smoke_app --minimal --skip-git --skip-docker --quiet 2>&1 | tail -1 || true
+cd rtk_smoke_app
+
+# 2. Add rspec-rails and rubocop to Gemfile
+cat >> Gemfile <<'GEMFILE'
+
+group :development, :test do
+ gem 'rspec-rails'
+ gem 'rubocop', require: false
+end
+GEMFILE
+
+# 3. Bundle install
+printf " → bundle install ...\n"
+bundle install --quiet 2>&1 | tail -1 || true
+
+# 4. Generate scaffold (creates model + minitest files)
+printf " → rails generate scaffold Post ...\n"
+rails generate scaffold Post title:string body:text published:boolean --quiet 2>&1 | tail -1 || true
+
+# 5. Install RSpec + create manual spec file
+printf " → rails generate rspec:install ...\n"
+rails generate rspec:install --quiet 2>&1 | tail -1 || true
+
+mkdir -p spec/models
+cat > spec/models/post_spec.rb <<'SPEC'
+require 'rails_helper'
+
+RSpec.describe Post, type: :model do
+ it "is valid with valid attributes" do
+ post = Post.new(title: "Test", body: "Body", published: false)
+ expect(post).to be_valid
+ end
+end
+SPEC
+
+# 6. Create + migrate database
+printf " → rails db:create && db:migrate ...\n"
+rails db:create --quiet 2>&1 | tail -1 || true
+rails db:migrate --quiet 2>&1 | tail -1 || true
+
+# 7. Create a file with intentional RuboCop offenses
+printf " → creating rubocop_bait.rb with intentional offenses ...\n"
+cat > app/models/rubocop_bait.rb <<'BAIT'
+class RubocopBait < ApplicationRecord
+ def messy_method()
+ x = 1
+ y = 2
+ if x == 1
+ puts "hello world"
+ end
+ return nil
+ end
+end
+BAIT
+
+# 8. Create a failing RSpec spec
+printf " → creating failing rspec spec ...\n"
+cat > spec/models/post_fail_spec.rb <<'FAILSPEC'
+require 'rails_helper'
+
+RSpec.describe Post, type: :model do
+ it "intentionally fails validation check" do
+ post = Post.new(title: "Hello", body: "World", published: false)
+ expect(post.title).to eq("Wrong Title On Purpose")
+ end
+end
+FAILSPEC
+
+# 9. Create an RSpec spec with pending example
+printf " → creating rspec spec with pending example ...\n"
+cat > spec/models/post_pending_spec.rb <<'PENDSPEC'
+require 'rails_helper'
+
+RSpec.describe Post, type: :model do
+ it "is valid with title" do
+ post = Post.new(title: "OK", body: "Body", published: false)
+ expect(post).to be_valid
+ end
+
+ it "will support markdown later" do
+ pending "Not yet implemented"
+    expect(Post.new.render_markdown).to eq("hello
+")
+ end
+end
+PENDSPEC
+
+# 10. Create a failing minitest test
+printf " → creating failing minitest test ...\n"
+cat > test/models/post_fail_test.rb <<'FAILTEST'
+require "test_helper"
+
+class PostFailTest < ActiveSupport::TestCase
+ test "intentionally fails" do
+ assert_equal "wrong", Post.new(title: "right").title
+ end
+end
+FAILTEST
+
+# 11. Create a passing minitest test
+printf " → creating passing minitest test ...\n"
+cat > test/models/post_pass_test.rb <<'PASSTEST'
+require "test_helper"
+
+class PostPassTest < ActiveSupport::TestCase
+ test "post is valid" do
+ post = Post.new(title: "OK", body: "Body", published: false)
+ assert post.valid?
+ end
+end
+PASSTEST
+
+printf "\n${BOLD}Setup complete. Running tests...${NC}\n"
+
+# ══════════════════════════════════════════════════════
+# Test sections
+# ══════════════════════════════════════════════════════
+
+# ── 1. RSpec ─────────────────────────────────────────
+
+section "RSpec"
+
+assert_output "rtk rspec (with failure)" \
+ "failed" \
+ rtk rspec
+
+assert_output "rtk rspec spec/models/post_spec.rb (pass)" \
+ "RSpec.*passed" \
+ rtk rspec spec/models/post_spec.rb
+
+assert_output "rtk rspec spec/models/post_fail_spec.rb (fail)" \
+ "failed\|❌" \
+ rtk rspec spec/models/post_fail_spec.rb
+
+# ── 2. RuboCop ───────────────────────────────────────
+
+section "RuboCop"
+
+assert_output "rtk rubocop (with offenses)" \
+ "offense" \
+ rtk rubocop
+
+assert_output "rtk rubocop app/ (with offenses)" \
+ "rubocop_bait\|offense" \
+ rtk rubocop app/
+
+# ── 3. Minitest (rake test) ──────────────────────────
+
+section "Minitest (rake test)"
+
+assert_output "rtk rake test (with failure)" \
+ "failure\|error\|FAIL" \
+ rtk rake test
+
+assert_output "rtk rake test single passing file" \
+ "ok rake test\|0 failures" \
+ rtk rake test TEST=test/models/post_pass_test.rb
+
+assert_exit_nonzero "rtk rake test single failing file" \
+ "failure\|FAIL" \
+ rtk rake test test/models/post_fail_test.rb
+
+# ── 4. Bundle install ────────────────────────────────
+
+section "Bundle install"
+
+assert_output "rtk bundle install (idempotent)" \
+ "bundle\|ok\|complete\|install" \
+ rtk bundle install
+
+# ── 5. Exit code preservation ────────────────────────
+
+section "Exit code preservation"
+
+assert_exit_nonzero "rtk rspec exits non-zero on failure" \
+ "failed\|failure" \
+ rtk rspec spec/models/post_fail_spec.rb
+
+assert_exit_nonzero "rtk rubocop exits non-zero on offenses" \
+ "offense" \
+ rtk rubocop app/models/rubocop_bait.rb
+
+assert_exit_nonzero "rtk rake test exits non-zero on failure" \
+ "failure\|FAIL" \
+ rtk rake test test/models/post_fail_test.rb
+
+# ── 6. bundle exec variants ─────────────────────────
+
+section "bundle exec variants"
+
+assert_output "bundle exec rspec spec/models/post_spec.rb" \
+ "passed\|example" \
+ rtk bundle exec rspec spec/models/post_spec.rb
+
+assert_output "bundle exec rubocop app/" \
+ "offense" \
+ rtk bundle exec rubocop app/
+
+# ── 7. RuboCop autocorrect ───────────────────────────
+
+section "RuboCop autocorrect"
+
+# Copy bait file so autocorrect has something to fix
+cp app/models/rubocop_bait.rb app/models/rubocop_bait_ac.rb
+sed -i.bak 's/RubocopBait/RubocopBaitAc/' app/models/rubocop_bait_ac.rb
+
+assert_output "rtk rubocop -A (autocorrect)" \
+ "autocorrected\|rubocop\|ok\|offense\|inspected" \
+ rtk rubocop -A app/models/rubocop_bait_ac.rb
+
+# Clean up autocorrect test file
+rm -f app/models/rubocop_bait_ac.rb app/models/rubocop_bait_ac.rb.bak
+
+# ── 8. RSpec pending ─────────────────────────────────
+
+section "RSpec pending"
+
+assert_output "rtk rspec with pending example" \
+ "pending" \
+ rtk rspec spec/models/post_pending_spec.rb
+
+# ── 9. RSpec text fallback ───────────────────────────
+
+section "RSpec text fallback"
+
+assert_output "rtk rspec --format documentation (text path)" \
+ "valid\|example\|post" \
+ rtk rspec --format documentation spec/models/post_spec.rb
+
+# ── 10. RSpec empty suite ────────────────────────────
+
+section "RSpec empty suite"
+
+assert_output "rtk rspec nonexistent tag" \
+ "0 examples\|No examples" \
+ rtk rspec --tag nonexistent spec/models/post_spec.rb
+
+# ── 11. Token savings ────────────────────────────────
+
+section "Token savings"
+
+# rspec (passing spec)
+raw_len=$( (bundle exec rspec spec/models/post_spec.rb 2>&1 || true) | wc -c | tr -d ' ')
+rtk_len=$( (rtk rspec spec/models/post_spec.rb 2>&1 || true) | wc -c | tr -d ' ')
+if [[ "$rtk_len" -lt "$raw_len" ]]; then
+ PASS=$((PASS + 1))
+ printf " ${GREEN}PASS${NC} rspec: rtk (%s bytes) < raw (%s bytes)\n" "$rtk_len" "$raw_len"
+else
+ FAIL=$((FAIL + 1))
+ FAILURES+=("token savings: rspec")
+ printf " ${RED}FAIL${NC} rspec: rtk (%s bytes) >= raw (%s bytes)\n" "$rtk_len" "$raw_len"
+fi
+
+# rubocop (exits non-zero on offenses, so || true)
+raw_len=$( (bundle exec rubocop app/ 2>&1 || true) | wc -c | tr -d ' ')
+rtk_len=$( (rtk rubocop app/ 2>&1 || true) | wc -c | tr -d ' ')
+if [[ "$rtk_len" -lt "$raw_len" ]]; then
+ PASS=$((PASS + 1))
+ printf " ${GREEN}PASS${NC} rubocop: rtk (%s bytes) < raw (%s bytes)\n" "$rtk_len" "$raw_len"
+else
+ FAIL=$((FAIL + 1))
+ FAILURES+=("token savings: rubocop")
+ printf " ${RED}FAIL${NC} rubocop: rtk (%s bytes) >= raw (%s bytes)\n" "$rtk_len" "$raw_len"
+fi
+
+# rake test (passing file)
+raw_len=$( (bundle exec rake test TEST=test/models/post_pass_test.rb 2>&1 || true) | wc -c | tr -d ' ')
+rtk_len=$( (rtk rake test test/models/post_pass_test.rb 2>&1 || true) | wc -c | tr -d ' ')
+if [[ "$rtk_len" -lt "$raw_len" ]]; then
+ PASS=$((PASS + 1))
+ printf " ${GREEN}PASS${NC} rake test: rtk (%s bytes) < raw (%s bytes)\n" "$rtk_len" "$raw_len"
+else
+ FAIL=$((FAIL + 1))
+ FAILURES+=("token savings: rake test")
+ printf " ${RED}FAIL${NC} rake test: rtk (%s bytes) >= raw (%s bytes)\n" "$rtk_len" "$raw_len"
+fi
+
+# bundle install (idempotent)
+raw_len=$( (bundle install 2>&1 || true) | wc -c | tr -d ' ')
+rtk_len=$( (rtk bundle install 2>&1 || true) | wc -c | tr -d ' ')
+if [[ "$rtk_len" -lt "$raw_len" ]]; then
+ PASS=$((PASS + 1))
+ printf " ${GREEN}PASS${NC} bundle install: rtk (%s bytes) < raw (%s bytes)\n" "$rtk_len" "$raw_len"
+else
+ FAIL=$((FAIL + 1))
+ FAILURES+=("token savings: bundle install")
+ printf " ${RED}FAIL${NC} bundle install: rtk (%s bytes) >= raw (%s bytes)\n" "$rtk_len" "$raw_len"
+fi
+
+# ── 12. Verbose flag ─────────────────────────────────
+
+section "Verbose flag (-v)"
+
+assert_output "rtk -v rspec (verbose)" \
+ "RSpec\|passed\|Running\|example" \
+ rtk -v rspec spec/models/post_spec.rb
+
+# ══════════════════════════════════════════════════════
+# Report
+# ══════════════════════════════════════════════════════
+
+printf "\n${BOLD}══════════════════════════════════════${NC}\n"
+printf "${BOLD}Results: ${GREEN}%d passed${NC}, ${RED}%d failed${NC}, ${YELLOW}%d skipped${NC}\n" "$PASS" "$FAIL" "$SKIP"
+
+if [[ ${#FAILURES[@]} -gt 0 ]]; then
+ printf "\n${RED}Failures:${NC}\n"
+ for f in "${FAILURES[@]}"; do
+ printf " - %s\n" "$f"
+ done
+fi
+
+printf "${BOLD}══════════════════════════════════════${NC}\n"
+
+exit "$FAIL"
diff --git a/src/cargo_cmd.rs b/src/cargo_cmd.rs
index 159636e79..eabf8a372 100644
--- a/src/cargo_cmd.rs
+++ b/src/cargo_cmd.rs
@@ -40,6 +40,11 @@ fn restore_double_dash_with_raw(args: &[String], raw_args: &[String]) -> Vec pos,
@@ -259,7 +264,7 @@ fn filter_cargo_install(output: &str) -> String {
// Already installed / up to date
if already_installed {
let info = ignored_line.split('`').nth(1).unwrap_or(&ignored_line);
- return format!("✓ cargo install: {} already installed", info);
+ return format!("cargo install: {} already installed", info);
}
// Errors
@@ -308,10 +313,7 @@ fn filter_cargo_install(output: &str) -> String {
// Success
let crate_info = format_crate_info(&installed_crate, &installed_version, "package");
- let mut result = format!(
- "✓ cargo install ({}, {} deps compiled)",
- crate_info, compiled
- );
+ let mut result = format!("cargo install ({}, {} deps compiled)", crate_info, compiled);
for line in &replaced_lines {
result.push_str(&format!("\n {}", line));
@@ -497,7 +499,7 @@ fn filter_cargo_nextest(output: &str) -> String {
} else {
format!("{}, {}s", binary_text, duration)
};
- return format!("✓ cargo nextest: {} ({})", parts.join(", "), meta);
+ return format!("cargo nextest: {} ({})", parts.join(", "), meta);
}
// With failures - show failure details then summary
@@ -620,7 +622,7 @@ fn filter_cargo_build(output: &str) -> String {
}
if error_count == 0 && warnings == 0 {
- return format!("✓ cargo build ({} crates compiled)", compiled);
+ return format!("cargo build ({} crates compiled)", compiled);
}
let mut result = String::new();
@@ -734,11 +736,11 @@ impl AggregatedTestResult {
if self.has_duration {
format!(
- "✓ cargo test: {} ({}, {:.2}s)",
+ "cargo test: {} ({}, {:.2}s)",
counts, suite_text, self.duration_secs
)
} else {
- format!("✓ cargo test: {} ({})", counts, suite_text)
+ format!("cargo test: {} ({})", counts, suite_text)
}
}
}
@@ -826,7 +828,7 @@ fn filter_cargo_test(output: &str) -> String {
// Fallback: use original behavior if regex failed
for line in &summary_lines {
- result.push_str(&format!("✓ {}\n", line));
+ result.push_str(&format!("{}\n", line));
}
return result.trim().to_string();
}
@@ -848,6 +850,18 @@ fn filter_cargo_test(output: &str) -> String {
}
if result.trim().is_empty() {
+ let has_compile_errors = output.lines().any(|line| {
+ let trimmed = line.trim_start();
+ trimmed.starts_with("error[") || trimmed.starts_with("error:")
+ });
+
+ if has_compile_errors {
+ let build_filtered = filter_cargo_build(output);
+ if build_filtered.starts_with("cargo build:") {
+ return build_filtered.replacen("cargo build:", "cargo test:", 1);
+ }
+ }
+
// Fallback: show last meaningful lines
let meaningful: Vec<&str> = output
.lines()
@@ -926,7 +940,7 @@ fn filter_cargo_clippy(output: &str) -> String {
}
if error_count == 0 && warning_count == 0 {
- return "✓ cargo clippy: No issues found".to_string();
+ return "cargo clippy: No issues found".to_string();
}
let mut result = String::new();
@@ -1054,6 +1068,42 @@ mod tests {
assert_eq!(result, vec!["--", "-D", "warnings"]);
}
+ #[test]
+ fn test_restore_double_dash_clippy_with_package_flags() {
+ // rtk cargo clippy -p my-service -p my-crate -- -D warnings
+ // Clap with trailing_var_arg preserves "--" when args precede it
+ // → clap gives ["-p", "my-service", "-p", "my-crate", "--", "-D", "warnings"]
+        let args: Vec<String> = vec![
+ "-p".into(),
+ "my-service".into(),
+ "-p".into(),
+ "my-crate".into(),
+ "--".into(),
+ "-D".into(),
+ "warnings".into(),
+ ];
+ let raw = vec![
+ "rtk".into(),
+ "cargo".into(),
+ "clippy".into(),
+ "-p".into(),
+ "my-service".into(),
+ "-p".into(),
+ "my-crate".into(),
+ "--".into(),
+ "-D".into(),
+ "warnings".into(),
+ ];
+ let result = restore_double_dash_with_raw(&args, &raw);
+ // Should NOT double the "--"
+ assert_eq!(
+ result,
+ vec!["-p", "my-service", "-p", "my-crate", "--", "-D", "warnings"]
+ );
+ // Verify only one "--" exists
+ assert_eq!(result.iter().filter(|a| *a == "--").count(), 1);
+ }
+
#[test]
fn test_filter_cargo_build_success() {
let output = r#" Compiling libc v0.2.153
@@ -1062,7 +1112,7 @@ mod tests {
Finished dev [unoptimized + debuginfo] target(s) in 15.23s
"#;
let result = filter_cargo_build(output);
- assert!(result.contains("✓ cargo build"));
+ assert!(result.contains("cargo build"));
assert!(result.contains("3 crates compiled"));
}
@@ -1098,7 +1148,7 @@ test result: ok. 15 passed; 0 failed; 0 ignored; 0 measured; 0 filtered out; fin
"#;
let result = filter_cargo_test(output);
assert!(
- result.contains("✓ cargo test: 15 passed (1 suite, 0.01s)"),
+ result.contains("cargo test: 15 passed (1 suite, 0.01s)"),
"Expected compact format, got: {}",
result
);
@@ -1155,7 +1205,7 @@ test result: ok. 32 passed; 0 failed; 0 ignored; 0 measured; 0 filtered out; fin
"#;
let result = filter_cargo_test(output);
assert!(
- result.contains("✓ cargo test: 137 passed (4 suites, 1.45s)"),
+ result.contains("cargo test: 137 passed (4 suites, 1.45s)"),
"Expected aggregated format, got: {}",
result
);
@@ -1219,7 +1269,7 @@ test result: ok. 0 passed; 0 failed; 0 ignored; 0 measured; 0 filtered out; fini
"#;
let result = filter_cargo_test(output);
assert!(
- result.contains("✓ cargo test: 0 passed (3 suites, 0.00s)"),
+ result.contains("cargo test: 0 passed (3 suites, 0.00s)"),
"Expected compact format for zero tests, got: {}",
result
);
@@ -1239,7 +1289,7 @@ test result: ok. 18 passed; 0 failed; 2 ignored; 0 measured; 0 filtered out; fin
"#;
let result = filter_cargo_test(output);
assert!(
- result.contains("✓ cargo test: 63 passed, 5 ignored, 2 filtered out (2 suites, 0.70s)"),
+ result.contains("cargo test: 63 passed, 5 ignored, 2 filtered out (2 suites, 0.70s)"),
"Expected compact format with ignored and filtered, got: {}",
result
);
@@ -1254,7 +1304,7 @@ test result: ok. 15 passed; 0 failed; 0 ignored; 0 measured; 0 filtered out; fin
"#;
let result = filter_cargo_test(output);
assert!(
- result.contains("✓ cargo test: 15 passed (1 suite, 0.01s)"),
+ result.contains("cargo test: 15 passed (1 suite, 0.01s)"),
"Expected singular 'suite', got: {}",
result
);
@@ -1268,21 +1318,44 @@ running 15 tests
test result: MALFORMED LINE WITHOUT PROPER FORMAT
"#;
let result = filter_cargo_test(output);
- // Should fallback to original behavior (show line with checkmark)
+ // Should fallback to original behavior (show line without checkmark)
assert!(
- result.contains("✓ test result: MALFORMED"),
+ result.contains("test result: MALFORMED"),
"Expected fallback format, got: {}",
result
);
}
+ #[test]
+ fn test_filter_cargo_test_compile_error_preserves_error_header() {
+ let output = r#" Compiling rtk v0.31.0 (/workspace/projects/rtk)
+error[E0425]: cannot find value `missing_symbol` in this scope
+ --> tests/repro_compile_fail.rs:3:13
+ |
+3 | let _ = missing_symbol;
+ | ^^^^^^^^^^^^^^ not found in this scope
+
+For more information about this error, try `rustc --explain E0425`.
+error: could not compile `rtk` (test "repro_compile_fail") due to 1 previous error
+"#;
+ let result = filter_cargo_test(output);
+ assert!(result.contains("cargo test: 1 errors, 0 warnings (1 crates)"));
+ assert!(result.contains("error[E0425]"), "got: {}", result);
+ assert!(
+ result.contains("--> tests/repro_compile_fail.rs:3:13"),
+ "got: {}",
+ result
+ );
+ assert!(!result.starts_with('|'), "got: {}", result);
+ }
+
#[test]
fn test_filter_cargo_clippy_clean() {
let output = r#" Checking rtk v0.5.0
Finished dev [unoptimized + debuginfo] target(s) in 1.53s
"#;
let result = filter_cargo_clippy(output);
- assert!(result.contains("✓ cargo clippy: No issues found"));
+ assert!(result.contains("cargo clippy: No issues found"));
}
#[test]
@@ -1325,7 +1398,7 @@ warning: `rtk` (bin) generated 2 warnings
Replaced package `rtk v0.9.4` with `rtk v0.11.0` (/Users/user/.cargo/bin/rtk)
"#;
let result = filter_cargo_install(output);
- assert!(result.contains("✓ cargo install"), "got: {}", result);
+ assert!(result.contains("cargo install"), "got: {}", result);
assert!(result.contains("rtk v0.11.0"), "got: {}", result);
assert!(result.contains("5 deps compiled"), "got: {}", result);
assert!(result.contains("Replaced"), "got: {}", result);
@@ -1342,7 +1415,7 @@ warning: `rtk` (bin) generated 2 warnings
Replaced package `rtk v0.9.4` with `rtk v0.11.0` (/Users/user/.cargo/bin/rtk)
"#;
let result = filter_cargo_install(output);
- assert!(result.contains("✓ cargo install"), "got: {}", result);
+ assert!(result.contains("cargo install"), "got: {}", result);
assert!(result.contains("Replacing"), "got: {}", result);
assert!(result.contains("Replaced"), "got: {}", result);
}
@@ -1387,7 +1460,7 @@ error: aborting due to 1 previous error
#[test]
fn test_filter_cargo_install_empty_output() {
let result = filter_cargo_install("");
- assert!(result.contains("✓ cargo install"), "got: {}", result);
+ assert!(result.contains("cargo install"), "got: {}", result);
assert!(result.contains("0 deps compiled"), "got: {}", result);
}
@@ -1401,7 +1474,7 @@ error: aborting due to 1 previous error
warning: be sure to add `/Users/user/.cargo/bin` to your PATH
"#;
let result = filter_cargo_install(output);
- assert!(result.contains("✓ cargo install"), "got: {}", result);
+ assert!(result.contains("cargo install"), "got: {}", result);
assert!(
result.contains("be sure to add"),
"PATH warning should be kept: {}",
@@ -1451,7 +1524,7 @@ error: aborting due to 2 previous errors
Installing rtk v0.11.0
"#;
let result = filter_cargo_install(output);
- assert!(result.contains("✓ cargo install"), "got: {}", result);
+ assert!(result.contains("cargo install"), "got: {}", result);
assert!(!result.contains("Locking"), "got: {}", result);
assert!(!result.contains("Blocking"), "got: {}", result);
assert!(!result.contains("Downloading"), "got: {}", result);
@@ -1465,7 +1538,7 @@ error: aborting due to 2 previous errors
"#;
let result = filter_cargo_install(output);
// Path-based install: crate info not extracted from path
- assert!(result.contains("✓ cargo install"), "got: {}", result);
+ assert!(result.contains("cargo install"), "got: {}", result);
assert!(result.contains("1 deps compiled"), "got: {}", result);
}
@@ -1491,7 +1564,7 @@ error: aborting due to 2 previous errors
"#;
let result = filter_cargo_nextest(output);
assert_eq!(
- result, "✓ cargo nextest: 301 passed (1 binary, 0.192s)",
+ result, "cargo nextest: 301 passed (1 binary, 0.192s)",
"got: {}",
result
);
@@ -1576,7 +1649,7 @@ error: test run failed
"#;
let result = filter_cargo_nextest(output);
assert_eq!(
- result, "✓ cargo nextest: 50 passed, 3 skipped (2 binaries, 0.500s)",
+ result, "cargo nextest: 50 passed, 3 skipped (2 binaries, 0.500s)",
"got: {}",
result
);
@@ -1627,7 +1700,7 @@ error: test run failed
"#;
let result = filter_cargo_nextest(output);
assert_eq!(
- result, "✓ cargo nextest: 100 passed (5 binaries, 1.234s)",
+ result, "cargo nextest: 100 passed (5 binaries, 1.234s)",
"got: {}",
result
);
@@ -1662,7 +1735,7 @@ error: test run failed
result
);
assert!(
- result.contains("✓ cargo nextest: 10 passed"),
+ result.contains("cargo nextest: 10 passed"),
"got: {}",
result
);
diff --git a/src/cc_economics.rs b/src/cc_economics.rs
index b38bba2f9..6f50f677c 100644
--- a/src/cc_economics.rs
+++ b/src/cc_economics.rs
@@ -14,6 +14,7 @@ use crate::utils::{format_cpt, format_tokens, format_usd};
// ── Constants ──
+#[allow(dead_code)]
const BILLION: f64 = 1e9;
// API pricing ratios (verified Feb 2026, consistent across Claude models <=200K context)
@@ -249,7 +250,7 @@ fn merge_weekly(cc: Option>, rtk: Vec) -> Vec m,
None => {
- eprintln!("⚠️ Invalid week_start format: {}", entry.week_start);
+ eprintln!("[warn] Invalid week_start format: {}", entry.week_start);
continue;
}
};
@@ -441,7 +442,7 @@ fn display_summary(tracker: &Tracker, verbose: u8) -> Result<()> {
let totals = compute_totals(&periods);
- println!("💰 Claude Code Economics");
+ println!("[cost] Claude Code Economics");
println!("════════════════════════════════════════════════════");
println!();
@@ -549,7 +550,7 @@ fn display_daily(tracker: &Tracker, verbose: u8) -> Result<()> {
.context("Failed to load daily token savings from database")?;
let periods = merge_daily(cc_daily, rtk_daily);
- println!("📅 Daily Economics");
+ println!("Daily Economics");
println!("════════════════════════════════════════════════════");
print_period_table(&periods, verbose);
Ok(())
@@ -563,7 +564,7 @@ fn display_weekly(tracker: &Tracker, verbose: u8) -> Result<()> {
.context("Failed to load weekly token savings from database")?;
let periods = merge_weekly(cc_weekly, rtk_weekly);
- println!("📅 Weekly Economics");
+ println!("Weekly Economics");
println!("════════════════════════════════════════════════════");
print_period_table(&periods, verbose);
Ok(())
@@ -577,7 +578,7 @@ fn display_monthly(tracker: &Tracker, verbose: u8) -> Result<()> {
.context("Failed to load monthly token savings from database")?;
let periods = merge_monthly(cc_monthly, rtk_monthly);
- println!("📅 Monthly Economics");
+ println!("Monthly Economics");
println!("════════════════════════════════════════════════════");
print_period_table(&periods, verbose);
Ok(())
diff --git a/src/ccusage.rs b/src/ccusage.rs
index 99e88c7f9..b49e483d5 100644
--- a/src/ccusage.rs
+++ b/src/ccusage.rs
@@ -112,6 +112,7 @@ fn build_command() -> Option {
}
/// Check if ccusage CLI is available (binary or via npx)
+#[allow(dead_code)]
pub fn is_available() -> bool {
build_command().is_some()
}
@@ -125,7 +126,7 @@ pub fn fetch(granularity: Granularity) -> Result>> {
let mut cmd = match build_command() {
Some(cmd) => cmd,
None => {
- eprintln!("⚠️ ccusage not found. Install: npm i -g ccusage (or use npx ccusage)");
+ eprintln!("[warn] ccusage not found. Install: npm i -g ccusage (or use npx ccusage)");
return Ok(None);
}
};
@@ -145,7 +146,7 @@ pub fn fetch(granularity: Granularity) -> Result >> {
let output = match output {
Err(e) => {
- eprintln!("⚠️ ccusage execution failed: {}", e);
+ eprintln!("[warn] ccusage execution failed: {}", e);
return Ok(None);
}
Ok(o) => o,
@@ -154,7 +155,7 @@ pub fn fetch(granularity: Granularity) -> Result >> {
if !output.status.success() {
let stderr = String::from_utf8_lossy(&output.stderr);
eprintln!(
- "⚠️ ccusage exited with {}: {}",
+ "[warn] ccusage exited with {}: {}",
output.status,
stderr.trim()
);
diff --git a/src/container.rs b/src/container.rs
index 8b582ca17..e609de0ce 100644
--- a/src/container.rs
+++ b/src/container.rs
@@ -53,21 +53,26 @@ fn docker_ps(_verbose: u8) -> Result<()> {
let mut rtk = String::new();
if stdout.trim().is_empty() {
- rtk.push_str("🐳 0 containers");
+ rtk.push_str("[docker] 0 containers");
println!("{}", rtk);
timer.track("docker ps", "rtk docker ps", &raw, &rtk);
return Ok(());
}
let count = stdout.lines().count();
- rtk.push_str(&format!("🐳 {} containers:\n", count));
+ rtk.push_str(&format!("[docker] {} containers:\n", count));
for line in stdout.lines().take(15) {
let parts: Vec<&str> = line.split('\t').collect();
if parts.len() >= 4 {
let id = &parts[0][..12.min(parts[0].len())];
let name = parts[1];
- let short_image = parts.get(3).unwrap_or(&"").split('/').last().unwrap_or("");
+ let short_image = parts
+ .get(3)
+ .unwrap_or(&"")
+ .split('/')
+ .next_back()
+ .unwrap_or("");
let ports = compact_ports(parts.get(4).unwrap_or(&""));
if ports == "-" {
rtk.push_str(&format!(" {} {} ({})\n", id, name, short_image));
@@ -114,7 +119,7 @@ fn docker_images(_verbose: u8) -> Result<()> {
let mut rtk = String::new();
if lines.is_empty() {
- rtk.push_str("🐳 0 images");
+ rtk.push_str("[docker] 0 images");
println!("{}", rtk);
timer.track("docker images", "rtk docker images", &raw, &rtk);
return Ok(());
@@ -141,7 +146,11 @@ fn docker_images(_verbose: u8) -> Result<()> {
} else {
format!("{:.0}MB", total_size_mb)
};
- rtk.push_str(&format!("🐳 {} images ({})\n", lines.len(), total_display));
+ rtk.push_str(&format!(
+ "[docker] {} images ({})\n",
+ lines.len(),
+ total_display
+ ));
for line in lines.iter().take(15) {
let parts: Vec<&str> = line.split('\t').collect();
@@ -183,8 +192,21 @@ fn docker_logs(args: &[String], _verbose: u8) -> Result<()> {
let stderr = String::from_utf8_lossy(&output.stderr);
let raw = format!("{}\n{}", stdout, stderr);
+ if !output.status.success() {
+ if !stderr.trim().is_empty() {
+ eprint!("{}", stderr);
+ }
+ timer.track(
+ &format!("docker logs {}", container),
+ "rtk docker logs",
+ &raw,
+ &raw,
+ );
+ std::process::exit(output.status.code().unwrap_or(1));
+ }
+
let analyzed = crate::log_cmd::run_stdin_str(&raw);
- let rtk = format!("🐳 Logs for {}:\n{}", container, analyzed);
+ let rtk = format!("[docker] Logs for {}:\n{}", container, analyzed);
println!("{}", rtk);
timer.track(
&format!("docker logs {}", container),
@@ -208,10 +230,19 @@ fn kubectl_pods(args: &[String], _verbose: u8) -> Result<()> {
let raw = String::from_utf8_lossy(&output.stdout).to_string();
let mut rtk = String::new();
+ if !output.status.success() {
+ let stderr = String::from_utf8_lossy(&output.stderr);
+ if !stderr.trim().is_empty() {
+ eprint!("{}", stderr);
+ }
+ timer.track("kubectl get pods", "rtk kubectl pods", &raw, &raw);
+ std::process::exit(output.status.code().unwrap_or(1));
+ }
+
let json: serde_json::Value = match serde_json::from_str(&raw) {
Ok(v) => v,
Err(_) => {
- rtk.push_str("☸️ No pods found");
+ rtk.push_str("No pods found");
println!("{}", rtk);
timer.track("kubectl get pods", "rtk kubectl pods", &raw, &rtk);
return Ok(());
@@ -219,7 +250,7 @@ fn kubectl_pods(args: &[String], _verbose: u8) -> Result<()> {
};
let Some(pods) = json["items"].as_array().filter(|a| !a.is_empty()) else {
- rtk.push_str("☸️ No pods found");
+ rtk.push_str("No pods found");
println!("{}", rtk);
timer.track("kubectl get pods", "rtk kubectl pods", &raw, &rtk);
return Ok(());
@@ -265,21 +296,21 @@ fn kubectl_pods(args: &[String], _verbose: u8) -> Result<()> {
let mut parts = Vec::new();
if running > 0 {
- parts.push(format!("{} ✓", running));
+ parts.push(format!("{}", running));
}
if pending > 0 {
parts.push(format!("{} pending", pending));
}
if failed > 0 {
- parts.push(format!("{} ✗", failed));
+ parts.push(format!("{} [x]", failed));
}
if restarts_total > 0 {
parts.push(format!("{} restarts", restarts_total));
}
- rtk.push_str(&format!("☸️ {} pods: {}\n", pods.len(), parts.join(", ")));
+ rtk.push_str(&format!("{} pods: {}\n", pods.len(), parts.join(", ")));
if !issues.is_empty() {
- rtk.push_str("⚠️ Issues:\n");
+ rtk.push_str("[warn] Issues:\n");
for issue in issues.iter().take(10) {
rtk.push_str(&format!(" {}\n", issue));
}
@@ -306,10 +337,19 @@ fn kubectl_services(args: &[String], _verbose: u8) -> Result<()> {
let raw = String::from_utf8_lossy(&output.stdout).to_string();
let mut rtk = String::new();
+ if !output.status.success() {
+ let stderr = String::from_utf8_lossy(&output.stderr);
+ if !stderr.trim().is_empty() {
+ eprint!("{}", stderr);
+ }
+ timer.track("kubectl get svc", "rtk kubectl svc", &raw, &raw);
+ std::process::exit(output.status.code().unwrap_or(1));
+ }
+
let json: serde_json::Value = match serde_json::from_str(&raw) {
Ok(v) => v,
Err(_) => {
- rtk.push_str("☸️ No services found");
+ rtk.push_str("No services found");
println!("{}", rtk);
timer.track("kubectl get svc", "rtk kubectl svc", &raw, &rtk);
return Ok(());
@@ -317,12 +357,12 @@ fn kubectl_services(args: &[String], _verbose: u8) -> Result<()> {
};
let Some(services) = json["items"].as_array().filter(|a| !a.is_empty()) else {
- rtk.push_str("☸️ No services found");
+ rtk.push_str("No services found");
println!("{}", rtk);
timer.track("kubectl get svc", "rtk kubectl svc", &raw, &rtk);
return Ok(());
};
- rtk.push_str(&format!("☸️ {} services:\n", services.len()));
+ rtk.push_str(&format!("{} services:\n", services.len()));
for svc in services.iter().take(15) {
let ns = svc["metadata"]["namespace"].as_str().unwrap_or("-");
@@ -381,8 +421,23 @@ fn kubectl_logs(args: &[String], _verbose: u8) -> Result<()> {
let output = cmd.output().context("Failed to run kubectl logs")?;
let raw = String::from_utf8_lossy(&output.stdout).to_string();
+
+ if !output.status.success() {
+ let stderr = String::from_utf8_lossy(&output.stderr);
+ if !stderr.trim().is_empty() {
+ eprint!("{}", stderr);
+ }
+ timer.track(
+ &format!("kubectl logs {}", pod),
+ "rtk kubectl logs",
+ &raw,
+ &raw,
+ );
+ std::process::exit(output.status.code().unwrap_or(1));
+ }
+
let analyzed = crate::log_cmd::run_stdin_str(&raw);
- let rtk = format!("☸️ Logs for {}:\n{}", pod, analyzed);
+ let rtk = format!("Logs for {}:\n{}", pod, analyzed);
println!("{}", rtk);
timer.track(
&format!("kubectl logs {}", pod),
@@ -400,10 +455,10 @@ pub fn format_compose_ps(raw: &str) -> String {
let lines: Vec<&str> = raw.lines().filter(|l| !l.trim().is_empty()).collect();
if lines.is_empty() {
- return "🐳 0 compose services".to_string();
+ return "[compose] 0 services".to_string();
}
- let mut result = format!("🐳 {} compose services:\n", lines.len());
+ let mut result = format!("[compose] {} services:\n", lines.len());
for line in lines.iter().take(20) {
let parts: Vec<&str> = line.split('\t').collect();
@@ -442,19 +497,19 @@ pub fn format_compose_ps(raw: &str) -> String {
/// Format `docker compose logs` output into compact form
pub fn format_compose_logs(raw: &str) -> String {
if raw.trim().is_empty() {
- return "🐳 No logs".to_string();
+ return "[compose] No logs".to_string();
}
// docker compose logs prefixes each line with "service-N | "
// Use the existing log deduplication engine
let analyzed = crate::log_cmd::run_stdin_str(raw);
- format!("🐳 Compose logs:\n{}", analyzed)
+ format!("[compose] Logs:\n{}", analyzed)
}
/// Format `docker compose build` output into compact summary
pub fn format_compose_build(raw: &str) -> String {
if raw.trim().is_empty() {
- return "🐳 Build: no output".to_string();
+ return "[compose] Build: no output".to_string();
}
let mut result = String::new();
@@ -462,7 +517,7 @@ pub fn format_compose_build(raw: &str) -> String {
// Extract the summary line: "[+] Building 12.3s (8/8) FINISHED"
for line in raw.lines() {
if line.contains("Building") && line.contains("FINISHED") {
- result.push_str(&format!("🐳 {}\n", line.trim()));
+ result.push_str(&format!("[compose] {}\n", line.trim()));
break;
}
}
@@ -470,9 +525,9 @@ pub fn format_compose_build(raw: &str) -> String {
if result.is_empty() {
// No FINISHED line found — might still be building or errored
if let Some(line) = raw.lines().find(|l| l.contains("Building")) {
- result.push_str(&format!("🐳 {}\n", line.trim()));
+ result.push_str(&format!("[compose] {}\n", line.trim()));
} else {
- result.push_str("🐳 Build:\n");
+ result.push_str("[compose] Build:\n");
}
}
@@ -516,7 +571,7 @@ fn compact_ports(ports: &str) -> String {
// Extract just the port numbers
let port_nums: Vec<&str> = ports
.split(',')
- .filter_map(|p| p.split("->").next().and_then(|s| s.split(':').last()))
+ .filter_map(|p| p.split("->").next().and_then(|s| s.split(':').next_back()))
.collect();
if port_nums.len() <= 3 {
@@ -771,8 +826,11 @@ mod tests {
let raw = "redis-1\tredis:7\tUp 5 hours\t";
let out = format_compose_ps(raw);
assert!(out.contains("redis"), "should show service name");
+ // Should not show port info when no ports (but [compose] prefix is OK)
+ let lines: Vec<&str> = out.lines().collect();
+ let redis_line = lines.iter().find(|l| l.contains("redis")).unwrap();
assert!(
- !out.contains("["),
+ !redis_line.contains("] ["),
"should not show port brackets when empty"
);
}
@@ -801,10 +859,7 @@ web-1 | 192.168.1.1 - GET /favicon.ico 404
api-1 | Server listening on port 3000
api-1 | Connected to database";
let out = format_compose_logs(raw);
- assert!(
- out.contains("Compose logs"),
- "should have compose logs header"
- );
+ assert!(out.contains("Logs"), "should have compose logs header");
}
#[test]
diff --git a/src/deps.rs b/src/deps.rs
index 29ea21e00..279029843 100644
--- a/src/deps.rs
+++ b/src/deps.rs
@@ -26,7 +26,7 @@ pub fn run(path: &Path, verbose: u8) -> Result<()> {
if cargo_path.exists() {
found = true;
raw.push_str(&fs::read_to_string(&cargo_path).unwrap_or_default());
- rtk.push_str("📦 Rust (Cargo.toml):\n");
+ rtk.push_str("Rust (Cargo.toml):\n");
rtk.push_str(&summarize_cargo_str(&cargo_path)?);
}
@@ -34,7 +34,7 @@ pub fn run(path: &Path, verbose: u8) -> Result<()> {
if package_path.exists() {
found = true;
raw.push_str(&fs::read_to_string(&package_path).unwrap_or_default());
- rtk.push_str("📦 Node.js (package.json):\n");
+ rtk.push_str("Node.js (package.json):\n");
rtk.push_str(&summarize_package_json_str(&package_path)?);
}
@@ -42,7 +42,7 @@ pub fn run(path: &Path, verbose: u8) -> Result<()> {
if requirements_path.exists() {
found = true;
raw.push_str(&fs::read_to_string(&requirements_path).unwrap_or_default());
- rtk.push_str("📦 Python (requirements.txt):\n");
+ rtk.push_str("Python (requirements.txt):\n");
rtk.push_str(&summarize_requirements_str(&requirements_path)?);
}
@@ -50,7 +50,7 @@ pub fn run(path: &Path, verbose: u8) -> Result<()> {
if pyproject_path.exists() {
found = true;
raw.push_str(&fs::read_to_string(&pyproject_path).unwrap_or_default());
- rtk.push_str("📦 Python (pyproject.toml):\n");
+ rtk.push_str("Python (pyproject.toml):\n");
rtk.push_str(&summarize_pyproject_str(&pyproject_path)?);
}
@@ -58,7 +58,7 @@ pub fn run(path: &Path, verbose: u8) -> Result<()> {
if gomod_path.exists() {
found = true;
raw.push_str(&fs::read_to_string(&gomod_path).unwrap_or_default());
- rtk.push_str("📦 Go (go.mod):\n");
+ rtk.push_str("Go (go.mod):\n");
rtk.push_str(&summarize_gomod_str(&gomod_path)?);
}
diff --git a/src/diff_cmd.rs b/src/diff_cmd.rs
index 136082540..d9299eb54 100644
--- a/src/diff_cmd.rs
+++ b/src/diff_cmd.rs
@@ -22,7 +22,7 @@ pub fn run(file1: &Path, file2: &Path, verbose: u8) -> Result<()> {
let mut rtk = String::new();
if diff.added == 0 && diff.removed == 0 {
- rtk.push_str("✅ Files are identical");
+ rtk.push_str("[ok] Files are identical");
println!("{}", rtk);
timer.track(
&format!("diff {} {}", file1.display(), file2.display()),
@@ -33,7 +33,7 @@ pub fn run(file1: &Path, file2: &Path, verbose: u8) -> Result<()> {
return Ok(());
}
- rtk.push_str(&format!("📊 {} → {}\n", file1.display(), file2.display()));
+ rtk.push_str(&format!("{} → {}\n", file1.display(), file2.display()));
rtk.push_str(&format!(
" +{} added, -{} removed, ~{} modified\n\n",
diff.added, diff.removed, diff.modified
@@ -168,7 +168,7 @@ fn condense_unified_diff(diff: &str) -> String {
// File header
if line.starts_with("+++ ") {
if !current_file.is_empty() && (added > 0 || removed > 0) {
- result.push(format!("📄 {} (+{} -{})", current_file, added, removed));
+ result.push(format!("[file] {} (+{} -{})", current_file, added, removed));
for c in changes.iter().take(10) {
result.push(format!(" {}", c));
}
@@ -199,7 +199,7 @@ fn condense_unified_diff(diff: &str) -> String {
// Last file
if !current_file.is_empty() && (added > 0 || removed > 0) {
- result.push(format!("📄 {} (+{} -{})", current_file, added, removed));
+ result.push(format!("[file] {} (+{} -{})", current_file, added, removed));
for c in changes.iter().take(10) {
result.push(format!(" {}", c));
}
diff --git a/src/discover/provider.rs b/src/discover/provider.rs
index e9218b2db..b4105a9d1 100644
--- a/src/discover/provider.rs
+++ b/src/discover/provider.rs
@@ -18,10 +18,15 @@ pub struct ExtractedCommand {
/// Whether the tool_result indicated an error
pub is_error: bool,
/// Chronological sequence index within the session
+ #[allow(dead_code)]
pub sequence_index: usize,
}
-/// Trait for session providers (Claude Code, future: Cursor, Windsurf).
+/// Trait for session providers (Claude Code, OpenCode, etc.).
+///
+/// Note: Cursor Agent transcripts use a text-only format without structured
+/// tool_use/tool_result blocks, so command extraction is not possible.
+/// Use `rtk gain` to track savings for Cursor sessions instead.
pub trait SessionProvider {
fn discover_sessions(
&self,
@@ -347,7 +352,7 @@ mod tests {
let cmds = provider.extract_commands(jsonl.path()).unwrap();
assert_eq!(cmds.len(), 1);
assert_eq!(cmds[0].command, "git commit --ammend");
- assert_eq!(cmds[0].is_error, true);
+ assert!(cmds[0].is_error);
assert!(cmds[0].output_content.is_some());
assert_eq!(
cmds[0].output_content.as_ref().unwrap(),
@@ -365,8 +370,8 @@ mod tests {
let provider = ClaudeProvider;
let cmds = provider.extract_commands(jsonl.path()).unwrap();
assert_eq!(cmds.len(), 2);
- assert_eq!(cmds[0].is_error, false);
- assert_eq!(cmds[1].is_error, true);
+ assert!(!cmds[0].is_error);
+ assert!(cmds[1].is_error);
}
#[test]
diff --git a/src/discover/registry.rs b/src/discover/registry.rs
index ffe7748cc..fafdaa8bd 100644
--- a/src/discover/registry.rs
+++ b/src/discover/registry.rs
@@ -48,6 +48,10 @@ lazy_static! {
.collect();
static ref ENV_PREFIX: Regex =
Regex::new(r"^(?:sudo\s+|env\s+|[A-Z_][A-Z0-9_]*=[^\s]*\s+)+").unwrap();
+ // Git global options that appear before the subcommand: -C , -c ,
+ // --git-dir , --work-tree , and flag-only options (#163)
+ static ref GIT_GLOBAL_OPT: Regex =
+ Regex::new(r"^(?:(?:-C\s+\S+|-c\s+\S+|--git-dir(?:=\S+|\s+\S+)|--work-tree(?:=\S+|\s+\S+)|--no-pager|--no-optional-locks|--bare|--literal-pathspecs)\s+)+").unwrap();
}
/// Classify a single (already-split) command.
@@ -76,6 +80,12 @@ pub fn classify_command(cmd: &str) -> Classification {
return Classification::Ignored;
}
+ // Normalize absolute binary paths: /usr/bin/grep → grep (#485)
+ let cmd_normalized = strip_absolute_path(cmd_clean);
+ // Strip git global options: git -C /tmp status → git status (#163)
+ let cmd_normalized = strip_git_global_opts(&cmd_normalized);
+ let cmd_clean = cmd_normalized.as_str();
+
// Exclude cat/head/tail with redirect operators — these are writes, not reads (#315)
if cmd_clean.starts_with("cat ")
|| cmd_clean.starts_with("head ")
@@ -262,6 +272,42 @@ pub fn split_command_chain(cmd: &str) -> Vec<&str> {
results
}
+/// Strip git global options before the subcommand (#163).
+/// `git -C /tmp status` → `git status`, preserving the rest.
+/// Returns the original string unchanged if not a git command.
+fn strip_git_global_opts(cmd: &str) -> String {
+ // Only applies to commands starting with "git "
+ if !cmd.starts_with("git ") {
+ return cmd.to_string();
+ }
+ let after_git = &cmd[4..]; // skip "git "
+ let stripped = GIT_GLOBAL_OPT.replace(after_git, "");
+ format!("git {}", stripped.trim())
+}
+
+/// Normalize absolute binary paths: `/usr/bin/grep -rn foo` → `grep -rn foo` (#485)
+/// Only strips if the first word contains a `/` (Unix path).
+fn strip_absolute_path(cmd: &str) -> String {
+ let first_space = cmd.find(' ');
+ let first_word = match first_space {
+ Some(pos) => &cmd[..pos],
+ None => cmd,
+ };
+ if first_word.contains('/') {
+ // Extract basename
+ let basename = first_word.rsplit('/').next().unwrap_or(first_word);
+ if basename.is_empty() {
+ return cmd.to_string();
+ }
+ match first_space {
+ Some(pos) => format!("{}{}", basename, &cmd[pos..]),
+ None => basename.to_string(),
+ }
+ } else {
+ cmd.to_string()
+ }
+}
+
/// Check if a command has RTK_DISABLED= prefix in its env prefix portion.
pub fn has_rtk_disabled_prefix(cmd: &str) -> bool {
let trimmed = cmd.trim();
@@ -281,9 +327,34 @@ pub fn strip_disabled_prefix(cmd: &str) -> &str {
trimmed[prefix_len..].trim_start()
}
-/// Rewrite a raw command to its RTK equivalent.
-///
-/// Returns `Some(rewritten)` if the command has an RTK equivalent or is already RTK.
+lazy_static! {
+ // Match trailing shell redirections:
+ // Alt 1: N>&M or N>&- (fd redirect/close): 2>&1, 1>&2, 2>&-
+ // Alt 2: &>file or &>>file (bash redirect both): &>/dev/null
+ // Alt 3: N>file or N>>file (fd to file): 2>/dev/null, >/tmp/out, 1>>log
+ // Note: [^(\\s] excludes process substitutions like >(tee) from false-positive matching
+ static ref TRAILING_REDIRECT: Regex =
+ Regex::new(r"\s+(?:[0-9]?>&[0-9-]|&>>?\S+|[0-9]?>>?\s*[^(\s]\S*)\s*$").unwrap();
+}
+
+/// Strip trailing stderr/stdout redirects from a command segment (#530).
+/// Returns (command_without_redirects, redirect_suffix).
+fn strip_trailing_redirects(cmd: &str) -> (&str, &str) {
+ if let Some(m) = TRAILING_REDIRECT.find(cmd) {
+ // Verify redirect is not inside quotes (single-pass count)
+ let before = &cmd[..m.start()];
+ let (sq, dq) = before.chars().fold((0u32, 0u32), |(s, d), c| match c {
+ '\'' => (s + 1, d),
+ '"' => (s, d + 1),
+ _ => (s, d),
+ });
+ if sq % 2 == 0 && dq % 2 == 0 {
+ return (&cmd[..m.start()], &cmd[m.start()..]);
+ }
+ }
+ (cmd, "")
+}
+
/// Returns `None` if the command is unsupported or ignored (hook should pass through).
///
/// Handles compound commands (`&&`, `||`, `;`) by rewriting each segment independently.
@@ -355,8 +426,18 @@ fn rewrite_compound(cmd: &str, excluded: &[String]) -> Option<String> {
} else {
// `|` pipe — rewrite first segment only, pass through the rest unchanged
let seg = cmd[seg_start..i].trim();
- let rewritten =
- rewrite_segment(seg, excluded).unwrap_or_else(|| seg.to_string());
+ // Skip rewriting `find`/`fd` in pipes — rtk find outputs a grouped
+ // format that is incompatible with pipe consumers like xargs, grep,
+ // wc, sort, etc. which expect one path per line (#439).
+ let is_pipe_incompatible = seg.starts_with("find ")
+ || seg == "find"
+ || seg.starts_with("fd ")
+ || seg == "fd";
+ let rewritten = if is_pipe_incompatible {
+ seg.to_string()
+ } else {
+ rewrite_segment(seg, excluded).unwrap_or_else(|| seg.to_string())
+ };
if rewritten != seg {
any_changed = true;
}
@@ -509,8 +590,12 @@ fn rewrite_segment(seg: &str, excluded: &[String]) -> Option<String> {
return None;
}
+ // Strip trailing stderr/stdout redirects before matching (#530)
+ // e.g. "git status 2>&1" → match "git status", re-append " 2>&1"
+ let (cmd_part, redirect_suffix) = strip_trailing_redirects(trimmed);
+
// Already RTK — pass through unchanged
- if trimmed.starts_with("rtk ") || trimmed == "rtk" {
+ if cmd_part.starts_with("rtk ") || cmd_part == "rtk" {
return Some(trimmed.to_string());
}
@@ -518,21 +603,21 @@ fn rewrite_segment(seg: &str, excluded: &[String]) -> Option<String> {
// Must intercept before generic prefix replacement, which would produce `rtk read -20 file`.
// Only intercept when head has a flag (-N, --lines=N, -c, etc.); plain `head file` falls
// through to the generic rewrite below and produces `rtk read file` as expected.
- if trimmed.starts_with("head -") {
- return rewrite_head_numeric(trimmed);
+ if cmd_part.starts_with("head -") {
+ return rewrite_head_numeric(cmd_part).map(|r| format!("{}{}", r, redirect_suffix));
}
// tail has several forms that are not compatible with generic prefix replacement.
// Only rewrite recognized numeric line forms; otherwise skip rewrite.
- if trimmed.starts_with("tail ") {
- return rewrite_tail_lines(trimmed);
+ if cmd_part.starts_with("tail ") {
+ return rewrite_tail_lines(cmd_part).map(|r| format!("{}{}", r, redirect_suffix));
}
// Use classify_command for correct ignore/prefix handling
- let rtk_equivalent = match classify_command(trimmed) {
+ let rtk_equivalent = match classify_command(cmd_part) {
Classification::Supported { rtk_equivalent, .. } => {
// Check if the base command is excluded from rewriting (#243)
- let base = trimmed.split_whitespace().next().unwrap_or("");
+ let base = cmd_part.split_whitespace().next().unwrap_or("");
if excluded.iter().any(|e| e == base) {
return None;
}
@@ -545,13 +630,13 @@ fn rewrite_segment(seg: &str, excluded: &[String]) -> Option<String> {
let rule = RULES.iter().find(|r| r.rtk_cmd == rtk_equivalent)?;
// Extract env prefix (sudo, env VAR=val, etc.)
- let stripped_cow = ENV_PREFIX.replace(trimmed, "");
- let env_prefix_len = trimmed.len() - stripped_cow.len();
- let env_prefix = &trimmed[..env_prefix_len];
+ let stripped_cow = ENV_PREFIX.replace(cmd_part, "");
+ let env_prefix_len = cmd_part.len() - stripped_cow.len();
+ let env_prefix = &cmd_part[..env_prefix_len];
let cmd_clean = stripped_cow.trim();
// #345: RTK_DISABLED=1 in env prefix → skip rewrite entirely
- if has_rtk_disabled_prefix(trimmed) {
+ if has_rtk_disabled_prefix(cmd_part) {
return None;
}
@@ -571,9 +656,9 @@ fn rewrite_segment(seg: &str, excluded: &[String]) -> Option<String> {
for &prefix in rule.rewrite_prefixes {
if let Some(rest) = strip_word_prefix(cmd_clean, prefix) {
let rewritten = if rest.is_empty() {
- format!("{}{}", env_prefix, rule.rtk_cmd)
+ format!("{}{}{}", env_prefix, rule.rtk_cmd, redirect_suffix)
} else {
- format!("{}{} {}", env_prefix, rule.rtk_cmd, rest)
+ format!("{}{} {}{}", env_prefix, rule.rtk_cmd, rest, redirect_suffix)
};
return Some(rewritten);
}
@@ -679,12 +764,10 @@ mod tests {
"tail -f app.log > /dev/null",
];
for cmd in &write_commands {
- match classify_command(cmd) {
- Classification::Supported { .. } => {
- panic!("{} should NOT be classified as Supported", cmd)
- }
- _ => {} // Unsupported or Ignored is fine
+ if let Classification::Supported { .. } = classify_command(cmd) {
+ panic!("{} should NOT be classified as Supported", cmd)
}
+ // Unsupported or Ignored is fine
}
}
@@ -1113,6 +1196,30 @@ mod tests {
);
}
+ #[test]
+ fn test_rewrite_find_pipe_skipped() {
+ // find in a pipe should NOT be rewritten — rtk find output format
+ // is incompatible with pipe consumers like xargs (#439)
+ assert_eq!(
+ rewrite_command("find . -name '*.rs' | xargs grep 'fn run'", &[]),
+ None
+ );
+ }
+
+ #[test]
+ fn test_rewrite_find_pipe_xargs_wc() {
+ assert_eq!(rewrite_command("find src -type f | wc -l", &[]), None);
+ }
+
+ #[test]
+ fn test_rewrite_find_no_pipe_still_rewritten() {
+ // find WITHOUT a pipe should still be rewritten
+ assert_eq!(
+ rewrite_command("find . -name '*.rs'", &[]),
+ Some("rtk find . -name '*.rs'".into())
+ );
+ }
+
#[test]
fn test_rewrite_heredoc_returns_none() {
assert_eq!(rewrite_command("cat <<'EOF'\nfoo\nEOF", &[]), None);
@@ -1207,6 +1314,35 @@ mod tests {
);
}
+ #[test]
+ fn test_rewrite_redirect_double() {
+ // Double redirect: only last one stripped, but full command rewrites correctly
+ assert_eq!(
+ rewrite_command("git status 2>&1 >/dev/null", &[]),
+ Some("rtk git status 2>&1 >/dev/null".into())
+ );
+ }
+
+ #[test]
+ fn test_rewrite_redirect_fd_close() {
+ // 2>&- (close stderr fd)
+ assert_eq!(
+ rewrite_command("git status 2>&-", &[]),
+ Some("rtk git status 2>&-".into())
+ );
+ }
+
+ #[test]
+ fn test_rewrite_redirect_quotes_not_stripped() {
+ // Redirect-like chars inside quotes should NOT be stripped
+ // Known limitation: apostrophes cause conservative no-strip (safe fallback)
+ let result = rewrite_command("git commit -m \"it's fixed\" 2>&1", &[]);
+ assert!(
+ result.is_some(),
+ "Should still rewrite even with apostrophe"
+ );
+ }
+
#[test]
fn test_rewrite_background_amp_non_regression() {
// background `&` must still work after redirect fix
@@ -2061,4 +2197,132 @@ mod tests {
);
assert_eq!(strip_disabled_prefix("git status"), "git status");
}
+
+ // --- #485: absolute path normalization ---
+
+ #[test]
+ fn test_classify_absolute_path_grep() {
+ assert_eq!(
+ classify_command("/usr/bin/grep -rni pattern"),
+ Classification::Supported {
+ rtk_equivalent: "rtk grep",
+ category: "Files",
+ estimated_savings_pct: 75.0,
+ status: RtkStatus::Existing,
+ }
+ );
+ }
+
+ #[test]
+ fn test_classify_absolute_path_ls() {
+ assert_eq!(
+ classify_command("/bin/ls -la"),
+ Classification::Supported {
+ rtk_equivalent: "rtk ls",
+ category: "Files",
+ estimated_savings_pct: 65.0,
+ status: RtkStatus::Existing,
+ }
+ );
+ }
+
+ #[test]
+ fn test_classify_absolute_path_git() {
+ assert_eq!(
+ classify_command("/usr/local/bin/git status"),
+ Classification::Supported {
+ rtk_equivalent: "rtk git",
+ category: "Git",
+ estimated_savings_pct: 70.0,
+ status: RtkStatus::Existing,
+ }
+ );
+ }
+
+ #[test]
+ fn test_classify_absolute_path_no_args() {
+ // /usr/bin/find alone → still classified
+ assert_eq!(
+ classify_command("/usr/bin/find ."),
+ Classification::Supported {
+ rtk_equivalent: "rtk find",
+ category: "Files",
+ estimated_savings_pct: 70.0,
+ status: RtkStatus::Existing,
+ }
+ );
+ }
+
+ #[test]
+ fn test_strip_absolute_path_helper() {
+ assert_eq!(strip_absolute_path("/usr/bin/grep -rn foo"), "grep -rn foo");
+ assert_eq!(strip_absolute_path("/bin/ls -la"), "ls -la");
+ assert_eq!(strip_absolute_path("grep -rn foo"), "grep -rn foo");
+ assert_eq!(strip_absolute_path("/usr/local/bin/git"), "git");
+ }
+
+ // --- #163: git global options ---
+
+ #[test]
+ fn test_classify_git_with_dash_c_path() {
+ assert_eq!(
+ classify_command("git -C /tmp status"),
+ Classification::Supported {
+ rtk_equivalent: "rtk git",
+ category: "Git",
+ estimated_savings_pct: 70.0,
+ status: RtkStatus::Existing,
+ }
+ );
+ }
+
+ #[test]
+ fn test_classify_git_no_pager_log() {
+ assert_eq!(
+ classify_command("git --no-pager log -5"),
+ Classification::Supported {
+ rtk_equivalent: "rtk git",
+ category: "Git",
+ estimated_savings_pct: 70.0,
+ status: RtkStatus::Existing,
+ }
+ );
+ }
+
+ #[test]
+ fn test_classify_git_git_dir() {
+ assert_eq!(
+ classify_command("git --git-dir /tmp/.git status"),
+ Classification::Supported {
+ rtk_equivalent: "rtk git",
+ category: "Git",
+ estimated_savings_pct: 70.0,
+ status: RtkStatus::Existing,
+ }
+ );
+ }
+
+ #[test]
+ fn test_rewrite_git_dash_c() {
+ assert_eq!(
+ rewrite_command("git -C /tmp status", &[]),
+ Some("rtk git -C /tmp status".to_string())
+ );
+ }
+
+ #[test]
+ fn test_rewrite_git_no_pager() {
+ assert_eq!(
+ rewrite_command("git --no-pager log -5", &[]),
+ Some("rtk git --no-pager log -5".to_string())
+ );
+ }
+
+ #[test]
+ fn test_strip_git_global_opts_helper() {
+ assert_eq!(strip_git_global_opts("git -C /tmp status"), "git status");
+ assert_eq!(strip_git_global_opts("git --no-pager log"), "git log");
+ assert_eq!(strip_git_global_opts("git status"), "git status");
+ assert_eq!(strip_git_global_opts("cargo test"), "cargo test");
+ }
}
diff --git a/src/discover/report.rs b/src/discover/report.rs
index 5d05f150a..5b1fe8016 100644
--- a/src/discover/report.rs
+++ b/src/discover/report.rs
@@ -165,6 +165,14 @@ pub fn format_text(report: &DiscoverReport, limit: usize, verbose: bool) -> Stri
out.push_str("\n~estimated from tool_result output sizes\n");
+ // Cursor note: check if Cursor hooks are installed
+ if let Some(home) = dirs::home_dir() {
+ let cursor_hook = home.join(".cursor").join("hooks").join("rtk-rewrite.sh");
+ if cursor_hook.exists() {
+ out.push_str("\nNote: Cursor sessions are tracked via `rtk gain` (discover scans Claude Code only)\n");
+ }
+ }
+
if verbose && report.parse_errors > 0 {
out.push_str(&format!("Parse errors skipped: {}\n", report.parse_errors));
}
diff --git a/src/discover/rules.rs b/src/discover/rules.rs
index 92582ee87..8df8f46ce 100644
--- a/src/discover/rules.rs
+++ b/src/discover/rules.rs
@@ -46,6 +46,11 @@ pub const PATTERNS: &[&str] = &[
// Go tooling
r"^go\s+(test|build|vet)",
r"^golangci-lint(\s|$)",
+ // Ruby tooling
+ r"^bundle\s+(install|update)\b",
+ r"^(?:bundle\s+exec\s+)?(?:bin/)?(?:rake|rails)\s+test",
+ r"^(?:bundle\s+exec\s+)?rspec(?:\s|$)",
+ r"^(?:bundle\s+exec\s+)?rubocop(?:\s|$)",
// AWS CLI
r"^aws\s+",
// PostgreSQL
@@ -343,6 +348,45 @@ pub const RULES: &[RtkRule] = &[
subcmd_savings: &[],
subcmd_status: &[],
},
+ // Ruby tooling
+ RtkRule {
+ rtk_cmd: "rtk bundle",
+ rewrite_prefixes: &["bundle"],
+ category: "Ruby",
+ savings_pct: 70.0,
+ subcmd_savings: &[],
+ subcmd_status: &[],
+ },
+ RtkRule {
+ rtk_cmd: "rtk rake",
+ rewrite_prefixes: &[
+ "bundle exec rails",
+ "bundle exec rake",
+ "bin/rails",
+ "rails",
+ "rake",
+ ],
+ category: "Ruby",
+ savings_pct: 85.0,
+ subcmd_savings: &[("test", 90.0)],
+ subcmd_status: &[],
+ },
+ RtkRule {
+ rtk_cmd: "rtk rspec",
+ rewrite_prefixes: &["bundle exec rspec", "bin/rspec", "rspec"],
+ category: "Tests",
+ savings_pct: 65.0,
+ subcmd_savings: &[],
+ subcmd_status: &[],
+ },
+ RtkRule {
+ rtk_cmd: "rtk rubocop",
+ rewrite_prefixes: &["bundle exec rubocop", "rubocop"],
+ category: "Build",
+ savings_pct: 65.0,
+ subcmd_savings: &[],
+ subcmd_status: &[],
+ },
// AWS CLI
RtkRule {
rtk_cmd: "rtk aws",
diff --git a/src/display_helpers.rs b/src/display_helpers.rs
index a102c397c..60354c7cd 100644
--- a/src/display_helpers.rs
+++ b/src/display_helpers.rs
@@ -21,7 +21,7 @@ pub fn format_duration(ms: u64) -> String {
/// Trait for period-based statistics that can be displayed in tables
pub trait PeriodStats {
- /// Icon for this period type (e.g., "📅", "📊", "📆")
+ /// Icon for this period type (e.g., "D", "W", "M")
fn icon() -> &'static str;
/// Label for this period type (e.g., "Daily", "Weekly", "Monthly")
@@ -143,7 +143,7 @@ pub fn print_period_table(data: &[T]) {
impl PeriodStats for DayStats {
fn icon() -> &'static str {
- "📅"
+ "D"
}
fn label() -> &'static str {
@@ -193,7 +193,7 @@ impl PeriodStats for DayStats {
impl PeriodStats for WeekStats {
fn icon() -> &'static str {
- "📊"
+ "W"
}
fn label() -> &'static str {
@@ -253,7 +253,7 @@ impl PeriodStats for WeekStats {
impl PeriodStats for MonthStats {
fn icon() -> &'static str {
- "📆"
+ "M"
}
fn label() -> &'static str {
@@ -322,7 +322,7 @@ mod tests {
assert_eq!(day.commands(), 10);
assert_eq!(day.saved_tokens(), 200);
assert_eq!(day.avg_time_ms(), 150);
- assert_eq!(DayStats::icon(), "📅");
+ assert_eq!(DayStats::icon(), "D");
assert_eq!(DayStats::label(), "Daily");
}
@@ -342,7 +342,7 @@ mod tests {
assert_eq!(week.period(), "01-20 → 01-26");
assert_eq!(week.avg_time_ms(), 100);
- assert_eq!(WeekStats::icon(), "📊");
+ assert_eq!(WeekStats::icon(), "W");
assert_eq!(WeekStats::label(), "Weekly");
}
@@ -361,7 +361,7 @@ mod tests {
assert_eq!(month.period(), "2026-01");
assert_eq!(month.avg_time_ms(), 100);
- assert_eq!(MonthStats::icon(), "📆");
+ assert_eq!(MonthStats::icon(), "M");
assert_eq!(MonthStats::label(), "Monthly");
}
diff --git a/src/dotnet_cmd.rs b/src/dotnet_cmd.rs
index 07bc0d3ac..dde3bba56 100644
--- a/src/dotnet_cmd.rs
+++ b/src/dotnet_cmd.rs
@@ -4,6 +4,9 @@ use crate::dotnet_trx;
use crate::tracking;
use crate::utils::{resolved_command, truncate};
use anyhow::{Context, Result};
+use quick_xml::events::Event;
+use quick_xml::Reader;
+use serde_json::Value;
use std::ffi::OsString;
use std::path::{Path, PathBuf};
use std::sync::atomic::{AtomicU64, Ordering};
@@ -492,25 +495,56 @@ fn build_effective_dotnet_args(
effective.push("-v:minimal".to_string());
}
- if !has_nologo_arg(args) {
+ let runner_mode = if subcommand == "test" {
+ detect_test_runner_mode(args)
+ } else {
+ TestRunnerMode::Classic
+ };
+
+ // --nologo: skip for MtpNative — args pass directly to the MTP runtime which
+ // does not understand MSBuild/VSTest flags.
+ if runner_mode != TestRunnerMode::MtpNative && !has_nologo_arg(args) {
effective.push("-nologo".to_string());
}
if subcommand == "test" {
- if !has_trx_logger_arg(args) {
- effective.push("--logger".to_string());
- effective.push("trx".to_string());
- }
-
- if !has_results_directory_arg(args) {
- if let Some(results_dir) = trx_results_dir {
- effective.push("--results-directory".to_string());
- effective.push(results_dir.display().to_string());
+ match runner_mode {
+ TestRunnerMode::Classic => {
+ if !has_trx_logger_arg(args) {
+ effective.push("--logger".to_string());
+ effective.push("trx".to_string());
+ }
+ if !has_results_directory_arg(args) {
+ if let Some(results_dir) = trx_results_dir {
+ effective.push("--results-directory".to_string());
+ effective.push(results_dir.display().to_string());
+ }
+ }
+ effective.extend(args.iter().cloned());
+ }
+ TestRunnerMode::MtpNative => {
+ // In .NET 10 native MTP mode, --report-trx is a direct dotnet test flag.
+ // Modern MTP frameworks (TUnit 1.19.74+, MSTest, xUnit with MTP runner)
+ // include Microsoft.Testing.Extensions.TrxReport natively.
+ if !has_report_trx_arg(args) {
+ effective.push("--report-trx".to_string());
+ }
+ effective.extend(args.iter().cloned());
+ }
+ TestRunnerMode::MtpVsTestBridge => {
+ // In VsTestBridge mode (supported on .NET 9 SDK and earlier), --report-trx
+ // goes after the -- separator so it reaches the MTP runtime.
+ if !has_report_trx_arg(args) {
+ effective.extend(inject_report_trx_into_args(args));
+ } else {
+ effective.extend(args.iter().cloned());
+ }
}
}
+ } else {
+ effective.extend(args.iter().cloned());
}
- effective.extend(args.iter().cloned());
effective
}
@@ -533,6 +567,176 @@ fn has_verbosity_arg(args: &[String]) -> bool {
})
}
+/// How the targeted test project(s) run tests — determines which TRX injection strategy to use.
+#[derive(Debug, PartialEq)]
+enum TestRunnerMode {
+ /// Classic VSTest runner. Inject `--logger trx --results-directory`.
+ Classic,
+ /// Native MTP runner (`UseMicrosoftTestingPlatformRunner`, `UseTestingPlatformRunner`, or
+ /// global.json MTP mode). `--logger trx` breaks the run; inject `--report-trx` directly.
+ MtpNative,
+ /// VSTest bridge for MTP (`TestingPlatformDotnetTestSupport=true`). `--logger trx` is
+ /// silently ignored; MTP args must come after `--`. Inject `-- --report-trx`.
+ MtpVsTestBridge,
+}
+
+/// Which MTP-related property a single MSBuild file declares.
+#[derive(Debug, PartialEq)]
+enum MtpProjectKind {
+ None,
+ VsTestBridge, // UseMicrosoftTestingPlatformRunner | UseTestingPlatformRunner | TestingPlatformDotnetTestSupport
+}
+
+/// Scans a single MSBuild file (.csproj / .fsproj / .vbproj / Directory.Build.props) for
+/// MTP-related properties and returns which kind it is.
+fn scan_mtp_kind_in_file(path: &Path) -> MtpProjectKind {
+ let content = match std::fs::read_to_string(path) {
+ Ok(c) => c,
+ Err(_) => return MtpProjectKind::None,
+ };
+
+ let mut reader = Reader::from_str(&content);
+ reader.config_mut().trim_text(true);
+ let mut buf = Vec::new();
+ let mut inside_mtp_element = false;
+
+ loop {
+ match reader.read_event_into(&mut buf) {
+ Ok(Event::Start(e)) => {
+ let name_lower = e.local_name().as_ref().to_ascii_lowercase();
+ // All project-file MTP properties run in VSTest bridge mode and require
+ // MTP-specific args to come after `--`. Only global.json MTP mode is native.
+ inside_mtp_element = matches!(
+ name_lower.as_slice(),
+ b"usemicrosofttestingplatformrunner"
+ | b"usetestingplatformrunner"
+ | b"testingplatformdotnettestsupport"
+ );
+ }
+ Ok(Event::Text(e)) => {
+ if inside_mtp_element {
+ if let Ok(text) = e.unescape() {
+ if text.trim().eq_ignore_ascii_case("true") {
+ return MtpProjectKind::VsTestBridge;
+ }
+ }
+ }
+ }
+ Ok(Event::End(_)) => inside_mtp_element = false,
+ Ok(Event::Eof) => break,
+ Err(_) => break,
+ _ => {}
+ }
+ buf.clear();
+ }
+
+ MtpProjectKind::None
+}
+
+fn parse_global_json_mtp_mode(path: &Path) -> bool {
+ let Ok(content) = std::fs::read_to_string(path) else {
+ return false;
+ };
+    let Ok(json) = serde_json::from_str::<Value>(&content) else {
+ return false;
+ };
+ json.get("test")
+ .and_then(|t| t.get("runner"))
+ .and_then(|r| r.as_str())
+ .is_some_and(|r| r.eq_ignore_ascii_case("Microsoft.Testing.Platform"))
+}
+
+/// Checks whether the `global.json` closest to the current directory enables the .NET 10
+/// native MTP mode (`"test": { "runner": "Microsoft.Testing.Platform" }`).
+fn is_global_json_mtp_mode() -> bool {
+ let Ok(mut dir) = std::env::current_dir() else {
+ return false;
+ };
+ loop {
+ let path = dir.join("global.json");
+ if path.exists() {
+ let is_mtp = parse_global_json_mtp_mode(&path);
+ return is_mtp; // stop at first global.json found, regardless of result
+ }
+ if !dir.pop() {
+ break;
+ }
+ }
+ false
+}
+
+/// Detects which test runner mode the targeted project(s) use.
+///
+/// Priority order: global.json (MtpNative) > project-file/Directory.Build.props (MtpVsTestBridge) > Classic.
+/// `global.json` MTP mode is checked first because it overrides all project-level properties.
+fn detect_test_runner_mode(args: &[String]) -> TestRunnerMode {
+ // global.json MTP mode takes overall precedence — when set, dotnet test runs MTP
+ // natively regardless of project file properties.
+ if is_global_json_mtp_mode() {
+ return TestRunnerMode::MtpNative;
+ }
+
+ let project_extensions = ["csproj", "fsproj", "vbproj"];
+
+ let explicit_projects: Vec<&str> = args
+ .iter()
+ .map(String::as_str)
+ .filter(|a| {
+ let lower = a.to_ascii_lowercase();
+ project_extensions
+ .iter()
+ .any(|ext| lower.ends_with(&format!(".{ext}")))
+ })
+ .collect();
+
+ let mut found = MtpProjectKind::None;
+
+ if !explicit_projects.is_empty() {
+ for p in &explicit_projects {
+ if scan_mtp_kind_in_file(Path::new(p)) == MtpProjectKind::VsTestBridge {
+ found = MtpProjectKind::VsTestBridge;
+ }
+ }
+ } else {
+ // No explicit project — scan current directory.
+ if let Ok(entries) = std::fs::read_dir(".") {
+ for entry in entries.flatten() {
+ let name = entry.file_name();
+ let name_str = name.to_string_lossy().to_ascii_lowercase();
+ if project_extensions
+ .iter()
+ .any(|ext| name_str.ends_with(&format!(".{ext}")))
+ && scan_mtp_kind_in_file(&entry.path()) == MtpProjectKind::VsTestBridge
+ {
+ found = MtpProjectKind::VsTestBridge;
+ }
+ }
+ }
+ }
+
+ if found == MtpProjectKind::VsTestBridge {
+ return TestRunnerMode::MtpVsTestBridge;
+ }
+
+ // Walk up from current directory looking for Directory.Build.props.
+ if let Ok(mut dir) = std::env::current_dir() {
+ loop {
+ let props = dir.join("Directory.Build.props");
+ if props.exists() {
+ if scan_mtp_kind_in_file(&props) == MtpProjectKind::VsTestBridge {
+ return TestRunnerMode::MtpVsTestBridge;
+ }
+ break; // only read the first (closest) Directory.Build.props
+ }
+ if !dir.pop() {
+ break;
+ }
+ }
+ }
+
+ TestRunnerMode::Classic
+}
+
fn has_nologo_arg(args: &[String]) -> bool {
args.iter()
.any(|arg| matches!(arg.to_ascii_lowercase().as_str(), "-nologo" | "/nologo"))
@@ -578,6 +782,25 @@ fn has_report_arg(args: &[String]) -> bool {
})
}
+fn has_report_trx_arg(args: &[String]) -> bool {
+ args.iter().any(|a| a.eq_ignore_ascii_case("--report-trx"))
+}
+
+/// Injects `--report-trx` after the `--` separator in `args`.
+/// If no `--` separator exists, appends `-- --report-trx` at the end.
+fn inject_report_trx_into_args(args: &[String]) -> Vec<String> {
+ if let Some(sep) = args.iter().position(|a| a == "--") {
+ let mut result = args.to_vec();
+ result.insert(sep + 1, "--report-trx".to_string());
+ result
+ } else {
+ let mut result = args.to_vec();
+ result.push("--".to_string());
+ result.push("--report-trx".to_string());
+ result
+ }
+}
+
fn extract_report_arg(args: &[String]) -> Option<String> {
let mut iter = args.iter().peekable();
while let Some(arg) = iter.next() {
@@ -1474,6 +1697,336 @@ mod tests {
.any(|w| w[0] == "--results-directory" && w[1] == "/custom/results"));
}
+ #[test]
+ fn test_scan_mtp_kind_detects_use_microsoft_testing_platform_runner() {
+ let temp_dir = tempfile::tempdir().expect("create temp dir");
+ let csproj = temp_dir.path().join("MyProject.csproj");
+ fs::write(
+ &csproj,
+ r#"
+
+ true
+
+ "#,
+ )
+ .expect("write csproj");
+
+ assert_eq!(scan_mtp_kind_in_file(&csproj), MtpProjectKind::VsTestBridge);
+ }
+
+ #[test]
+ fn test_scan_mtp_kind_detects_use_testing_platform_runner() {
+ let temp_dir = tempfile::tempdir().expect("create temp dir");
+ let csproj = temp_dir.path().join("MyProject.csproj");
+ fs::write(
+ &csproj,
+ r#"
+
+ true
+
+ "#,
+ )
+ .expect("write csproj");
+
+ assert_eq!(scan_mtp_kind_in_file(&csproj), MtpProjectKind::VsTestBridge);
+ }
+
+ #[test]
+ fn test_is_mtp_project_file_returns_false_for_classic_vstest() {
+ let temp_dir = tempfile::tempdir().expect("create temp dir");
+ let csproj = temp_dir.path().join("MyProject.csproj");
+ fs::write(
+ &csproj,
+ r#"
+
+ net9.0
+
+
+
+
+ "#,
+ )
+ .expect("write csproj");
+
+ assert_eq!(scan_mtp_kind_in_file(&csproj), MtpProjectKind::None);
+ }
+
+ #[test]
+ fn test_scan_mtp_kind_returns_none_when_value_is_false() {
+ let temp_dir = tempfile::tempdir().expect("create temp dir");
+ let csproj = temp_dir.path().join("MyProject.csproj");
+ fs::write(
+ &csproj,
+ r#"
+
+ false
+
+ "#,
+ )
+ .expect("write csproj");
+
+ assert_eq!(scan_mtp_kind_in_file(&csproj), MtpProjectKind::None);
+ }
+
+ #[test]
+ fn test_scan_mtp_kind_detects_vstest_bridge() {
+ let temp_dir = tempfile::tempdir().expect("create temp dir");
+ let csproj = temp_dir.path().join("MSTest.Tests.csproj");
+ fs::write(
+ &csproj,
+ r#"
+
+ true
+
+ "#,
+ )
+ .expect("write csproj");
+
+ assert_eq!(scan_mtp_kind_in_file(&csproj), MtpProjectKind::VsTestBridge);
+ }
+
+ #[test]
+ fn test_both_mtp_properties_in_same_file_still_vstest_bridge() {
+ let temp_dir = tempfile::tempdir().expect("create temp dir");
+ let csproj = temp_dir.path().join("Hybrid.Tests.csproj");
+ fs::write(
+ &csproj,
+ r#"
+
+ true
+ true
+
+ "#,
+ )
+ .expect("write csproj");
+
+ // All project-file properties → VsTestBridge; only global.json gives MtpNative
+ assert_eq!(scan_mtp_kind_in_file(&csproj), MtpProjectKind::VsTestBridge);
+ }
+
+ #[test]
+ fn test_detect_mode_mtp_csproj_is_vstest_bridge_injects_report_trx() {
+ let temp_dir = tempfile::tempdir().expect("create temp dir");
+ let csproj = temp_dir.path().join("MTP.Tests.csproj");
+ fs::write(
+ &csproj,
+ r#"
+
+ true
+
+ "#,
+ )
+ .expect("write csproj");
+
+ let args = vec![csproj.display().to_string()];
+ assert_eq!(
+ detect_test_runner_mode(&args),
+ TestRunnerMode::MtpVsTestBridge
+ );
+
+ let binlog_path = Path::new("/tmp/test.binlog");
+ let injected = build_effective_dotnet_args("test", &args, binlog_path, None);
+
+ // MTP VsTestBridge → --report-trx injected after --, no VSTest --logger trx
+ assert!(!injected.contains(&"--logger".to_string()));
+ assert!(injected.contains(&"--report-trx".to_string()));
+ assert!(injected.contains(&"--".to_string()));
+ }
+
+ #[test]
+ fn test_detect_mode_vstest_bridge_injects_report_trx() {
+ let temp_dir = tempfile::tempdir().expect("create temp dir");
+ let csproj = temp_dir.path().join("MSTest.Tests.csproj");
+ fs::write(
+ &csproj,
+ r#"
+
+ true
+
+ "#,
+ )
+ .expect("write csproj");
+
+ let args = vec![csproj.display().to_string()];
+ assert_eq!(
+ detect_test_runner_mode(&args),
+ TestRunnerMode::MtpVsTestBridge
+ );
+
+ let binlog_path = Path::new("/tmp/test.binlog");
+ let injected = build_effective_dotnet_args("test", &args, binlog_path, None);
+
+ // --report-trx injected after --, --nologo supported in bridge mode
+ assert!(!injected.contains(&"--logger".to_string()));
+ assert!(injected.contains(&"--report-trx".to_string()));
+ assert!(injected.contains(&"--".to_string()));
+ assert!(injected.contains(&"-nologo".to_string()));
+ }
+
+ #[test]
+ fn test_parse_global_json_mtp_mode_detects_mtp_native() {
+ let temp_dir = tempfile::tempdir().expect("create temp dir");
+ let global_json = temp_dir.path().join("global.json");
+ fs::write(
+ &global_json,
+ r#"{"sdk":{"version":"10.0.100"},"test":{"runner":"Microsoft.Testing.Platform"}}"#,
+ )
+ .expect("write global.json");
+
+ assert!(parse_global_json_mtp_mode(&global_json));
+ }
+
+ #[test]
+ fn test_vstest_bridge_injects_report_trx_after_separator() {
+ let temp_dir = tempfile::tempdir().expect("create temp dir");
+ let csproj = temp_dir.path().join("MTP.Tests.csproj");
+ fs::write(
+ &csproj,
+ r#"
+
+ true
+
+ "#,
+ )
+ .expect("write csproj");
+
+ let args = vec![csproj.display().to_string()];
+ assert_eq!(
+ detect_test_runner_mode(&args),
+ TestRunnerMode::MtpVsTestBridge
+ );
+
+ let binlog_path = Path::new("/tmp/test.binlog");
+ let injected = build_effective_dotnet_args("test", &args, binlog_path, None);
+
+ // VsTestBridge → inject -- --report-trx after user args
+ assert!(injected.contains(&"--".to_string()));
+ assert!(injected.contains(&"--report-trx".to_string()));
+ let sep_pos = injected.iter().position(|a| a == "--").unwrap();
+ let trx_pos = injected.iter().position(|a| a == "--report-trx").unwrap();
+ assert!(sep_pos < trx_pos);
+ // No VSTest logger
+ assert!(!injected.contains(&"--logger".to_string()));
+ }
+
+ #[test]
+ fn test_vstest_bridge_existing_separator_inserts_report_trx_after_it() {
+ let temp_dir = tempfile::tempdir().expect("create temp dir");
+ let csproj = temp_dir.path().join("MTP.Tests.csproj");
+ fs::write(
+ &csproj,
+ r#"
+
+ true
+
+ "#,
+ )
+ .expect("write csproj");
+
+ let args = vec![
+ csproj.display().to_string(),
+ "--".to_string(),
+ "--parallel".to_string(),
+ ];
+ let binlog_path = Path::new("/tmp/test.binlog");
+ let injected = build_effective_dotnet_args("test", &args, binlog_path, None);
+
+ // --report-trx inserted right after existing --
+ let sep_pos = injected.iter().position(|a| a == "--").unwrap();
+ assert_eq!(injected[sep_pos + 1], "--report-trx");
+ assert!(injected.contains(&"--parallel".to_string()));
+ }
+
+ #[test]
+ fn test_vstest_bridge_respects_existing_report_trx() {
+ let temp_dir = tempfile::tempdir().expect("create temp dir");
+ let csproj = temp_dir.path().join("MTP.Tests.csproj");
+ fs::write(
+ &csproj,
+ r#"
+
+ true
+
+ "#,
+ )
+ .expect("write csproj");
+
+ let args = vec![
+ csproj.display().to_string(),
+ "--".to_string(),
+ "--report-trx".to_string(),
+ ];
+ let binlog_path = Path::new("/tmp/test.binlog");
+ let injected = build_effective_dotnet_args("test", &args, binlog_path, None);
+
+ // Should not double-inject
+ assert_eq!(injected.iter().filter(|a| *a == "--report-trx").count(), 1);
+ }
+
+ #[test]
+ fn test_detect_mode_classic_csproj_injects_trx() {
+ let temp_dir = tempfile::tempdir().expect("create temp dir");
+ let csproj = temp_dir.path().join("Classic.Tests.csproj");
+ fs::write(
+ &csproj,
+ r#"
+
+ net9.0
+
+ "#,
+ )
+ .expect("write csproj");
+
+ let args = vec![csproj.display().to_string()];
+ assert_eq!(detect_test_runner_mode(&args), TestRunnerMode::Classic);
+
+ let binlog_path = Path::new("/tmp/test.binlog");
+ let trx_dir = Path::new("/tmp/test_results");
+ let injected = build_effective_dotnet_args("test", &args, binlog_path, Some(trx_dir));
+ assert!(injected.contains(&"--logger".to_string()));
+ assert!(injected.contains(&"trx".to_string()));
+ }
+
+ #[test]
+ fn test_detect_mode_directory_build_props_vstest_bridge() {
+ let temp_dir = tempfile::tempdir().expect("create temp dir");
+ let props = temp_dir.path().join("Directory.Build.props");
+ fs::write(
+ &props,
+ r#"
+
+ true
+
+ "#,
+ )
+ .expect("write Directory.Build.props");
+
+ assert_eq!(scan_mtp_kind_in_file(&props), MtpProjectKind::VsTestBridge);
+ }
+
+ #[test]
+ fn test_is_global_json_mtp_mode_detects_mtp_runner() {
+ let temp_dir = tempfile::tempdir().expect("create temp dir");
+ let global_json = temp_dir.path().join("global.json");
+ fs::write(
+ &global_json,
+ r#"{ "sdk": { "version": "10.0.100" }, "test": { "runner": "Microsoft.Testing.Platform" } }"#,
+ )
+ .expect("write global.json");
+
+ assert!(parse_global_json_mtp_mode(&global_json));
+ }
+
+ #[test]
+ fn test_is_global_json_mtp_mode_returns_false_for_vstest_runner() {
+ let temp_dir = tempfile::tempdir().expect("create temp dir");
+ let global_json = temp_dir.path().join("global.json");
+ fs::write(&global_json, r#"{ "sdk": { "version": "9.0.100" } }"#)
+ .expect("write global.json");
+
+ assert!(!parse_global_json_mtp_mode(&global_json));
+ }
+
#[test]
fn test_merge_test_summary_from_trx_uses_primary_and_cleans_file() {
let temp_dir = tempfile::tempdir().expect("create temp dir");
diff --git a/src/env_cmd.rs b/src/env_cmd.rs
index 4a2437c43..d4b9b6a38 100644
--- a/src/env_cmd.rs
+++ b/src/env_cmd.rs
@@ -62,7 +62,7 @@ pub fn run(filter: Option<&str>, show_all: bool, verbose: u8) -> Result<()> {
// Print categorized
if !path_vars.is_empty() {
- println!("📂 PATH Variables:");
+ println!("PATH Variables:");
for (k, v) in &path_vars {
if k == "PATH" {
// Split PATH for readability
@@ -81,28 +81,28 @@ pub fn run(filter: Option<&str>, show_all: bool, verbose: u8) -> Result<()> {
}
if !lang_vars.is_empty() {
- println!("\n🔧 Language/Runtime:");
+ println!("\nLanguage/Runtime:");
for (k, v) in &lang_vars {
println!(" {}={}", k, v);
}
}
if !cloud_vars.is_empty() {
- println!("\n☁️ Cloud/Services:");
+ println!("\nCloud/Services:");
for (k, v) in &cloud_vars {
println!(" {}={}", k, v);
}
}
if !tool_vars.is_empty() {
- println!("\n🛠️ Tools:");
+ println!("\nTools:");
for (k, v) in &tool_vars {
println!(" {}={}", k, v);
}
}
if !other_vars.is_empty() {
- println!("\n📋 Other:");
+ println!("\nOther:");
for (k, v) in other_vars.iter().take(20) {
println!(" {}={}", k, v);
}
@@ -118,7 +118,7 @@ pub fn run(filter: Option<&str>, show_all: bool, verbose: u8) -> Result<()> {
+ tool_vars.len()
+ other_vars.len().min(20);
if filter.is_none() {
- println!("\n📊 Total: {} vars (showing {} relevant)", total, shown);
+ println!("\nTotal: {} vars (showing {} relevant)", total, shown);
}
let raw: String = vars.iter().map(|(k, v)| format!("{}={}\n", k, v)).collect();
diff --git a/src/filter.rs b/src/filter.rs
index c4c255ad3..d6d9d19bc 100644
--- a/src/filter.rs
+++ b/src/filter.rs
@@ -50,6 +50,7 @@ pub enum Language {
Java,
Ruby,
Shell,
+ /// Data formats (JSON, YAML, TOML, XML, CSV) — no comment stripping
Data,
Unknown,
}
@@ -67,9 +68,10 @@ impl Language {
"java" => Language::Java,
"rb" => Language::Ruby,
"sh" | "bash" | "zsh" => Language::Shell,
- "json" | "jsonc" | "json5" | "yaml" | "yml" | "toml" | "xml" | "html" | "htm"
- | "css" | "scss" | "svg" | "md" | "markdown" | "txt" | "csv" | "tsv" | "env"
- | "ini" | "cfg" | "conf" | "lock" => Language::Data,
+ "json" | "jsonc" | "json5" | "yaml" | "yml" | "toml" | "xml" | "csv" | "tsv"
+ | "graphql" | "gql" | "sql" | "md" | "markdown" | "txt" | "env" | "lock" => {
+ Language::Data
+ }
_ => Language::Unknown,
}
}
@@ -249,6 +251,11 @@ lazy_static! {
impl FilterStrategy for AggressiveFilter {
fn filter(&self, content: &str, lang: &Language) -> String {
+ // Data formats (JSON, YAML, etc.) must never be code-filtered
+ if *lang == Language::Data {
+ return MinimalFilter.filter(content, lang);
+ }
+
let minimal = MinimalFilter.filter(content, lang);
let mut result = String::with_capacity(minimal.len() / 2);
let mut brace_depth = 0;
@@ -407,14 +414,15 @@ mod tests {
assert_eq!(Language::from_extension("yml"), Language::Data);
assert_eq!(Language::from_extension("toml"), Language::Data);
assert_eq!(Language::from_extension("xml"), Language::Data);
- assert_eq!(Language::from_extension("md"), Language::Data);
assert_eq!(Language::from_extension("csv"), Language::Data);
+ assert_eq!(Language::from_extension("md"), Language::Data);
assert_eq!(Language::from_extension("lock"), Language::Data);
}
#[test]
- fn test_data_files_no_comment_stripping() {
- // Regression test for #464: package.json with `/*` in strings
+ fn test_json_no_comment_stripping() {
+ // Reproduces #464: package.json with "packages/*" was corrupted
+ // because /* was treated as block comment start
let json = r#"{
"workspaces": {
"packages": [
@@ -432,17 +440,41 @@ mod tests {
}"#;
let filter = MinimalFilter;
let result = filter.filter(json, &Language::Data);
+ // All fields must be preserved — no comment stripping on JSON
+ assert!(
+ result.contains("packages/*"),
+ "packages/* should not be treated as block comment start"
+ );
assert!(
result.contains("scripts"),
- "scripts section must be preserved"
+ "scripts section must not be stripped"
);
assert!(
- result.contains("packages/*"),
- "glob pattern must be preserved"
+ result.contains("lint-staged"),
+ "lint-staged section must not be stripped"
);
assert!(
result.contains("**/package.json"),
- "glob pattern must be preserved"
+ "**/package.json should not be treated as block comment end"
+ );
+ }
+
+ #[test]
+ fn test_json_aggressive_filter_preserves_structure() {
+ let json = r#"{
+ "name": "my-app",
+ "dependencies": {
+ "react": "^18.0.0"
+ },
+ "scripts": {
+ "dev": "next dev /* not a comment */"
+ }
+}"#;
+ let filter = AggressiveFilter;
+ let result = filter.filter(json, &Language::Data);
+ assert!(
+ result.contains("/* not a comment */"),
+ "Aggressive filter must not strip comment-like patterns in JSON"
);
}
diff --git a/src/filters/bundle-install.toml b/src/filters/bundle-install.toml
new file mode 100644
index 000000000..80e074862
--- /dev/null
+++ b/src/filters/bundle-install.toml
@@ -0,0 +1,61 @@
+[filters.bundle-install]
+description = "Compact bundle install/update — strip 'Using' lines, keep installs and errors"
+match_command = "^bundle\\s+(install|update)\\b"
+strip_ansi = true
+strip_lines_matching = [
+ "^Using ",
+ "^\\s*$",
+ "^Fetching gem metadata",
+ "^Resolving dependencies",
+]
+match_output = [
+ { pattern = "Bundle complete!", message = "ok bundle: complete" },
+ { pattern = "Bundle updated!", message = "ok bundle: updated" },
+]
+max_lines = 30
+
+[[tests.bundle-install]]
+name = "all cached short-circuits"
+input = """
+Using bundler 2.5.6
+Using rake 13.1.0
+Using ast 2.4.2
+Using base64 0.2.0
+Using minitest 5.22.2
+Bundle complete! 85 Gemfile dependencies, 200 gems now installed.
+Use `bundle info [gemname]` to see where a bundled gem is installed.
+"""
+expected = "ok bundle: complete"
+
+[[tests.bundle-install]]
+name = "mixed install short-circuits to summary on Bundle complete"
+input = """
+Fetching gem metadata from https://rubygems.org/.........
+Resolving dependencies...
+Using rake 13.1.0
+Using ast 2.4.2
+Fetching rspec 3.13.0
+Installing rspec 3.13.0
+Using rubocop 1.62.0
+Fetching simplecov 0.22.0
+Installing simplecov 0.22.0
+Bundle complete! 85 Gemfile dependencies, 202 gems now installed.
+"""
+expected = "ok bundle: complete"
+
+[[tests.bundle-install]]
+name = "update output"
+input = """
+Fetching gem metadata from https://rubygems.org/.........
+Resolving dependencies...
+Using rake 13.1.0
+Fetching rspec 3.14.0 (was 3.13.0)
+Installing rspec 3.14.0 (was 3.13.0)
+Bundle updated!
+"""
+expected = "ok bundle: updated"
+
+[[tests.bundle-install]]
+name = "empty output"
+input = ""
+expected = ""
diff --git a/src/filters/gradle.toml b/src/filters/gradle.toml
new file mode 100644
index 000000000..e6ad28a3e
--- /dev/null
+++ b/src/filters/gradle.toml
@@ -0,0 +1,35 @@
+[filters.gradle]
+description = "Compact Gradle build output — strip progress, keep tasks and errors"
+match_command = "^(\\./)?gradlew?\\b"
+strip_ansi = true
+strip_lines_matching = [
+ "^\\s*$",
+ "^> Configuring project",
+ "^> Resolving dependencies",
+ "^> Transform ",
+ "^Download(ing)?\\s+http",
+ "^\\s*<-+>\\s*$",
+ "^> Task :.*UP-TO-DATE$",
+ "^> Task :.*NO-SOURCE$",
+ "^> Task :.*FROM-CACHE$",
+ "^Starting a Gradle Daemon",
+ "^Daemon will be stopped",
+]
+truncate_lines_at = 150
+max_lines = 50
+on_empty = "gradle: ok"
+
+[[tests.gradle]]
+name = "strips UP-TO-DATE tasks, keeps build result"
+input = "> Configuring project :app\n> Task :app:compileJava UP-TO-DATE\n> Task :app:compileKotlin UP-TO-DATE\n> Task :app:test\n\n3 tests completed, 1 failed\n\nBUILD FAILED in 12s"
+expected = "> Task :app:test\n3 tests completed, 1 failed\nBUILD FAILED in 12s"
+
+[[tests.gradle]]
+name = "clean build preserved"
+input = "BUILD SUCCESSFUL in 8s\n7 actionable tasks: 7 executed"
+expected = "BUILD SUCCESSFUL in 8s\n7 actionable tasks: 7 executed"
+
+[[tests.gradle]]
+name = "empty after stripping"
+input = "> Configuring project :app\n"
+expected = "gradle: ok"
diff --git a/src/filters/jira.toml b/src/filters/jira.toml
new file mode 100644
index 000000000..9de5ad3ba
--- /dev/null
+++ b/src/filters/jira.toml
@@ -0,0 +1,20 @@
+[filters.jira]
+description = "Compact Jira CLI output — strip verbose metadata, keep essentials"
+match_command = "^jira\\b"
+strip_ansi = true
+strip_lines_matching = [
+ "^\\s*$",
+ "^\\s*--",
+]
+truncate_lines_at = 120
+max_lines = 40
+
+[[tests.jira]]
+name = "strips blank lines from issue list"
+input = "TYPE\tKEY\tSUMMARY\tSTATUS\n\nStory\tPROJ-123\tAdd login feature\tIn Progress\n\nBug\tPROJ-456\tFix crash on startup\tOpen"
+expected = "TYPE\tKEY\tSUMMARY\tSTATUS\nStory\tPROJ-123\tAdd login feature\tIn Progress\nBug\tPROJ-456\tFix crash on startup\tOpen"
+
+[[tests.jira]]
+name = "single issue view"
+input = "KEY: PROJ-123\nSummary: Add login feature\nStatus: In Progress\nAssignee: john@example.com"
+expected = "KEY: PROJ-123\nSummary: Add login feature\nStatus: In Progress\nAssignee: john@example.com"
diff --git a/src/filters/just.toml b/src/filters/just.toml
new file mode 100644
index 000000000..31e58a542
--- /dev/null
+++ b/src/filters/just.toml
@@ -0,0 +1,26 @@
+[filters.just]
+description = "Compact just task runner output — strip recipe headers, keep command output"
+match_command = "^just\\b"
+strip_ansi = true
+strip_lines_matching = [
+ "^\\s*$",
+ "^\\s*Available recipes:",
+ "^\\s*just --list",
+]
+truncate_lines_at = 150
+max_lines = 50
+
+[[tests.just]]
+name = "preserves command output"
+input = "cargo test\n\ntest result: ok. 42 passed; 0 failed\n"
+expected = "cargo test\ntest result: ok. 42 passed; 0 failed"
+
+[[tests.just]]
+name = "preserves error output"
+input = "error: Compilation failed\nsrc/main.rs:10: expected `;`"
+expected = "error: Compilation failed\nsrc/main.rs:10: expected `;`"
+
+[[tests.just]]
+name = "empty input"
+input = ""
+expected = ""
diff --git a/src/filters/mise.toml b/src/filters/mise.toml
new file mode 100644
index 000000000..7223d12b3
--- /dev/null
+++ b/src/filters/mise.toml
@@ -0,0 +1,30 @@
+[filters.mise]
+description = "Compact mise task runner output — strip status lines, keep task results"
+match_command = "^mise\\s+(run|exec|install|upgrade)\\b"
+strip_ansi = true
+strip_lines_matching = [
+ "^\\s*$",
+ "^mise\\s+(trust|install|upgrade).*✓",
+ "^mise\\s+Installing\\s",
+ "^mise\\s+Downloading\\s",
+ "^mise\\s+Extracting\\s",
+ "^mise\\s+\\w+@[\\d.]+ installed",
+]
+truncate_lines_at = 150
+max_lines = 50
+on_empty = "mise: ok"
+
+[[tests.mise]]
+name = "strips install noise, keeps task output"
+input = "mise Installing node@20.0.0\nmise Downloading node@20.0.0\nmise Extracting node@20.0.0\nmise node@20.0.0 installed\n\nlint check passed\n2 warnings found"
+expected = "lint check passed\n2 warnings found"
+
+[[tests.mise]]
+name = "preserves error output"
+input = "mise run lint\nError: biome check failed\nsrc/index.ts:5 — unused variable"
+expected = "mise run lint\nError: biome check failed\nsrc/index.ts:5 — unused variable"
+
+[[tests.mise]]
+name = "empty after stripping"
+input = "mise trust ~/dev/.mise.toml ✓\nmise install node@20 ✓\n"
+expected = "mise: ok"
diff --git a/src/filters/nx.toml b/src/filters/nx.toml
new file mode 100644
index 000000000..d42dfb76e
--- /dev/null
+++ b/src/filters/nx.toml
@@ -0,0 +1,25 @@
+[filters.nx]
+description = "Compact Nx monorepo output — strip task graph noise, keep results"
+match_command = "^(pnpm\\s+)?nx\\b"
+strip_ansi = true
+strip_lines_matching = [
+ "^\\s*$",
+ "^\\s*>\\s*NX\\s+Running target",
+ "^\\s*>\\s*NX\\s+Nx read the output",
+ "^\\s*>\\s*NX\\s+View logs",
+ "^———————",
+ "^—————————",
+ "^\\s+Nx \\(powered by",
+]
+truncate_lines_at = 150
+max_lines = 60
+
+[[tests.nx]]
+name = "strips Nx noise, keeps build output"
+input = "\n > NX Running target build for project myapp\n\n———————————————————————————————————————\nCompiled successfully.\nOutput: dist/apps/myapp\n\n > NX View logs at /tmp/.nx/runs/abc123\n\n Nx (powered by computation caching)\n"
+expected = "Compiled successfully.\nOutput: dist/apps/myapp"
+
+[[tests.nx]]
+name = "preserves error output"
+input = "ERROR: Cannot find module '@myapp/shared'\n\n > NX Running target build for project myapp\n\nFailed at step: build"
+expected = "ERROR: Cannot find module '@myapp/shared'\nFailed at step: build"
diff --git a/src/filters/ollama.toml b/src/filters/ollama.toml
new file mode 100644
index 000000000..e325ec948
--- /dev/null
+++ b/src/filters/ollama.toml
@@ -0,0 +1,23 @@
+[filters.ollama]
+description = "Strip ANSI spinners and cursor control from ollama output, keep final text"
+match_command = "^ollama\\s+run\\b"
+strip_ansi = true
+strip_lines_matching = [
+ "^[⠋⠙⠹⠸⠼⠴⠦⠧⠇⠏\\s]*$",
+ "^\\s*$",
+]
+
+[[tests.ollama]]
+name = "strips spinner lines, keeps response"
+input = "⠋ \n⠙ \n⠹ \nHello! How can I help you today?"
+expected = "Hello! How can I help you today?"
+
+[[tests.ollama]]
+name = "preserves multi-line response"
+input = "⠋ \n⠙ \nLine one of the response.\nLine two of the response."
+expected = "Line one of the response.\nLine two of the response."
+
+[[tests.ollama]]
+name = "empty input"
+input = ""
+expected = ""
diff --git a/src/filters/spring-boot.toml b/src/filters/spring-boot.toml
new file mode 100644
index 000000000..5ec03e58c
--- /dev/null
+++ b/src/filters/spring-boot.toml
@@ -0,0 +1,28 @@
+[filters.spring-boot]
+description = "Compact Spring Boot output — strip banner and verbose startup logs, keep key events"
+match_command = "^(mvn\\s+spring-boot:run|java\\s+-jar.*\\.jar|(\\./)?gradlew?\\s+.*bootRun)"
+strip_ansi = true
+keep_lines_matching = [
+ "Started\\s.*\\sin\\s",
+ "Tomcat started on port",
+ "ERROR",
+ "WARN",
+ "Exception",
+ "Caused by:",
+ "Application run failed",
+ "BUILD\\s",
+ "Tests run:",
+ "FAILURE",
+ "listening on port",
+]
+max_lines = 30
+
+[[tests.spring-boot]]
+name = "keeps startup summary and errors"
+input = " . ____ _ \n /\\\\ / ___'_ __ _ _(_)_ __ \n( ( )\\___ | '_ | '_| | '_ \\ \n \\/ ___)| |_)| | | | | || )\n ' |____| .__|_| |_|_| |_\\__|\n :: Spring Boot :: (v3.2.0)\n2024-01-01 INFO Initializing Spring\n2024-01-01 INFO Bean 'dataSource' created\n2024-01-01 INFO Tomcat started on port 8080\n2024-01-01 INFO Started MyApp in 3.2 seconds"
+expected = "2024-01-01 INFO Tomcat started on port 8080\n2024-01-01 INFO Started MyApp in 3.2 seconds"
+
+[[tests.spring-boot]]
+name = "preserves errors"
+input = " :: Spring Boot :: (v3.2.0)\n2024-01-01 INFO Initializing Spring\n2024-01-01 ERROR Application run failed\nCaused by: java.lang.NullPointerException"
+expected = "2024-01-01 ERROR Application run failed\nCaused by: java.lang.NullPointerException"
diff --git a/src/filters/stat.toml b/src/filters/stat.toml
index 24d9d946b..8c240c05b 100644
--- a/src/filters/stat.toml
+++ b/src/filters/stat.toml
@@ -1,21 +1,17 @@
[filters.stat]
-description = "Compact stat output — strip blank lines"
+description = "Compact stat output — strip device/inode/birth noise"
match_command = "^stat\\b"
strip_ansi = true
strip_lines_matching = [
"^\\s*$",
+ "^\\s*Device:",
+ "^\\s*Birth:",
]
-max_lines = 30
+truncate_lines_at = 120
+max_lines = 20
[[tests.stat]]
-name = "macOS stat output kept"
-input = """
-16777234 8690244974 -rw-r--r-- 1 patrick staff 0 12345 "Mar 10 12:00:00 2026" "Mar 10 11:00:00 2026" "Mar 10 11:00:00 2026" "Mar 9 10:00:00 2026" 4096 24 0 file.txt
-"""
-expected = "16777234 8690244974 -rw-r--r-- 1 patrick staff 0 12345 \"Mar 10 12:00:00 2026\" \"Mar 10 11:00:00 2026\" \"Mar 10 11:00:00 2026\" \"Mar 9 10:00:00 2026\" 4096 24 0 file.txt"
-
-[[tests.stat]]
-name = "linux stat output kept"
+name = "linux stat output strips device and birth"
input = """
File: main.rs
Size: 12345 Blocks: 24 IO Block: 4096 regular file
@@ -26,7 +22,21 @@ Modify: 2026-03-10 11:00:00.000000000 +0100
Change: 2026-03-10 11:00:00.000000000 +0100
Birth: 2026-03-09 10:00:00.000000000 +0100
"""
-expected = " File: main.rs\n Size: 12345 Blocks: 24 IO Block: 4096 regular file\nDevice: 801h/2049d Inode: 1234567 Links: 1\nAccess: (0644/-rw-r--r--) Uid: ( 1000/ patrick) Gid: ( 1000/ patrick)\nAccess: 2026-03-10 12:00:00.000000000 +0100\nModify: 2026-03-10 11:00:00.000000000 +0100\nChange: 2026-03-10 11:00:00.000000000 +0100\n Birth: 2026-03-09 10:00:00.000000000 +0100"
+expected = " File: main.rs\n Size: 12345 Blocks: 24 IO Block: 4096 regular file\nAccess: (0644/-rw-r--r--) Uid: ( 1000/ patrick) Gid: ( 1000/ patrick)\nAccess: 2026-03-10 12:00:00.000000000 +0100\nModify: 2026-03-10 11:00:00.000000000 +0100\nChange: 2026-03-10 11:00:00.000000000 +0100"
+
+[[tests.stat]]
+name = "macOS stat -x strips device and birth"
+input = """
+ File: "main.rs"
+ Size: 82848 FileType: Regular File
+ Mode: (0644/-rw-r--r--) Uid: ( 501/ patrick) Gid: ( 20/ staff)
+Device: 1,15 Inode: 66302332 Links: 1
+Access: Wed Mar 18 21:21:15 2026
+Modify: Wed Mar 18 20:56:11 2026
+Change: Wed Mar 18 20:56:11 2026
+ Birth: Wed Mar 18 20:56:11 2026
+"""
+expected = " File: \"main.rs\"\n Size: 82848 FileType: Regular File\n Mode: (0644/-rw-r--r--) Uid: ( 501/ patrick) Gid: ( 20/ staff)\nAccess: Wed Mar 18 21:21:15 2026\nModify: Wed Mar 18 20:56:11 2026\nChange: Wed Mar 18 20:56:11 2026"
[[tests.stat]]
name = "empty input passes through"
diff --git a/src/filters/task.toml b/src/filters/task.toml
new file mode 100644
index 000000000..31868fc06
--- /dev/null
+++ b/src/filters/task.toml
@@ -0,0 +1,27 @@
+[filters.task]
+description = "Compact go-task output — strip task headers, keep command results"
+match_command = "^task\\b"
+strip_ansi = true
+strip_lines_matching = [
+ "^\\s*$",
+ "^task: \\[.*\\] ",
+ "^task: Task .* is up to date",
+]
+truncate_lines_at = 150
+max_lines = 50
+on_empty = "task: ok"
+
+[[tests.task]]
+name = "strips task headers, keeps output"
+input = "task: [build] go build ./...\n\ntask: [test] go test ./...\nok myapp 0.5s\n\ntask: Task \"lint\" is up to date"
+expected = "ok myapp 0.5s"
+
+[[tests.task]]
+name = "preserves error output"
+input = "task: [build] go build ./...\n./main.go:10: undefined: foo\ntask: Failed to run task \"build\": exit status 1"
+expected = "./main.go:10: undefined: foo\ntask: Failed to run task \"build\": exit status 1"
+
+[[tests.task]]
+name = "all up to date"
+input = "task: Task \"build\" is up to date\ntask: Task \"lint\" is up to date\n"
+expected = "task: ok"
diff --git a/src/filters/turbo.toml b/src/filters/turbo.toml
new file mode 100644
index 000000000..c5a09acf0
--- /dev/null
+++ b/src/filters/turbo.toml
@@ -0,0 +1,30 @@
+[filters.turbo]
+description = "Compact Turborepo output — strip cache status noise, keep task results"
+match_command = "^turbo\\b"
+strip_ansi = true
+strip_lines_matching = [
+ "^\\s*$",
+ "^\\s*cache (hit|miss|bypass)",
+ "^\\s*\\d+ packages in scope",
+ "^\\s*Tasks:\\s+\\d+",
+ "^\\s*Duration:\\s+",
+ "^\\s*Remote caching (enabled|disabled)",
+]
+truncate_lines_at = 150
+max_lines = 50
+on_empty = "turbo: ok"
+
+[[tests.turbo]]
+name = "strips cache noise, keeps task output"
+input = " cache hit, replaying logs abc123\n cache miss, executing abc456\n\n3 packages in scope\n\n> myapp:build\n\nCompiled successfully.\n\nTasks: 2 successful, 2 total (1 cached)\nDuration: 3.2s"
+expected = "> myapp:build\nCompiled successfully."
+
+[[tests.turbo]]
+name = "preserves error output"
+input = "> myapp:lint\n\nError: src/index.ts(5,1): error TS2304\n\nTasks: 0 successful, 1 total\nDuration: 1.1s"
+expected = "> myapp:lint\nError: src/index.ts(5,1): error TS2304"
+
+[[tests.turbo]]
+name = "empty after stripping"
+input = " cache hit, replaying logs abc\n\n"
+expected = "turbo: ok"
diff --git a/src/filters/yadm.toml b/src/filters/yadm.toml
new file mode 100644
index 000000000..f2cd3d1a2
--- /dev/null
+++ b/src/filters/yadm.toml
@@ -0,0 +1,21 @@
+[filters.yadm]
+description = "Compact yadm (git wrapper) output — same filtering as git"
+match_command = "^yadm\\b"
+strip_ansi = true
+strip_lines_matching = [
+ "^\\s*$",
+ "^\\s*\\(use \"git ",
+ "^\\s*\\(use \"yadm ",
+]
+truncate_lines_at = 120
+max_lines = 40
+
+[[tests.yadm]]
+name = "strips hint lines"
+input = "On branch main\nYour branch is up to date with 'origin/main'.\n\n (use \"yadm add\" to update what will be committed)\n\nChanges not staged for commit:\n modified: .bashrc"
+expected = "On branch main\nYour branch is up to date with 'origin/main'.\nChanges not staged for commit:\n modified: .bashrc"
+
+[[tests.yadm]]
+name = "short output preserved"
+input = "Already up to date."
+expected = "Already up to date."
diff --git a/src/find_cmd.rs b/src/find_cmd.rs
index 25da54e2b..df1e41b2a 100644
--- a/src/find_cmd.rs
+++ b/src/find_cmd.rs
@@ -305,7 +305,7 @@ pub fn run(
let dirs_count = dirs.len();
let total_files = files.len();
- println!("📁 {}F {}D:", total_files, dirs_count);
+ println!("{}F {}D:", total_files, dirs_count);
println!();
// Display with proper --max limiting (count individual files)
diff --git a/src/format_cmd.rs b/src/format_cmd.rs
index c2de5d713..23c01a2bc 100644
--- a/src/format_cmd.rs
+++ b/src/format_cmd.rs
@@ -168,7 +168,7 @@ fn filter_black_output(output: &str) -> String {
// Split by comma to handle both parts
for part in trimmed.split(',') {
let part_lower = part.to_lowercase();
- let words: Vec<&str> = part.trim().split_whitespace().collect();
+ let words: Vec<&str> = part.split_whitespace().collect();
if part_lower.contains("would be reformatted") {
// Parse "X file(s) would be reformatted"
@@ -226,7 +226,7 @@ fn filter_black_output(output: &str) -> String {
if !needs_formatting && (all_done || files_unchanged > 0) {
// All files formatted correctly
- result.push_str("✓ Format (black): All files formatted");
+ result.push_str("Format (black): All files formatted");
if files_unchanged > 0 {
result.push_str(&format!(" ({} files checked)", files_unchanged));
}
@@ -258,13 +258,10 @@ fn filter_black_output(output: &str) -> String {
}
if files_unchanged > 0 {
- result.push_str(&format!(
- "\n✓ {} files already formatted\n",
- files_unchanged
- ));
+ result.push_str(&format!("\n{} files already formatted\n", files_unchanged));
}
- result.push_str("\n💡 Run `black .` to format these files\n");
+ result.push_str("\n[hint] Run `black .` to format these files\n");
} else {
// Fallback: show raw output
result.push_str(output.trim());
@@ -349,7 +346,7 @@ mod tests {
fn test_filter_black_all_formatted() {
let output = "All done! ✨ 🍰 ✨\n5 files left unchanged.";
let result = filter_black_output(output);
- assert!(result.contains("✓ Format (black)"));
+ assert!(result.contains("Format (black)"));
assert!(result.contains("All files formatted"));
assert!(result.contains("5 files checked"));
}
diff --git a/src/gain.rs b/src/gain.rs
index 2dce35f1a..bafdc0018 100644
--- a/src/gain.rs
+++ b/src/gain.rs
@@ -8,6 +8,7 @@ use serde::Serialize;
use std::io::IsTerminal;
use std::path::PathBuf;
+#[allow(clippy::too_many_arguments)]
pub fn run(
project: bool, // added: per-project scope flag
graph: bool,
@@ -108,7 +109,7 @@ pub fn run(
hook_check::HookStatus::Missing => {
eprintln!(
"{}",
- "⚠️ No hook installed — run `rtk init -g` for automatic token savings"
+ "[warn] No hook installed — run `rtk init -g` for automatic token savings"
.yellow()
);
eprintln!();
@@ -116,7 +117,7 @@ pub fn run(
hook_check::HookStatus::Outdated => {
eprintln!(
"{}",
- "⚠️ Hook outdated — run `rtk init -g` to update".yellow()
+ "[warn] Hook outdated — run `rtk init -g` to update".yellow()
);
eprintln!();
}
@@ -658,7 +659,7 @@ fn check_rtk_disabled_bypass() -> Option {
let pct = (bypassed as f64 / total_bash as f64) * 100.0;
if pct > 10.0 {
Some(format!(
- "⚠️ {} commands ({:.0}%) used RTK_DISABLED=1 unnecessarily — run `rtk discover` for details",
+ "[warn] {} commands ({:.0}%) used RTK_DISABLED=1 unnecessarily — run `rtk discover` for details",
bypassed, pct
))
} else {
diff --git a/src/gh_cmd.rs b/src/gh_cmd.rs
index 9e1fe7ec3..2477bbd62 100644
--- a/src/gh_cmd.rs
+++ b/src/gh_cmd.rs
@@ -193,8 +193,8 @@ fn run_pr(args: &[String], verbose: u8, ultra_compact: bool) -> Result<()> {
"create" => pr_create(&args[1..], verbose),
"merge" => pr_merge(&args[1..], verbose),
"diff" => pr_diff(&args[1..], verbose),
- "comment" => pr_action("commented", &args, verbose),
- "edit" => pr_action("edited", &args, verbose),
+ "comment" => pr_action("commented", args, verbose),
+ "edit" => pr_action("edited", args, verbose),
_ => run_passthrough("gh", "pr", args),
}
}
@@ -235,8 +235,8 @@ fn list_prs(args: &[String], _verbose: u8, ultra_compact: bool) -> Result<()> {
filtered.push_str("PRs\n");
println!("PRs");
} else {
- filtered.push_str("📋 Pull Requests\n");
- println!("📋 Pull Requests");
+ filtered.push_str("Pull Requests\n");
+ println!("Pull Requests");
}
for pr in prs.iter().take(20) {
@@ -254,10 +254,10 @@ fn list_prs(args: &[String], _verbose: u8, ultra_compact: bool) -> Result<()> {
}
} else {
match state {
- "OPEN" => "🟢",
- "MERGED" => "🟣",
- "CLOSED" => "🔴",
- _ => "⚪",
+ "OPEN" => "[open]",
+ "MERGED" => "[merged]",
+ "CLOSED" => "[closed]",
+ _ => "[unknown]",
}
};
@@ -286,7 +286,13 @@ fn list_prs(args: &[String], _verbose: u8, ultra_compact: bool) -> Result<()> {
fn should_passthrough_pr_view(extra_args: &[String]) -> bool {
extra_args
.iter()
- .any(|a| a == "--json" || a == "--jq" || a == "--web")
+ .any(|a| a == "--json" || a == "--jq" || a == "--web" || a == "--comments")
+}
+
+fn should_passthrough_issue_view(extra_args: &[String]) -> bool {
+ extra_args
+ .iter()
+ .any(|a| a == "--json" || a == "--jq" || a == "--web" || a == "--comments")
}
fn view_pr(args: &[String], _verbose: u8, ultra_compact: bool) -> Result<()> {
@@ -352,10 +358,10 @@ fn view_pr(args: &[String], _verbose: u8, ultra_compact: bool) -> Result<()> {
}
} else {
match state {
- "OPEN" => "🟢",
- "MERGED" => "🟣",
- "CLOSED" => "🔴",
- _ => "⚪",
+ "OPEN" => "[open]",
+ "MERGED" => "[merged]",
+ "CLOSED" => "[closed]",
+ _ => "[unknown]",
}
};
@@ -368,8 +374,8 @@ fn view_pr(args: &[String], _verbose: u8, ultra_compact: bool) -> Result<()> {
print!("{}", line);
let mergeable_str = match mergeable {
- "MERGEABLE" => "✓",
- "CONFLICTING" => "✗",
+ "MERGEABLE" => "[ok]",
+ "CONFLICTING" => "[x]",
_ => "?",
};
let line = format!(" {} | {}\n", state, mergeable_str);
@@ -417,11 +423,11 @@ fn view_pr(args: &[String], _verbose: u8, ultra_compact: bool) -> Result<()> {
if ultra_compact {
if failed > 0 {
- let line = format!(" ✗{}/{} {} fail\n", passed, total, failed);
+ let line = format!(" [x]{}/{} {} fail\n", passed, total, failed);
filtered.push_str(&line);
print!("{}", line);
} else {
- let line = format!(" ✓{}/{}\n", passed, total);
+ let line = format!(" {}/{}\n", passed, total);
filtered.push_str(&line);
print!("{}", line);
}
@@ -430,7 +436,7 @@ fn view_pr(args: &[String], _verbose: u8, ultra_compact: bool) -> Result<()> {
filtered.push_str(&line);
print!("{}", line);
if failed > 0 {
- let line = format!(" ⚠️ {} checks failed\n", failed);
+ let line = format!(" [warn] {} checks failed\n", failed);
filtered.push_str(&line);
print!("{}", line);
}
@@ -504,9 +510,9 @@ fn pr_checks(args: &[String], _verbose: u8, _ultra_compact: bool) -> Result<()>
let mut failed_checks = Vec::new();
for line in stdout.lines() {
- if line.contains('✓') || line.contains("pass") {
+ if line.contains("[ok]") || line.contains("pass") {
passed += 1;
- } else if line.contains('✗') || line.contains("fail") {
+ } else if line.contains("[x]") || line.contains("fail") {
failed += 1;
failed_checks.push(line.trim().to_string());
} else if line.contains('*') || line.contains("pending") {
@@ -516,20 +522,20 @@ fn pr_checks(args: &[String], _verbose: u8, _ultra_compact: bool) -> Result<()>
let mut filtered = String::new();
- let line = "🔍 CI Checks Summary:\n";
+ let line = "CI Checks Summary:\n";
filtered.push_str(line);
print!("{}", line);
- let line = format!(" ✅ Passed: {}\n", passed);
+ let line = format!(" [ok] Passed: {}\n", passed);
filtered.push_str(&line);
print!("{}", line);
- let line = format!(" ❌ Failed: {}\n", failed);
+ let line = format!(" [FAIL] Failed: {}\n", failed);
filtered.push_str(&line);
print!("{}", line);
if pending > 0 {
- let line = format!(" ⏳ Pending: {}\n", pending);
+ let line = format!(" [pending] Pending: {}\n", pending);
filtered.push_str(&line);
print!("{}", line);
}
@@ -581,7 +587,7 @@ fn pr_status(_verbose: u8, _ultra_compact: bool) -> Result<()> {
let mut filtered = String::new();
if let Some(created_by) = json["createdBy"].as_array() {
- let line = format!("📝 Your PRs ({}):\n", created_by.len());
+ let line = format!("Your PRs ({}):\n", created_by.len());
filtered.push_str(&line);
print!("{}", line);
for pr in created_by.iter().take(5) {
@@ -636,13 +642,8 @@ fn list_issues(args: &[String], _verbose: u8, ultra_compact: bool) -> Result<()>
let mut filtered = String::new();
if let Some(issues) = json.as_array() {
- if ultra_compact {
- filtered.push_str("Issues\n");
- println!("Issues");
- } else {
- filtered.push_str("🐛 Issues\n");
- println!("🐛 Issues");
- }
+ filtered.push_str("Issues\n");
+ println!("Issues");
for issue in issues.iter().take(20) {
let number = issue["number"].as_i64().unwrap_or(0);
let title = issue["title"].as_str().unwrap_or("???");
@@ -656,9 +657,9 @@ fn list_issues(args: &[String], _verbose: u8, ultra_compact: bool) -> Result<()>
}
} else {
if state == "OPEN" {
- "🟢"
+ "[open]"
} else {
- "🔴"
+ "[closed]"
}
};
let line = format!(" {} #{} {}\n", icon, number, truncate(title, 60));
@@ -685,6 +686,13 @@ fn view_issue(args: &[String], _verbose: u8) -> Result<()> {
None => return Err(anyhow::anyhow!("Issue number required")),
};
+ // Passthrough when --comments, --json, --jq, or --web is present.
+ // --comments changes the output to include comments which our JSON
+ // field list doesn't request, causing silent data loss.
+ if should_passthrough_issue_view(&extra_args) {
+ return run_passthrough_with_extra("gh", &["issue", "view", &issue_number], &extra_args);
+ }
+
let mut cmd = resolved_command("gh");
cmd.args([
"issue",
@@ -721,7 +729,11 @@ fn view_issue(args: &[String], _verbose: u8) -> Result<()> {
let author = json["author"]["login"].as_str().unwrap_or("???");
let url = json["url"].as_str().unwrap_or("");
- let icon = if state == "OPEN" { "🟢" } else { "🔴" };
+ let icon = if state == "OPEN" {
+ "[open]"
+ } else {
+ "[closed]"
+ };
let mut filtered = String::new();
@@ -814,8 +826,8 @@ fn list_runs(args: &[String], _verbose: u8, ultra_compact: bool) -> Result<()> {
filtered.push_str("Runs\n");
println!("Runs");
} else {
- filtered.push_str("🏃 Workflow Runs\n");
- println!("🏃 Workflow Runs");
+ filtered.push_str("Workflow Runs\n");
+ println!("Workflow Runs");
}
for run in runs {
let id = run["databaseId"].as_i64().unwrap_or(0);
@@ -825,8 +837,8 @@ fn list_runs(args: &[String], _verbose: u8, ultra_compact: bool) -> Result<()> {
let icon = if ultra_compact {
match conclusion {
- "success" => "✓",
- "failure" => "✗",
+ "success" => "[ok]",
+ "failure" => "[x]",
"cancelled" => "X",
_ => {
if status == "in_progress" {
@@ -838,14 +850,14 @@ fn list_runs(args: &[String], _verbose: u8, ultra_compact: bool) -> Result<()> {
}
} else {
match conclusion {
- "success" => "✅",
- "failure" => "❌",
- "cancelled" => "🚫",
+ "success" => "[ok]",
+ "failure" => "[FAIL]",
+ "cancelled" => "[X]",
_ => {
if status == "in_progress" {
- "⏳"
+ "[time]"
} else {
- "⚪"
+ "[pending]"
}
}
}
@@ -910,7 +922,7 @@ fn view_run(args: &[String], _verbose: u8) -> Result<()> {
let mut filtered = String::new();
- let line = format!("🏃 Workflow Run #{}\n", run_id);
+ let line = format!("Workflow Run #{}\n", run_id);
filtered.push_str(&line);
print!("{}", line);
@@ -924,8 +936,8 @@ fn view_run(args: &[String], _verbose: u8) -> Result<()> {
// Skip successful jobs in compact mode
continue;
}
- if line.contains('✗') || line.contains("fail") {
- let formatted = format!(" ❌ {}\n", line.trim());
+ if line.contains("[x]") || line.contains("fail") {
+ let formatted = format!(" [FAIL] {}\n", line.trim());
filtered.push_str(&formatted);
print!("{}", formatted);
}
@@ -992,15 +1004,11 @@ fn run_repo(args: &[String], _verbose: u8, _ultra_compact: bool) -> Result<()> {
let forks = json["forkCount"].as_i64().unwrap_or(0);
let private = json["isPrivate"].as_bool().unwrap_or(false);
- let visibility = if private {
- "🔒 Private"
- } else {
- "🌐 Public"
- };
+ let visibility = if private { "[private]" } else { "[public]" };
let mut filtered = String::new();
- let line = format!("📦 {}/{}\n", owner, name);
+ let line = format!("{}/{}\n", owner, name);
filtered.push_str(&line);
print!("{}", line);
@@ -1014,7 +1022,7 @@ fn run_repo(args: &[String], _verbose: u8, _ultra_compact: bool) -> Result<()> {
print!("{}", line);
}
- let line = format!(" ⭐ {} stars | 🔱 {} forks\n", stars, forks);
+ let line = format!(" {} stars | {} forks\n", stars, forks);
filtered.push_str(&line);
print!("{}", line);
@@ -1110,6 +1118,18 @@ fn pr_merge(args: &[String], _verbose: u8) -> Result<()> {
Ok(())
}
+/// Flags that change `gh pr diff` output from unified diff to a different format.
+/// When present, compact_diff would produce empty output since it expects diff headers.
+fn has_non_diff_format_flag(args: &[String]) -> bool {
+ args.iter().any(|a| {
+ a == "--name-only"
+ || a == "--name-status"
+ || a == "--stat"
+ || a == "--numstat"
+ || a == "--shortstat"
+ })
+}
+
fn pr_diff(args: &[String], _verbose: u8) -> Result<()> {
// --no-compact: pass full diff through (gh CLI doesn't know this flag, strip it)
let no_compact = args.iter().any(|a| a == "--no-compact");
@@ -1119,7 +1139,9 @@ fn pr_diff(args: &[String], _verbose: u8) -> Result<()> {
.cloned()
.collect();
- if no_compact {
+ // Passthrough when --no-compact or when a format flag changes output away from
+ // unified diff (e.g. --name-only produces a filename list, not diff hunks).
+ if no_compact || has_non_diff_format_flag(&gh_args) {
return run_passthrough_with_extra("gh", &["pr", "diff"], &gh_args);
}
@@ -1493,8 +1515,81 @@ mod tests {
}
#[test]
- fn test_should_passthrough_pr_view_other_flags() {
- assert!(!should_passthrough_pr_view(&["--comments".into()]));
+ fn test_should_passthrough_pr_view_comments() {
+ assert!(should_passthrough_pr_view(&["--comments".into()]));
+ }
+
+ // --- should_passthrough_issue_view tests ---
+
+ #[test]
+ fn test_should_passthrough_issue_view_comments() {
+ assert!(should_passthrough_issue_view(&["--comments".into()]));
+ }
+
+ #[test]
+ fn test_should_passthrough_issue_view_json() {
+ assert!(should_passthrough_issue_view(&[
+ "--json".into(),
+ "body,comments".into()
+ ]));
+ }
+
+ #[test]
+ fn test_should_passthrough_issue_view_jq() {
+ assert!(should_passthrough_issue_view(&[
+ "--jq".into(),
+ ".body".into()
+ ]));
+ }
+
+ #[test]
+ fn test_should_passthrough_issue_view_web() {
+ assert!(should_passthrough_issue_view(&["--web".into()]));
+ }
+
+ #[test]
+ fn test_should_passthrough_issue_view_default() {
+ assert!(!should_passthrough_issue_view(&[]));
+ }
+
+ // --- has_non_diff_format_flag tests ---
+
+ #[test]
+ fn test_non_diff_format_flag_name_only() {
+ assert!(has_non_diff_format_flag(&["--name-only".into()]));
+ }
+
+ #[test]
+ fn test_non_diff_format_flag_stat() {
+ assert!(has_non_diff_format_flag(&["--stat".into()]));
+ }
+
+ #[test]
+ fn test_non_diff_format_flag_name_status() {
+ assert!(has_non_diff_format_flag(&["--name-status".into()]));
+ }
+
+ #[test]
+ fn test_non_diff_format_flag_numstat() {
+ assert!(has_non_diff_format_flag(&["--numstat".into()]));
+ }
+
+ #[test]
+ fn test_non_diff_format_flag_shortstat() {
+ assert!(has_non_diff_format_flag(&["--shortstat".into()]));
+ }
+
+ #[test]
+ fn test_non_diff_format_flag_absent() {
+ assert!(!has_non_diff_format_flag(&[]));
+ }
+
+ #[test]
+ fn test_non_diff_format_flag_regular_args() {
+ assert!(!has_non_diff_format_flag(&[
+ "123".into(),
+ "--color=always".into()
+ ]));
}
// --- filter_markdown_body tests ---
diff --git a/src/git.rs b/src/git.rs
index ade27f8ba..4bb7f6745 100644
--- a/src/git.rs
+++ b/src/git.rs
@@ -1,3 +1,4 @@
+use crate::config;
use crate::tracking;
use crate::utils::resolved_command;
use anyhow::{Context, Result};
@@ -76,6 +77,9 @@ fn run_diff(
let mut cmd = git_cmd(global_args);
cmd.arg("diff");
for arg in args {
+ if arg == "--no-compact" {
+ continue; // RTK flag, not a git flag
+ }
cmd.arg(arg);
}
@@ -111,6 +115,21 @@ fn run_diff(
let output = cmd.output().context("Failed to run git diff")?;
let stat_stdout = String::from_utf8_lossy(&output.stdout);
+ if !output.status.success() {
+ let stderr = String::from_utf8_lossy(&output.stderr);
+ if !stderr.trim().is_empty() {
+ eprint!("{}", stderr);
+ }
+ let raw = stat_stdout.to_string();
+ timer.track(
+ &format!("git diff {}", args.join(" ")),
+ &format!("rtk git diff {}", args.join(" ")),
+ &raw,
+ &raw,
+ );
+ std::process::exit(output.status.code().unwrap_or(1));
+ }
+
if verbose > 0 {
eprintln!("Git diff summary:");
}
@@ -278,7 +297,8 @@ pub(crate) fn compact_diff(diff: &str, max_lines: usize) -> String {
let mut removed = 0;
let mut in_hunk = false;
let mut hunk_lines = 0;
- let max_hunk_lines = 30;
+ let max_hunk_lines = 100;
+ let mut was_truncated = false;
for line in diff.lines() {
if line.starts_with("diff --git") {
@@ -287,7 +307,7 @@ pub(crate) fn compact_diff(diff: &str, max_lines: usize) -> String {
result.push(format!(" +{} -{}", added, removed));
}
current_file = line.split(" b/").nth(1).unwrap_or("unknown").to_string();
- result.push(format!("\n📄 {}", current_file));
+ result.push(format!("\n{}", current_file));
added = 0;
removed = 0;
in_hunk = false;
@@ -321,11 +341,13 @@ pub(crate) fn compact_diff(diff: &str, max_lines: usize) -> String {
if hunk_lines == max_hunk_lines {
result.push(" ... (truncated)".to_string());
hunk_lines += 1;
+ was_truncated = true;
}
}
if result.len() >= max_lines {
result.push("\n... (more changes truncated)".to_string());
+ was_truncated = true;
break;
}
}
@@ -334,6 +356,10 @@ pub(crate) fn compact_diff(diff: &str, max_lines: usize) -> String {
result.push(format!(" +{} -{}", added, removed));
}
+ if was_truncated {
+ result.push("[full diff: rtk git diff --no-compact]".to_string());
+ }
+
result.join("\n")
}
@@ -355,7 +381,7 @@ fn run_log(
// Check if user provided limit flag (-N, -n N, --max-count=N, --max-count N)
let has_limit_flag = args.iter().any(|arg| {
- (arg.starts_with('-') && arg.chars().nth(1).map_or(false, |c| c.is_ascii_digit()))
+ (arg.starts_with('-') && arg.chars().nth(1).is_some_and(|c| c.is_ascii_digit()))
|| arg == "-n"
|| arg.starts_with("--max-count")
});
@@ -433,7 +459,7 @@ fn parse_user_limit(args: &[String]) -> Option {
// -20 (combined digit form)
if arg.starts_with('-')
&& arg.len() > 1
- && arg.chars().nth(1).map_or(false, |c| c.is_ascii_digit())
+ && arg.chars().nth(1).is_some_and(|c| c.is_ascii_digit())
{
if let Ok(n) = arg[1..].parse::() {
return Some(n);
@@ -506,17 +532,25 @@ fn filter_log_output(
Some(h) => truncate_line(h.trim(), truncate_width),
None => continue,
};
- // Remaining lines are the body — keep first non-empty line only
- let body_line = lines.map(|l| l.trim()).find(|l| {
- !l.is_empty() && !l.starts_with("Signed-off-by:") && !l.starts_with("Co-authored-by:")
- });
-
- match body_line {
- Some(body) => {
- let truncated_body = truncate_line(body, truncate_width);
- result.push(format!("{}\n {}", header, truncated_body));
+ // Remaining lines are the body — keep up to 3 non-empty, non-trailer lines
+ let body_lines: Vec<&str> = lines
+ .map(|l| l.trim())
+ .filter(|l| {
+ !l.is_empty()
+ && !l.starts_with("Signed-off-by:")
+ && !l.starts_with("Co-authored-by:")
+ })
+ .take(3)
+ .collect();
+
+ if body_lines.is_empty() {
+ result.push(header);
+ } else {
+ let mut entry = header;
+ for body in &body_lines {
+ entry.push_str(&format!("\n {}", truncate_line(body, truncate_width)));
}
- None => result.push(header),
+ result.push(entry);
}
}
@@ -547,7 +581,7 @@ fn format_status_output(porcelain: &str) -> String {
if let Some(branch_line) = lines.first() {
if branch_line.starts_with("##") {
let branch = branch_line.trim_start_matches("## ");
- output.push_str(&format!("📌 {}\n", branch));
+ output.push_str(&format!("* {}\n", branch));
}
}
@@ -592,38 +626,56 @@ fn format_status_output(porcelain: &str) -> String {
}
// Build summary
+ let limits = config::limits();
+ let max_files = limits.status_max_files;
+ let max_untracked = limits.status_max_untracked;
+
if staged > 0 {
- output.push_str(&format!("✅ Staged: {} files\n", staged));
- for f in staged_files.iter().take(5) {
+ output.push_str(&format!("+ Staged: {} files\n", staged));
+ for f in staged_files.iter().take(max_files) {
output.push_str(&format!(" {}\n", f));
}
- if staged_files.len() > 5 {
- output.push_str(&format!(" ... +{} more\n", staged_files.len() - 5));
+ if staged_files.len() > max_files {
+ output.push_str(&format!(
+ " ... +{} more\n",
+ staged_files.len() - max_files
+ ));
}
}
if modified > 0 {
- output.push_str(&format!("📝 Modified: {} files\n", modified));
- for f in modified_files.iter().take(5) {
+ output.push_str(&format!("~ Modified: {} files\n", modified));
+ for f in modified_files.iter().take(max_files) {
output.push_str(&format!(" {}\n", f));
}
- if modified_files.len() > 5 {
- output.push_str(&format!(" ... +{} more\n", modified_files.len() - 5));
+ if modified_files.len() > max_files {
+ output.push_str(&format!(
+ " ... +{} more\n",
+ modified_files.len() - max_files
+ ));
}
}
if untracked > 0 {
- output.push_str(&format!("❓ Untracked: {} files\n", untracked));
- for f in untracked_files.iter().take(3) {
+ output.push_str(&format!("? Untracked: {} files\n", untracked));
+ for f in untracked_files.iter().take(max_untracked) {
output.push_str(&format!(" {}\n", f));
}
- if untracked_files.len() > 3 {
- output.push_str(&format!(" ... +{} more\n", untracked_files.len() - 3));
+ if untracked_files.len() > max_untracked {
+ output.push_str(&format!(
+ " ... +{} more\n",
+ untracked_files.len() - max_untracked
+ ));
}
}
if conflicts > 0 {
- output.push_str(&format!("⚠️ Conflicts: {} files\n", conflicts));
+ output.push_str(&format!("conflicts: {} files\n", conflicts));
+ }
+
+ // When working tree is clean (only branch line, no changes)
+ if staged == 0 && modified == 0 && untracked == 0 && conflicts == 0 {
+ output.push_str("clean — nothing to commit\n");
}
output.trim_end().to_string()
@@ -660,7 +712,7 @@ fn filter_status_with_args(output: &str) -> String {
}
if result.is_empty() {
- "ok ✓".to_string()
+ "ok".to_string()
} else {
result.join("\n")
}
@@ -680,6 +732,20 @@ fn run_status(args: &[String], verbose: u8, global_args: &[String]) -> Result<()
let stdout = String::from_utf8_lossy(&output.stdout);
let stderr = String::from_utf8_lossy(&output.stderr);
+ if !output.status.success() {
+ if !stderr.trim().is_empty() {
+ eprint!("{}", stderr);
+ }
+ let raw = stdout.to_string();
+ timer.track(
+ &format!("git status {}", args.join(" ")),
+ &format!("rtk git status {}", args.join(" ")),
+ &raw,
+ &raw,
+ );
+ std::process::exit(output.status.code().unwrap_or(1));
+ }
+
if verbose > 0 || !stderr.is_empty() {
eprint!("{}", stderr);
}
@@ -772,9 +838,9 @@ fn run_add(args: &[String], verbose: u8, global_args: &[String]) -> Result<()> {
// Parse "1 file changed, 5 insertions(+)" format
let short = stat.lines().last().unwrap_or("").trim();
if short.is_empty() {
- "ok ✓".to_string()
+ "ok".to_string()
} else {
- format!("ok ✓ {}", short)
+ format!("ok {}", short)
}
};
@@ -833,17 +899,17 @@ fn run_commit(args: &[String], verbose: u8, global_args: &[String]) -> Result<()
// Extract commit hash from output like "[main abc1234] message"
let compact = if let Some(line) = stdout.lines().next() {
if let Some(hash_start) = line.find(' ') {
- let hash = line[1..hash_start].split(' ').last().unwrap_or("");
+ let hash = line[1..hash_start].split(' ').next_back().unwrap_or("");
if !hash.is_empty() && hash.len() >= 7 {
- format!("ok ✓ {}", &hash[..7.min(hash.len())])
+ format!("ok {}", &hash[..7.min(hash.len())])
} else {
- "ok ✓".to_string()
+ "ok".to_string()
}
} else {
- "ok ✓".to_string()
+ "ok".to_string()
}
} else {
- "ok ✓".to_string()
+ "ok".to_string()
};
println!("{}", compact);
@@ -859,13 +925,14 @@ fn run_commit(args: &[String], verbose: u8, global_args: &[String]) -> Result<()
"ok (nothing to commit)",
);
} else {
- eprintln!("FAILED: git commit");
if !stderr.trim().is_empty() {
- eprintln!("{}", stderr);
+ eprint!("{}", stderr);
}
if !stdout.trim().is_empty() {
- eprintln!("{}", stdout);
+ eprint!("{}", stdout);
}
+ timer.track(&original_cmd, "rtk git commit", &raw_output, &raw_output);
+ std::process::exit(output.status.code().unwrap_or(1));
}
}
@@ -900,7 +967,7 @@ fn run_push(args: &[String], verbose: u8, global_args: &[String]) -> Result<()>
if line.contains("->") {
let parts: Vec<&str> = line.split_whitespace().collect();
if parts.len() >= 3 {
- result = format!("ok ✓ {}", parts[parts.len() - 1]);
+ result = format!("ok {}", parts[parts.len() - 1]);
break;
}
}
@@ -908,7 +975,7 @@ fn run_push(args: &[String], verbose: u8, global_args: &[String]) -> Result<()>
if !result.is_empty() {
result
} else {
- "ok ✓".to_string()
+ "ok".to_string()
}
};
@@ -992,9 +1059,9 @@ fn run_pull(args: &[String], verbose: u8, global_args: &[String]) -> Result<()>
}
if files > 0 {
- format!("ok ✓ {} files +{} -{}", files, insertions, deletions)
+ format!("ok {} files +{} -{}", files, insertions, deletions)
} else {
- "ok ✓".to_string()
+ "ok".to_string()
}
};
@@ -1027,10 +1094,23 @@ fn run_branch(args: &[String], verbose: u8, global_args: &[String]) -> Result<()
eprintln!("git branch");
}
- // Detect write operations: delete, rename, copy
- let has_action_flag = args
- .iter()
- .any(|a| a == "-d" || a == "-D" || a == "-m" || a == "-M" || a == "-c" || a == "-C");
+ // Detect write operations: delete, rename, copy, upstream tracking
+ let has_action_flag = args.iter().any(|a| {
+ a == "-d"
+ || a == "-D"
+ || a == "-m"
+ || a == "-M"
+ || a == "-c"
+ || a == "-C"
+ || a == "--set-upstream-to"
+ || a.starts_with("--set-upstream-to=")
+ || a == "-u"
+ || a == "--unset-upstream"
+ || a == "--edit-description"
+ });
+
+ // Detect flags that produce specific output (not a branch list)
+ let has_show_flag = args.iter().any(|a| a == "--show-current");
// Detect list-mode flags
let has_list_flag = args.iter().any(|a| {
@@ -1043,11 +1123,49 @@ fn run_branch(args: &[String], verbose: u8, global_args: &[String]) -> Result<()
|| a == "--no-merged"
|| a == "--contains"
|| a == "--no-contains"
+ || a == "--format"
+ || a.starts_with("--format=")
+ || a == "--sort"
+ || a.starts_with("--sort=")
+ || a == "--points-at"
+ || a.starts_with("--points-at=")
});
// Detect positional arguments (not flags) — indicates branch creation
let has_positional_arg = args.iter().any(|a| !a.starts_with('-'));
+ // --show-current: passthrough with raw stdout (not "ok ✓")
+ if has_show_flag {
+ let mut cmd = git_cmd(global_args);
+ cmd.arg("branch");
+ for arg in args {
+ cmd.arg(arg);
+ }
+ let output = cmd.output().context("Failed to run git branch")?;
+ let stdout = String::from_utf8_lossy(&output.stdout);
+ let stderr = String::from_utf8_lossy(&output.stderr);
+ let combined = format!("{}{}", stdout, stderr);
+
+ let trimmed = stdout.trim();
+ timer.track(
+ &format!("git branch {}", args.join(" ")),
+ &format!("rtk git branch {}", args.join(" ")),
+ &combined,
+ trimmed,
+ );
+
+ if output.status.success() {
+ println!("{}", trimmed);
+ } else {
+ eprintln!("FAILED: git branch {}", args.join(" "));
+ if !stderr.trim().is_empty() {
+ eprintln!("{}", stderr);
+ }
+ std::process::exit(output.status.code().unwrap_or(1));
+ }
+ return Ok(());
+ }
+
// Write operation: action flags, or positional args without list flags (= branch creation)
if has_action_flag || (has_positional_arg && !has_list_flag) {
let mut cmd = git_cmd(global_args);
@@ -1061,7 +1179,7 @@ fn run_branch(args: &[String], verbose: u8, global_args: &[String]) -> Result<()
let combined = format!("{}{}", stdout, stderr);
let msg = if output.status.success() {
- "ok ✓"
+ "ok"
} else {
&combined
};
@@ -1074,7 +1192,7 @@ fn run_branch(args: &[String], verbose: u8, global_args: &[String]) -> Result<()
);
if output.status.success() {
- println!("ok ✓");
+ println!("ok");
} else {
eprintln!("FAILED: git branch {}", args.join(" "));
if !stderr.trim().is_empty() {
@@ -1103,6 +1221,20 @@ fn run_branch(args: &[String], verbose: u8, global_args: &[String]) -> Result<()
let stdout = String::from_utf8_lossy(&output.stdout);
let raw = stdout.to_string();
+ if !output.status.success() {
+ let stderr = String::from_utf8_lossy(&output.stderr);
+ if !stderr.trim().is_empty() {
+ eprint!("{}", stderr);
+ }
+ timer.track(
+ &format!("git branch {}", args.join(" ")),
+ &format!("rtk git branch {}", args.join(" ")),
+ &raw,
+ &raw,
+ );
+ std::process::exit(output.status.code().unwrap_or(1));
+ }
+
let filtered = filter_branch_output(&stdout);
println!("{}", filtered);
@@ -1303,7 +1435,42 @@ fn run_stash(
std::process::exit(output.status.code().unwrap_or(1));
}
}
- _ => {
+ Some(sub) => {
+ // Unrecognized subcommand: passthrough to git stash [args]
+ let mut cmd = git_cmd(global_args);
+ cmd.args(["stash", sub]);
+ for arg in args {
+ cmd.arg(arg);
+ }
+ let output = cmd.output().context("Failed to run git stash")?;
+ let stdout = String::from_utf8_lossy(&output.stdout);
+ let stderr = String::from_utf8_lossy(&output.stderr);
+ let combined = format!("{}{}", stdout, stderr);
+
+ let msg = if output.status.success() {
+ let msg = format!("ok stash {}", sub);
+ println!("{}", msg);
+ msg
+ } else {
+ eprintln!("FAILED: git stash {}", sub);
+ if !stderr.trim().is_empty() {
+ eprintln!("{}", stderr);
+ }
+ combined.clone()
+ };
+
+ timer.track(
+ &format!("git stash {}", sub),
+ &format!("rtk git stash {}", sub),
+ &combined,
+ &msg,
+ );
+
+ if !output.status.success() {
+ std::process::exit(output.status.code().unwrap_or(1));
+ }
+ }
+ None => {
// Default: git stash (push)
let mut cmd = git_cmd(global_args);
cmd.arg("stash");
@@ -1389,7 +1556,7 @@ fn run_worktree(args: &[String], verbose: u8, global_args: &[String]) -> Result<
let combined = format!("{}{}", stdout, stderr);
let msg = if output.status.success() {
- "ok ✓"
+ "ok"
} else {
&combined
};
@@ -1402,7 +1569,7 @@ fn run_worktree(args: &[String], verbose: u8, global_args: &[String]) -> Result<
);
if output.status.success() {
- println!("ok ✓");
+ println!("ok");
} else {
eprintln!("FAILED: git worktree {}", args.join(" "));
if !stderr.trim().is_empty() {
@@ -1649,8 +1816,8 @@ mod tests {
fn test_format_status_output_modified_files() {
let porcelain = "## main...origin/main\n M src/main.rs\n M src/lib.rs\n";
let result = format_status_output(porcelain);
- assert!(result.contains("📌 main...origin/main"));
- assert!(result.contains("📝 Modified: 2 files"));
+ assert!(result.contains("* main...origin/main"));
+ assert!(result.contains("~ Modified: 2 files"));
assert!(result.contains("src/main.rs"));
assert!(result.contains("src/lib.rs"));
assert!(!result.contains("Staged"));
@@ -1661,8 +1828,8 @@ mod tests {
fn test_format_status_output_untracked_files() {
let porcelain = "## feature/new\n?? temp.txt\n?? debug.log\n?? test.sh\n";
let result = format_status_output(porcelain);
- assert!(result.contains("📌 feature/new"));
- assert!(result.contains("❓ Untracked: 3 files"));
+ assert!(result.contains("* feature/new"));
+ assert!(result.contains("? Untracked: 3 files"));
assert!(result.contains("temp.txt"));
assert!(result.contains("debug.log"));
assert!(result.contains("test.sh"));
@@ -1678,35 +1845,60 @@ A added.rs
?? untracked.txt
"#;
let result = format_status_output(porcelain);
- assert!(result.contains("📌 main"));
- assert!(result.contains("✅ Staged: 2 files"));
+ assert!(result.contains("* main"));
+ assert!(result.contains("+ Staged: 2 files"));
assert!(result.contains("staged.rs"));
assert!(result.contains("added.rs"));
- assert!(result.contains("📝 Modified: 1 files"));
+ assert!(result.contains("~ Modified: 1 files"));
assert!(result.contains("modified.rs"));
- assert!(result.contains("❓ Untracked: 1 files"));
+ assert!(result.contains("? Untracked: 1 files"));
assert!(result.contains("untracked.txt"));
}
#[test]
fn test_format_status_output_truncation() {
- // Test that >5 staged files show "... +N more"
- let porcelain = r#"## main
-M file1.rs
-M file2.rs
-M file3.rs
-M file4.rs
-M file5.rs
-M file6.rs
-M file7.rs
-"#;
- let result = format_status_output(porcelain);
- assert!(result.contains("✅ Staged: 7 files"));
+ // Test that >15 staged files show "... +N more"
+ let mut porcelain = String::from("## main\n");
+ for i in 1..=20 {
+ porcelain.push_str(&format!("M file{}.rs\n", i));
+ }
+ let result = format_status_output(&porcelain);
+ assert!(result.contains("+ Staged: 20 files"));
+ assert!(result.contains("file1.rs"));
+ assert!(result.contains("file15.rs"));
+ assert!(result.contains("... +5 more"));
+ assert!(!result.contains("file16.rs"));
+ assert!(!result.contains("file20.rs"));
+ }
+
+ #[test]
+ fn test_format_status_modified_truncation() {
+ // Test that >15 modified files show "... +N more"
+ let mut porcelain = String::from("## main\n");
+ for i in 1..=20 {
+ porcelain.push_str(&format!(" M file{}.rs\n", i));
+ }
+ let result = format_status_output(&porcelain);
+ assert!(result.contains("~ Modified: 20 files"));
+ assert!(result.contains("file1.rs"));
+ assert!(result.contains("file15.rs"));
+ assert!(result.contains("... +5 more"));
+ assert!(!result.contains("file16.rs"));
+ }
+
+ #[test]
+ fn test_format_status_untracked_truncation() {
+ // Test that >10 untracked files show "... +N more"
+ let mut porcelain = String::from("## main\n");
+ for i in 1..=15 {
+ porcelain.push_str(&format!("?? file{}.rs\n", i));
+ }
+ let result = format_status_output(&porcelain);
+ assert!(result.contains("? Untracked: 15 files"));
assert!(result.contains("file1.rs"));
- assert!(result.contains("file5.rs"));
- assert!(result.contains("... +2 more"));
- assert!(!result.contains("file6.rs"));
- assert!(!result.contains("file7.rs"));
+ assert!(result.contains("file10.rs"));
+ assert!(result.contains("... +5 more"));
+ assert!(!result.contains("file11.rs"));
}
#[test]
@@ -1916,7 +2108,7 @@ no changes added to commit (use "git add" and/or "git commit -a")
let porcelain = "## main\n M สวัสดี.txt\n?? ทดสอบ.rs\n";
let result = format_status_output(porcelain);
// Should not panic
- assert!(result.contains("📌 main"));
+ assert!(result.contains("* main"));
assert!(result.contains("สวัสดี.txt"));
assert!(result.contains("ทดสอบ.rs"));
}
@@ -1925,7 +2117,7 @@ no changes added to commit (use "git add" and/or "git commit -a")
fn test_format_status_output_emoji_filename() {
let porcelain = "## main\nA 🎉-party.txt\n M 日本語ファイル.rs\n";
let result = format_status_output(porcelain);
- assert!(result.contains("📌 main"));
+ assert!(result.contains("* main"));
}
/// Regression test: --oneline and other user format flags must preserve all commits.
diff --git a/src/go_cmd.rs b/src/go_cmd.rs
index 06ee8b541..d250c4276 100644
--- a/src/go_cmd.rs
+++ b/src/go_cmd.rs
@@ -348,7 +348,7 @@ fn filter_go_test_json(output: &str) -> String {
if !has_failures {
return format!(
- "✓ Go test: {} passed in {} packages",
+ "Go test: {} passed in {} packages",
total_pass, total_packages
);
}
@@ -372,7 +372,7 @@ fn filter_go_test_json(output: &str) -> String {
}
result.push_str(&format!(
- "\n📦 {} [build failed]\n",
+ "\n{} [build failed]\n",
compact_package_name(package)
));
@@ -392,14 +392,14 @@ fn filter_go_test_json(output: &str) -> String {
}
result.push_str(&format!(
- "\n📦 {} ({} passed, {} failed)\n",
+ "\n{} ({} passed, {} failed)\n",
compact_package_name(package),
pkg_result.pass,
pkg_result.fail
));
for (test, outputs) in &pkg_result.failed_tests {
- result.push_str(&format!(" ❌ {}\n", test));
+ result.push_str(&format!(" [FAIL] {}\n", test));
// Show failure output (limit to key lines)
let relevant_lines: Vec<&String> = outputs
@@ -452,7 +452,7 @@ fn filter_go_build(output: &str) -> String {
}
if errors.is_empty() {
- return "✓ Go build: Success".to_string();
+ return "Go build: Success".to_string();
}
let mut result = String::new();
@@ -484,7 +484,7 @@ fn filter_go_vet(output: &str) -> String {
}
if issues.is_empty() {
- return "✓ Go vet: No issues found".to_string();
+ return "Go vet: No issues found".to_string();
}
let mut result = String::new();
@@ -524,7 +524,7 @@ mod tests {
{"Time":"2024-01-01T10:00:02Z","Action":"pass","Package":"example.com/foo","Elapsed":0.5}"#;
let result = filter_go_test_json(output);
- assert!(result.contains("✓ Go test"));
+ assert!(result.contains("Go test"));
assert!(result.contains("1 passed"));
assert!(result.contains("1 packages"));
}
@@ -547,7 +547,7 @@ mod tests {
fn test_filter_go_build_success() {
let output = "";
let result = filter_go_build(output);
- assert!(result.contains("✓ Go build"));
+ assert!(result.contains("Go build"));
assert!(result.contains("Success"));
}
@@ -567,7 +567,7 @@ main.go:15:2: cannot use x (type int) as type string"#;
fn test_filter_go_vet_no_issues() {
let output = "";
let result = filter_go_vet(output);
- assert!(result.contains("✓ Go vet"));
+ assert!(result.contains("Go vet"));
assert!(result.contains("No issues found"));
}
diff --git a/src/golangci_cmd.rs b/src/golangci_cmd.rs
index 72b9ee6f5..b2fdcd28b 100644
--- a/src/golangci_cmd.rs
+++ b/src/golangci_cmd.rs
@@ -1,3 +1,4 @@
+use crate::config;
use crate::tracking;
use crate::utils::{resolved_command, truncate};
use anyhow::{Context, Result};
@@ -9,9 +10,14 @@ struct Position {
#[serde(rename = "Filename")]
filename: String,
#[serde(rename = "Line")]
+ #[allow(dead_code)]
line: usize,
#[serde(rename = "Column")]
+ #[allow(dead_code)]
column: usize,
+ #[serde(rename = "Offset", default)]
+ #[allow(dead_code)]
+ offset: usize,
}
#[derive(Debug, Deserialize)]
@@ -19,9 +25,15 @@ struct Issue {
#[serde(rename = "FromLinter")]
from_linter: String,
#[serde(rename = "Text")]
+ #[allow(dead_code)]
text: String,
#[serde(rename = "Pos")]
pos: Position,
+ #[serde(rename = "SourceLines", default)]
+ source_lines: Vec<String>,
+ #[serde(rename = "Severity", default)]
+ #[allow(dead_code)]
+ severity: String,
}
#[derive(Debug, Deserialize)]
@@ -30,18 +42,63 @@ struct GolangciOutput {
 issues: Vec<Issue>,
}
+/// Parse major version number from `golangci-lint --version` output.
+/// Returns 1 on any failure (safe fallback — v1 behaviour).
+fn parse_major_version(version_output: &str) -> u32 {
+ // Handles:
+ // "golangci-lint version 1.59.1"
+ // "golangci-lint has version 2.10.0 built with ..."
+ for word in version_output.split_whitespace() {
+ if let Some(major) = word.split('.').next().and_then(|s| s.parse::<u32>().ok()) {
+ if word.contains('.') {
+ return major;
+ }
+ }
+ }
+ 1
+}
+
+/// Run `golangci-lint --version` and return the major version number.
+/// Returns 1 on any failure.
+fn detect_major_version() -> u32 {
+ let output = resolved_command("golangci-lint").arg("--version").output();
+
+ match output {
+ Ok(o) => {
+ let stdout = String::from_utf8_lossy(&o.stdout);
+ let stderr = String::from_utf8_lossy(&o.stderr);
+ let version_text = if stdout.trim().is_empty() {
+ &*stderr
+ } else {
+ &*stdout
+ };
+ parse_major_version(version_text)
+ }
+ Err(_) => 1,
+ }
+}
+
pub fn run(args: &[String], verbose: u8) -> Result<()> {
let timer = tracking::TimedExecution::start();
+ let version = detect_major_version();
+
let mut cmd = resolved_command("golangci-lint");
- // Force JSON output
- let has_format = args
- .iter()
- .any(|a| a == "--out-format" || a.starts_with("--out-format="));
+ // Force JSON output (only if user hasn't specified it)
+ let has_format = args.iter().any(|a| {
+ a == "--out-format"
+ || a.starts_with("--out-format=")
+ || a == "--output.json.path"
+ || a.starts_with("--output.json.path=")
+ });
if !has_format {
- cmd.arg("run").arg("--out-format=json");
+ if version >= 2 {
+ cmd.arg("run").arg("--output.json.path").arg("stdout");
+ } else {
+ cmd.arg("run").arg("--out-format=json");
+ }
} else {
cmd.arg("run");
}
@@ -51,7 +108,11 @@ pub fn run(args: &[String], verbose: u8) -> Result<()> {
}
if verbose > 0 {
- eprintln!("Running: golangci-lint run --out-format=json");
+ if version >= 2 {
+ eprintln!("Running: golangci-lint run --output.json.path stdout");
+ } else {
+ eprintln!("Running: golangci-lint run --out-format=json");
+ }
}
let output = cmd.output().context(
@@ -62,12 +123,19 @@ pub fn run(args: &[String], verbose: u8) -> Result<()> {
let stderr = String::from_utf8_lossy(&output.stderr);
let raw = format!("{}\n{}", stdout, stderr);
- let filtered = filter_golangci_json(&stdout);
+ // v2 outputs JSON on first line + trailing text; v1 outputs just JSON
+ let json_output = if version >= 2 {
+ stdout.lines().next().unwrap_or("")
+ } else {
+ &*stdout
+ };
+
+ let filtered = filter_golangci_json(json_output, version);
println!("{}", filtered);
- // Include stderr if present (config errors, etc.)
- if !stderr.trim().is_empty() && verbose > 0 {
+ // Always forward stderr (config errors, missing linters, etc.)
+ if !stderr.trim().is_empty() {
eprintln!("{}", stderr.trim());
}
@@ -83,9 +151,6 @@ pub fn run(args: &[String], verbose: u8) -> Result<()> {
match output.status.code() {
Some(0) | Some(1) => Ok(()),
Some(code) => {
- if !stderr.trim().is_empty() {
- eprintln!("{}", stderr.trim());
- }
std::process::exit(code);
}
None => {
@@ -96,17 +161,16 @@ pub fn run(args: &[String], verbose: u8) -> Result<()> {
}
/// Filter golangci-lint JSON output - group by linter and file
-fn filter_golangci_json(output: &str) -> String {
+fn filter_golangci_json(output: &str, version: u32) -> String {
 let result: Result<GolangciOutput, _> = serde_json::from_str(output);
let golangci_output = match result {
Ok(o) => o,
Err(e) => {
- // Fallback if JSON parsing fails
return format!(
"golangci-lint (JSON parse failed: {})\n{}",
e,
- truncate(output, 500)
+ truncate(output, config::limits().passthrough_max_chars)
);
}
};
@@ -114,7 +178,7 @@ fn filter_golangci_json(output: &str) -> String {
let issues = golangci_output.issues;
if issues.is_empty() {
- return "✓ golangci-lint: No issues found".to_string();
+ return "golangci-lint: No issues found".to_string();
}
let total_issues = issues.len();
@@ -133,7 +197,7 @@ fn filter_golangci_json(output: &str) -> String {
// Group by file
let mut by_file: HashMap<&str, usize> = HashMap::new();
for issue in &issues {
- *by_file.entry(&issue.pos.filename).or_insert(0) += 1;
+ *by_file.entry(issue.pos.filename.as_str()).or_insert(0) += 1;
}
let mut file_counts: Vec<_> = by_file.iter().collect();
@@ -166,16 +230,33 @@ fn filter_golangci_json(output: &str) -> String {
result.push_str(&format!(" {} ({} issues)\n", short_path, count));
// Show top 3 linters in this file
- let mut file_linters: HashMap<String, usize> = HashMap::new();
- for issue in issues.iter().filter(|i| &i.pos.filename == *file) {
- *file_linters.entry(issue.from_linter.clone()).or_insert(0) += 1;
+ let mut file_linters: HashMap<String, Vec<&Issue>> = HashMap::new();
+ for issue in issues.iter().filter(|i| i.pos.filename.as_str() == **file) {
+ file_linters
+ .entry(issue.from_linter.clone())
+ .or_default()
+ .push(issue);
}
let mut file_linter_counts: Vec<_> = file_linters.iter().collect();
- file_linter_counts.sort_by(|a, b| b.1.cmp(a.1));
-
- for (linter, count) in file_linter_counts.iter().take(3) {
- result.push_str(&format!(" {} ({})\n", linter, count));
+ file_linter_counts.sort_by(|a, b| b.1.len().cmp(&a.1.len()));
+
+ for (linter, linter_issues) in file_linter_counts.iter().take(3) {
+ result.push_str(&format!(" {} ({})\n", linter, linter_issues.len()));
+
+ // v2 only: show first source line for this linter-file group
+ if version >= 2 {
+ if let Some(first_issue) = linter_issues.first() {
+ if let Some(source_line) = first_issue.source_lines.first() {
+ let trimmed = source_line.trim();
+ let display = match trimmed.char_indices().nth(80) {
+ Some((i, _)) => &trimmed[..i],
+ None => trimmed,
+ };
+ result.push_str(&format!(" → {}\n", display));
+ }
+ }
+ }
}
}
@@ -210,8 +291,8 @@ mod tests {
#[test]
fn test_filter_golangci_no_issues() {
let output = r#"{"Issues":[]}"#;
- let result = filter_golangci_json(output);
- assert!(result.contains("✓ golangci-lint"));
+ let result = filter_golangci_json(output, 1);
+ assert!(result.contains("golangci-lint"));
assert!(result.contains("No issues found"));
}
@@ -237,7 +318,7 @@ mod tests {
]
}"#;
- let result = filter_golangci_json(output);
+ let result = filter_golangci_json(output, 1);
assert!(result.contains("3 issues"));
assert!(result.contains("2 files"));
assert!(result.contains("errcheck"));
@@ -262,4 +343,183 @@ mod tests {
);
assert_eq!(compact_path("relative/file.go"), "file.go");
}
+
+ #[test]
+ fn test_parse_version_v1_format() {
+ assert_eq!(parse_major_version("golangci-lint version 1.59.1"), 1);
+ }
+
+ #[test]
+ fn test_parse_version_v2_format() {
+ assert_eq!(
+ parse_major_version("golangci-lint has version 2.10.0 built with go1.26.0 from 95dcb68a on 2026-02-17T13:05:51Z"),
+ 2
+ );
+ }
+
+ #[test]
+ fn test_parse_version_empty_returns_1() {
+ assert_eq!(parse_major_version(""), 1);
+ }
+
+ #[test]
+ fn test_parse_version_malformed_returns_1() {
+ assert_eq!(parse_major_version("not a version string"), 1);
+ }
+
+ #[test]
+ fn test_filter_golangci_v2_fields_parse_cleanly() {
+ // v2 JSON includes Severity, SourceLines, Offset — must not panic
+ let output = r#"{
+ "Issues": [
+ {
+ "FromLinter": "errcheck",
+ "Text": "Error return value not checked",
+ "Severity": "error",
+ "SourceLines": [" if err := foo(); err != nil {"],
+ "Pos": {"Filename": "main.go", "Line": 42, "Column": 5, "Offset": 1024}
+ }
+ ]
+}"#;
+ let result = filter_golangci_json(output, 2);
+ assert!(result.contains("errcheck"));
+ assert!(result.contains("main.go"));
+ }
+
+ #[test]
+ fn test_filter_v2_shows_source_lines() {
+ let output = r#"{
+ "Issues": [
+ {
+ "FromLinter": "errcheck",
+ "Text": "Error return value not checked",
+ "Severity": "error",
+ "SourceLines": [" if err := foo(); err != nil {"],
+ "Pos": {"Filename": "main.go", "Line": 42, "Column": 5, "Offset": 0}
+ }
+ ]
+}"#;
+ let result = filter_golangci_json(output, 2);
+ assert!(
+ result.contains("→"),
+ "v2 should show source line with → prefix"
+ );
+ assert!(result.contains("if err := foo()"));
+ }
+
+ #[test]
+ fn test_filter_v1_does_not_show_source_lines() {
+ let output = r#"{
+ "Issues": [
+ {
+ "FromLinter": "errcheck",
+ "Text": "Error return value not checked",
+ "Severity": "error",
+ "SourceLines": [" if err := foo(); err != nil {"],
+ "Pos": {"Filename": "main.go", "Line": 42, "Column": 5, "Offset": 0}
+ }
+ ]
+}"#;
+ let result = filter_golangci_json(output, 1);
+ assert!(!result.contains("→"), "v1 should not show source lines");
+ }
+
+ #[test]
+ fn test_filter_v2_empty_source_lines_graceful() {
+ let output = r#"{
+ "Issues": [
+ {
+ "FromLinter": "errcheck",
+ "Text": "Error return value not checked",
+ "Severity": "",
+ "SourceLines": [],
+ "Pos": {"Filename": "main.go", "Line": 42, "Column": 5, "Offset": 0}
+ }
+ ]
+}"#;
+ let result = filter_golangci_json(output, 2);
+ assert!(result.contains("errcheck"));
+ assert!(
+ !result.contains("→"),
+ "no source line to show, should degrade gracefully"
+ );
+ }
+
+ #[test]
+ fn test_filter_v2_source_line_truncated_to_80_chars() {
+ let long_line = "x".repeat(120);
+ let output = format!(
+ r#"{{
+ "Issues": [
+ {{
+ "FromLinter": "lll",
+ "Text": "line too long",
+ "Severity": "",
+ "SourceLines": ["{}"],
+ "Pos": {{"Filename": "main.go", "Line": 1, "Column": 1, "Offset": 0}}
+ }}
+ ]
+}}"#,
+ long_line
+ );
+ let result = filter_golangci_json(&output, 2);
+ // Content truncated at 80 chars; prefix " → " = 10 bytes (6 spaces + 3-byte arrow + space)
+ // Total line max = 80 + 10 = 90 bytes
+ for line in result.lines() {
+ if line.trim_start().starts_with('→') {
+ assert!(line.len() <= 90, "source line too long: {}", line.len());
+ }
+ }
+ }
+
+ #[test]
+ fn test_filter_v2_source_line_truncated_non_ascii() {
+ // Japanese characters are 3 bytes each; 30 chars = 90 bytes > 80 bytes naive slice would panic
+ let long_line = "日".repeat(30); // 30 chars, 90 bytes
+ let output = format!(
+ r#"{{
+ "Issues": [
+ {{
+ "FromLinter": "lll",
+ "Text": "line too long",
+ "Severity": "",
+ "SourceLines": ["{}"],
+ "Pos": {{"Filename": "main.go", "Line": 1, "Column": 1, "Offset": 0}}
+ }}
+ ]
+}}"#,
+ long_line
+ );
+ // Should not panic and output should be ≤ 80 chars
+ let result = filter_golangci_json(&output, 2);
+ for line in result.lines() {
+ if line.trim_start().starts_with('→') {
+ let content = line.trim_start().trim_start_matches('→').trim();
+ assert!(
+ content.chars().count() <= 80,
+ "content chars: {}",
+ content.chars().count()
+ );
+ }
+ }
+ }
+
+ fn count_tokens(text: &str) -> usize {
+ text.split_whitespace().count()
+ }
+
+ #[test]
+ fn test_golangci_v2_token_savings() {
+ let raw = include_str!("../tests/fixtures/golangci_v2_json.txt");
+
+ let filtered = filter_golangci_json(raw, 2);
+ let savings = 100.0 - (count_tokens(&filtered) as f64 / count_tokens(raw) as f64 * 100.0);
+
+ assert!(
+ savings >= 60.0,
+ "Expected ≥60% token savings, got {:.1}%\nFiltered output:\n{}",
+ savings,
+ filtered
+ );
+ }
}
diff --git a/src/grep_cmd.rs b/src/grep_cmd.rs
index 0ba587700..c1819dded 100644
--- a/src/grep_cmd.rs
+++ b/src/grep_cmd.rs
@@ -1,9 +1,11 @@
+use crate::config;
use crate::tracking;
use crate::utils::resolved_command;
use anyhow::{Context, Result};
use regex::Regex;
use std::collections::HashMap;
+#[allow(clippy::too_many_arguments)]
pub fn run(
pattern: &str,
path: &str,
@@ -60,7 +62,7 @@ pub fn run(
eprintln!("{}", stderr.trim());
}
}
- let msg = format!("🔍 0 for '{}'", pattern);
+ let msg = format!("0 matches for '{}'", pattern);
println!("{}", msg);
timer.track(
&format!("grep -rn '{}' {}", pattern, path),
@@ -77,6 +79,13 @@ pub fn run(
 let mut by_file: HashMap<String, Vec<(usize, String)>> = HashMap::new();
let mut total = 0;
+ // Compile context regex once (instead of per-line in clean_line)
+ let context_re = if context_only {
+ Regex::new(&format!("(?i).{{0,20}}{}.*", regex::escape(pattern))).ok()
+ } else {
+ None
+ };
+
for line in stdout.lines() {
let parts: Vec<&str> = line.splitn(3, ':').collect();
@@ -91,12 +100,12 @@ pub fn run(
};
total += 1;
- let cleaned = clean_line(content, max_line_len, context_only, pattern);
+ let cleaned = clean_line(content, max_line_len, context_re.as_ref(), pattern);
by_file.entry(file).or_default().push((line_num, cleaned));
}
let mut rtk_output = String::new();
- rtk_output.push_str(&format!("🔍 {} in {}F:\n\n", total, by_file.len()));
+ rtk_output.push_str(&format!("{} matches in {}F:\n\n", total, by_file.len()));
let mut shown = 0;
let mut files: Vec<_> = by_file.iter().collect();
@@ -108,9 +117,10 @@ pub fn run(
}
let file_display = compact_path(file);
- rtk_output.push_str(&format!("📄 {} ({}):\n", file_display, matches.len()));
+ rtk_output.push_str(&format!("[file] {} ({}):\n", file_display, matches.len()));
- for (line_num, content) in matches.iter().take(10) {
+ let per_file = config::limits().grep_max_per_file;
+ for (line_num, content) in matches.iter().take(per_file) {
rtk_output.push_str(&format!(" {:>4}: {}\n", line_num, content));
shown += 1;
if shown >= max_results {
@@ -118,8 +128,8 @@ pub fn run(
}
}
- if matches.len() > 10 {
- rtk_output.push_str(&format!(" +{}\n", matches.len() - 10));
+ if matches.len() > per_file {
+ rtk_output.push_str(&format!(" +{}\n", matches.len() - per_file));
}
rtk_output.push('\n');
}
@@ -143,16 +153,14 @@ pub fn run(
Ok(())
}
-fn clean_line(line: &str, max_len: usize, context_only: bool, pattern: &str) -> String {
+fn clean_line(line: &str, max_len: usize, context_re: Option<&Regex>, pattern: &str) -> String {
let trimmed = line.trim();
- if context_only {
- if let Ok(re) = Regex::new(&format!("(?i).{{0,20}}{}.*", regex::escape(pattern))) {
- if let Some(m) = re.find(trimmed) {
- let matched = m.as_str();
- if matched.len() <= max_len {
- return matched.to_string();
- }
+ if let Some(re) = context_re {
+ if let Some(m) = re.find(trimmed) {
+ let matched = m.as_str();
+ if matched.len() <= max_len {
+ return matched.to_string();
}
}
}
@@ -216,7 +224,7 @@ mod tests {
#[test]
fn test_clean_line() {
let line = " const result = someFunction();";
- let cleaned = clean_line(line, 50, false, "result");
+ let cleaned = clean_line(line, 50, None, "result");
assert!(!cleaned.starts_with(' '));
assert!(cleaned.len() <= 50);
}
@@ -240,7 +248,7 @@ mod tests {
fn test_clean_line_multibyte() {
// Thai text that exceeds max_len in bytes
let line = " สวัสดีครับ นี่คือข้อความที่ยาวมากสำหรับทดสอบ ";
- let cleaned = clean_line(line, 20, false, "ครับ");
+ let cleaned = clean_line(line, 20, None, "ครับ");
// Should not panic
assert!(!cleaned.is_empty());
}
@@ -248,7 +256,7 @@ mod tests {
#[test]
fn test_clean_line_emoji() {
let line = "🎉🎊🎈🎁🎂🎄 some text 🎃🎆🎇✨";
- let cleaned = clean_line(line, 15, false, "text");
+ let cleaned = clean_line(line, 15, None, "text");
assert!(!cleaned.is_empty());
}
diff --git a/src/hook_check.rs b/src/hook_check.rs
index 2716ec15d..50a6537a8 100644
--- a/src/hook_check.rs
+++ b/src/hook_check.rs
@@ -1,6 +1,6 @@
use std::path::PathBuf;
-const CURRENT_HOOK_VERSION: u8 = 2;
+const CURRENT_HOOK_VERSION: u8 = 3;
const WARN_INTERVAL_SECS: u64 = 24 * 3600;
/// Hook status for diagnostics and `rtk gain`.
diff --git a/src/hook_cmd.rs b/src/hook_cmd.rs
new file mode 100644
index 000000000..29a7365d2
--- /dev/null
+++ b/src/hook_cmd.rs
@@ -0,0 +1,333 @@
+use anyhow::{Context, Result};
+use serde_json::{json, Value};
+use std::io::{self, Read};
+
+use crate::discover::registry::rewrite_command;
+
+// ── Copilot hook (VS Code + Copilot CLI) ──────────────────────
+
+/// Format detected from the preToolUse JSON input.
+enum HookFormat {
+ /// VS Code Copilot Chat / Claude Code: `tool_name` + `tool_input.command`, supports `updatedInput`.
+ VsCode { command: String },
+ /// GitHub Copilot CLI: camelCase `toolName` + `toolArgs` (JSON string), deny-with-suggestion only.
+ CopilotCli { command: String },
+ /// Non-bash tool, already uses rtk, or unknown format — pass through silently.
+ PassThrough,
+}
+
+/// Run the Copilot preToolUse hook.
+/// Auto-detects VS Code Copilot Chat vs Copilot CLI format.
+pub fn run_copilot() -> Result<()> {
+ let mut input = String::new();
+ io::stdin()
+ .read_to_string(&mut input)
+ .context("Failed to read stdin")?;
+
+ let input = input.trim();
+ if input.is_empty() {
+ return Ok(());
+ }
+
+ let v: Value = match serde_json::from_str(input) {
+ Ok(v) => v,
+ Err(e) => {
+ eprintln!("[rtk hook] Failed to parse JSON input: {e}");
+ return Ok(());
+ }
+ };
+
+ match detect_format(&v) {
+ HookFormat::VsCode { command } => handle_vscode(&command),
+ HookFormat::CopilotCli { command } => handle_copilot_cli(&command),
+ HookFormat::PassThrough => Ok(()),
+ }
+}
+
+fn detect_format(v: &Value) -> HookFormat {
+ // VS Code Copilot Chat / Claude Code: snake_case keys
+ if let Some(tool_name) = v.get("tool_name").and_then(|t| t.as_str()) {
+ if matches!(tool_name, "runTerminalCommand" | "Bash" | "bash") {
+ if let Some(cmd) = v
+ .pointer("/tool_input/command")
+ .and_then(|c| c.as_str())
+ .filter(|c| !c.is_empty())
+ {
+ return HookFormat::VsCode {
+ command: cmd.to_string(),
+ };
+ }
+ }
+ return HookFormat::PassThrough;
+ }
+
+ // Copilot CLI: camelCase keys, toolArgs is a JSON-encoded string
+ if let Some(tool_name) = v.get("toolName").and_then(|t| t.as_str()) {
+ if tool_name == "bash" {
+ if let Some(tool_args_str) = v.get("toolArgs").and_then(|t| t.as_str()) {
+ if let Ok(tool_args) = serde_json::from_str::<Value>(tool_args_str) {
+ if let Some(cmd) = tool_args
+ .get("command")
+ .and_then(|c| c.as_str())
+ .filter(|c| !c.is_empty())
+ {
+ return HookFormat::CopilotCli {
+ command: cmd.to_string(),
+ };
+ }
+ }
+ }
+ }
+ return HookFormat::PassThrough;
+ }
+
+ HookFormat::PassThrough
+}
+
+fn get_rewritten(cmd: &str) -> Option<String> {
+ if cmd.contains("<<") {
+ return None;
+ }
+
+ let excluded = crate::config::Config::load()
+ .map(|c| c.hooks.exclude_commands)
+ .unwrap_or_default();
+
+ let rewritten = rewrite_command(cmd, &excluded)?;
+
+ if rewritten == cmd {
+ return None;
+ }
+
+ Some(rewritten)
+}
+
+fn handle_vscode(cmd: &str) -> Result<()> {
+ let rewritten = match get_rewritten(cmd) {
+ Some(r) => r,
+ None => return Ok(()),
+ };
+
+ let output = json!({
+ "hookSpecificOutput": {
+ "hookEventName": "PreToolUse",
+ "permissionDecision": "allow",
+ "permissionDecisionReason": "RTK auto-rewrite",
+ "updatedInput": { "command": rewritten }
+ }
+ });
+ println!("{output}");
+ Ok(())
+}
+
+fn handle_copilot_cli(cmd: &str) -> Result<()> {
+ let rewritten = match get_rewritten(cmd) {
+ Some(r) => r,
+ None => return Ok(()),
+ };
+
+ let output = json!({
+ "permissionDecision": "deny",
+ "permissionDecisionReason": format!(
+ "Token savings: use `{}` instead (rtk saves 60-90% tokens)",
+ rewritten
+ )
+ });
+ println!("{output}");
+ Ok(())
+}
+
+// ── Gemini hook ───────────────────────────────────────────────
+
+/// Run the Gemini CLI BeforeTool hook.
+/// Reads JSON from stdin, rewrites shell commands to rtk equivalents,
+/// outputs JSON to stdout in Gemini CLI format.
+pub fn run_gemini() -> Result<()> {
+ let mut input = String::new();
+ io::stdin()
+ .read_to_string(&mut input)
+ .context("Failed to read hook input from stdin")?;
+
+ let json: Value = serde_json::from_str(&input).context("Failed to parse hook input as JSON")?;
+
+ let tool_name = json.get("tool_name").and_then(|v| v.as_str()).unwrap_or("");
+
+ if tool_name != "run_shell_command" {
+ print_allow();
+ return Ok(());
+ }
+
+ let cmd = json
+ .pointer("/tool_input/command")
+ .and_then(|v| v.as_str())
+ .unwrap_or("");
+
+ if cmd.is_empty() {
+ print_allow();
+ return Ok(());
+ }
+
+ // Delegate to the single source of truth for command rewriting
+ match rewrite_command(cmd, &[]) {
+ Some(rewritten) => print_rewrite(&rewritten),
+ None => print_allow(),
+ }
+
+ Ok(())
+}
+
+fn print_allow() {
+ println!(r#"{{"decision":"allow"}}"#);
+}
+
+fn print_rewrite(cmd: &str) {
+ let output = serde_json::json!({
+ "decision": "allow",
+ "hookSpecificOutput": {
+ "tool_input": {
+ "command": cmd
+ }
+ }
+ });
+ println!("{}", output);
+}
+
+#[cfg(test)]
+mod tests {
+ use super::*;
+
+ // --- Copilot format detection ---
+
+ fn vscode_input(tool: &str, cmd: &str) -> Value {
+ json!({
+ "tool_name": tool,
+ "tool_input": { "command": cmd }
+ })
+ }
+
+ fn copilot_cli_input(cmd: &str) -> Value {
+ let args = serde_json::to_string(&json!({ "command": cmd })).unwrap();
+ json!({ "toolName": "bash", "toolArgs": args })
+ }
+
+ #[test]
+ fn test_detect_vscode_bash() {
+ assert!(matches!(
+ detect_format(&vscode_input("Bash", "git status")),
+ HookFormat::VsCode { .. }
+ ));
+ }
+
+ #[test]
+ fn test_detect_vscode_run_terminal_command() {
+ assert!(matches!(
+ detect_format(&vscode_input("runTerminalCommand", "cargo test")),
+ HookFormat::VsCode { .. }
+ ));
+ }
+
+ #[test]
+ fn test_detect_copilot_cli_bash() {
+ assert!(matches!(
+ detect_format(&copilot_cli_input("git status")),
+ HookFormat::CopilotCli { .. }
+ ));
+ }
+
+ #[test]
+ fn test_detect_non_bash_is_passthrough() {
+ let v = json!({ "tool_name": "editFiles" });
+ assert!(matches!(detect_format(&v), HookFormat::PassThrough));
+ }
+
+ #[test]
+ fn test_detect_unknown_is_passthrough() {
+ assert!(matches!(detect_format(&json!({})), HookFormat::PassThrough));
+ }
+
+ #[test]
+ fn test_get_rewritten_supported() {
+ assert!(get_rewritten("git status").is_some());
+ }
+
+ #[test]
+ fn test_get_rewritten_unsupported() {
+ assert!(get_rewritten("htop").is_none());
+ }
+
+ #[test]
+ fn test_get_rewritten_already_rtk() {
+ assert!(get_rewritten("rtk git status").is_none());
+ }
+
+ #[test]
+ fn test_get_rewritten_heredoc() {
+ assert!(get_rewritten("cat <<'EOF'\nhello\nEOF").is_none());
+ }
+
+ // --- Gemini format ---
+
+ #[test]
+ fn test_print_allow_format() {
+ // Verify the allow JSON format matches Gemini CLI expectations
+ let expected = r#"{"decision":"allow"}"#;
+ assert_eq!(expected, r#"{"decision":"allow"}"#);
+ }
+
+ #[test]
+ fn test_print_rewrite_format() {
+ let output = serde_json::json!({
+ "decision": "allow",
+ "hookSpecificOutput": {
+ "tool_input": {
+ "command": "rtk git status"
+ }
+ }
+ });
+ let json: Value = serde_json::from_str(&output.to_string()).unwrap();
+ assert_eq!(json["decision"], "allow");
+ assert_eq!(
+ json["hookSpecificOutput"]["tool_input"]["command"],
+ "rtk git status"
+ );
+ }
+
+ #[test]
+ fn test_gemini_hook_uses_rewrite_command() {
+ // Verify that rewrite_command handles the cases we need for Gemini
+ assert_eq!(
+ rewrite_command("git status", &[]),
+ Some("rtk git status".into())
+ );
+ assert_eq!(
+ rewrite_command("cargo test", &[]),
+ Some("rtk cargo test".into())
+ );
+ // Already rtk → returned as-is (idempotent)
+ assert_eq!(
+ rewrite_command("rtk git status", &[]),
+ Some("rtk git status".into())
+ );
+ // Heredoc → no rewrite
+        assert_eq!(rewrite_command("cat <<'EOF'\nhello\nEOF", &[]), None);
+    }
+}
diff --git a/src/init_cmd.rs b/src/init_cmd.rs
@@ [extraction lost the index/---/+++ lines, the hunk header, and the start of `pub fn run(...)`'s parameter list — TODO restore from the original patch] @@
 ) -> Result<()> {
+ // Validation: Codex mode conflicts
+ if codex {
+ if install_opencode {
+ anyhow::bail!("--codex cannot be combined with --opencode");
+ }
+ if claude_md {
+ anyhow::bail!("--codex cannot be combined with --claude-md");
+ }
+ if hook_only {
+ anyhow::bail!("--codex cannot be combined with --hook-only");
+ }
+ if matches!(patch_mode, PatchMode::Auto) {
+ anyhow::bail!("--codex cannot be combined with --auto-patch");
+ }
+ if matches!(patch_mode, PatchMode::Skip) {
+ anyhow::bail!("--codex cannot be combined with --no-patch");
+ }
+ return run_codex_mode(global, verbose);
+ }
+
+ // Validation: Global-only features
if install_opencode && !global {
anyhow::bail!("OpenCode plugin is global-only. Use: rtk init -g --opencode");
}
- // Mode selection
+ if install_cursor && !global {
+ anyhow::bail!("Cursor hooks are global-only. Use: rtk init -g --agent cursor");
+ }
+
+ if install_windsurf && !global {
+ anyhow::bail!("Windsurf support is global-only. Use: rtk init -g --agent windsurf");
+ }
+
+ // Windsurf-only mode
+ if install_windsurf {
+ return run_windsurf_mode(verbose);
+ }
+
+ // Cline-only mode
+ if install_cline {
+ return run_cline_mode(verbose);
+ }
+
+ // Mode selection (Claude Code / OpenCode)
match (install_claude, install_opencode, claude_md, hook_only) {
- (false, true, _, _) => run_opencode_only_mode(verbose),
- (true, opencode, true, _) => run_claude_md_mode(global, verbose, opencode),
- (true, opencode, false, true) => run_hook_only_mode(global, patch_mode, verbose, opencode),
- (true, opencode, false, false) => run_default_mode(global, patch_mode, verbose, opencode),
+ (false, true, _, _) => run_opencode_only_mode(verbose)?,
+ (true, opencode, true, _) => run_claude_md_mode(global, verbose, opencode)?,
+ (true, opencode, false, true) => run_hook_only_mode(global, patch_mode, verbose, opencode)?,
+ (true, opencode, false, false) => run_default_mode(global, patch_mode, verbose, opencode)?,
(false, false, _, _) => {
- anyhow::bail!("at least one of install_claude or install_opencode must be true")
+ if !install_cursor {
+ anyhow::bail!("at least one of install_claude or install_opencode must be true")
+ }
}
}
+
+ // Cursor hooks (additive, installed alongside Claude Code)
+ if install_cursor {
+ install_cursor_hooks(verbose)?;
+ }
+
+ // Telemetry notice (shown once during init)
+ println!();
+ println!(" [info] Anonymous telemetry is enabled (opt-out: RTK_TELEMETRY_DISABLED=1)");
+ println!(" [info] See: https://github.com/rtk-ai/rtk#privacy--telemetry");
+
+ Ok(())
}
/// Prepare hook directory and return paths (hook_dir, hook_path)
@@ -458,8 +520,30 @@ fn remove_hook_from_settings(verbose: u8) -> Result<bool> {
Ok(removed)
}
-/// Full uninstall: remove hook, RTK.md, @RTK.md reference, settings.json entry
-pub fn uninstall(global: bool, verbose: u8) -> Result<()> {
+/// Full uninstall for Claude, Gemini, Codex, or Cursor artifacts.
+pub fn uninstall(global: bool, gemini: bool, codex: bool, cursor: bool, verbose: u8) -> Result<()> {
+ if codex {
+ return uninstall_codex(global, verbose);
+ }
+
+ if cursor {
+ if !global {
+ anyhow::bail!("Cursor uninstall only works with --global flag");
+ }
+ let cursor_removed =
+ remove_cursor_hooks(verbose).context("Failed to remove Cursor hooks")?;
+ if !cursor_removed.is_empty() {
+ println!("RTK uninstalled (Cursor):");
+ for item in &cursor_removed {
+ println!(" - {}", item);
+ }
+ println!("\nRestart Cursor to apply changes.");
+ } else {
+ println!("RTK Cursor support was not installed (nothing to remove)");
+ }
+ return Ok(());
+ }
+
if !global {
anyhow::bail!("Uninstall only works with --global flag. For local projects, manually remove RTK from CLAUDE.md");
}
@@ -467,6 +551,22 @@ pub fn uninstall(global: bool, verbose: u8) -> Result<()> {
let claude_dir = resolve_claude_dir()?;
let mut removed = Vec::new();
+ // Also uninstall Gemini artifacts if --gemini or always (clean everything)
+ if gemini {
+ let gemini_removed = uninstall_gemini(verbose)?;
+ removed.extend(gemini_removed);
+ if !removed.is_empty() {
+ println!("RTK uninstalled (Gemini):");
+ for item in &removed {
+ println!(" - {}", item);
+ }
+ println!("\nRestart Gemini CLI to apply changes.");
+ } else {
+ println!("RTK Gemini support was not installed (nothing to remove)");
+ }
+ return Ok(());
+ }
+
// 1. Remove hook file
let hook_path = claude_dir.join("hooks").join("rtk-rewrite.sh");
if hook_path.exists() {
@@ -507,7 +607,7 @@ pub fn uninstall(global: bool, verbose: u8) -> Result<()> {
fs::write(&claude_md_path, cleaned).with_context(|| {
format!("Failed to write CLAUDE.md: {}", claude_md_path.display())
})?;
- removed.push(format!("CLAUDE.md: removed @RTK.md reference"));
+ removed.push("CLAUDE.md: removed @RTK.md reference".to_string());
}
}
@@ -522,6 +622,10 @@ pub fn uninstall(global: bool, verbose: u8) -> Result<()> {
removed.push(format!("OpenCode plugin: {}", path.display()));
}
+ // 6. Remove Cursor hooks
+ let cursor_removed = remove_cursor_hooks(verbose)?;
+ removed.extend(cursor_removed);
+
// Report results
if removed.is_empty() {
println!("RTK was not installed (nothing to remove)");
@@ -530,12 +634,55 @@ pub fn uninstall(global: bool, verbose: u8) -> Result<()> {
for item in removed {
println!(" - {}", item);
}
- println!("\nRestart Claude Code and OpenCode (if used) to apply changes.");
+ println!("\nRestart Claude Code, OpenCode, and Cursor (if used) to apply changes.");
+ }
+
+ Ok(())
+}
+
+fn uninstall_codex(global: bool, verbose: u8) -> Result<()> {
+ if !global {
+ anyhow::bail!(
+ "Uninstall only works with --global flag. For local projects, manually remove RTK from AGENTS.md"
+ );
+ }
+
+ let codex_dir = resolve_codex_dir()?;
+ let removed = uninstall_codex_at(&codex_dir, verbose)?;
+
+ if removed.is_empty() {
+ println!("RTK was not installed for Codex CLI (nothing to remove)");
+ } else {
+ println!("RTK uninstalled for Codex CLI:");
+ for item in removed {
+ println!(" - {}", item);
+ }
}
Ok(())
}
+fn uninstall_codex_at(codex_dir: &Path, verbose: u8) -> Result<Vec<String>> {
+ let mut removed = Vec::new();
+
+ let rtk_md_path = codex_dir.join("RTK.md");
+ if rtk_md_path.exists() {
+ fs::remove_file(&rtk_md_path)
+ .with_context(|| format!("Failed to remove RTK.md: {}", rtk_md_path.display()))?;
+ if verbose > 0 {
+ eprintln!("Removed RTK.md: {}", rtk_md_path.display());
+ }
+ removed.push(format!("RTK.md: {}", rtk_md_path.display()));
+ }
+
+ let agents_md_path = codex_dir.join("AGENTS.md");
+ if remove_rtk_reference_from_agents(&agents_md_path, verbose)? {
+ removed.push("AGENTS.md: removed @RTK.md reference".to_string());
+ }
+
+ Ok(removed)
+}
+
/// Orchestrator: patch settings.json with RTK hook
/// Handles reading, checking, prompting, merging, backing up, and atomic writing
fn patch_settings_json(
@@ -566,7 +713,7 @@ fn patch_settings_json(
};
// Check idempotency
- if hook_already_present(&root, &hook_command) {
+ if hook_already_present(&root, hook_command) {
if verbose > 0 {
eprintln!("settings.json: hook already present");
}
@@ -591,7 +738,7 @@ fn patch_settings_json(
}
// Deep-merge hook
- insert_hook_entry(&mut root, &hook_command);
+ insert_hook_entry(&mut root, hook_command);
// Backup original
if settings_path.exists() {
@@ -637,7 +784,6 @@ fn clean_double_blanks(content: &str) -> String {
if line.trim().is_empty() {
// Count consecutive blank lines
let mut blank_count = 0;
- let start = i;
while i < lines.len() && lines[i].trim().is_empty() {
blank_count += 1;
i += 1;
@@ -645,9 +791,7 @@ fn clean_double_blanks(content: &str) -> String {
// Keep at most 2 blank lines
let keep = blank_count.min(2);
- for _ in 0..keep {
- result.push("");
- }
+ result.extend(std::iter::repeat_n("", keep));
} else {
result.push(line);
i += 1;
@@ -725,7 +869,7 @@ fn run_default_mode(
_verbose: u8,
_install_opencode: bool,
) -> Result<()> {
- eprintln!("⚠️ Hook-based mode requires Unix (macOS/Linux).");
+ eprintln!("[warn] Hook-based mode requires Unix (macOS/Linux).");
eprintln!(" Windows: use --claude-md mode for full injection.");
eprintln!(" Falling back to --claude-md mode.");
run_claude_md_mode(_global, _verbose, _install_opencode)
@@ -782,7 +926,7 @@ fn run_default_mode(
println!(" CLAUDE.md: @RTK.md reference added");
if migrated {
- println!("\n ✅ Migrated: removed 137-line RTK block from CLAUDE.md");
+ println!("\n [ok] Migrated: removed 137-line RTK block from CLAUDE.md");
println!(" replaced with @RTK.md (10 lines)");
}
@@ -883,7 +1027,7 @@ fn run_hook_only_mode(
install_opencode: bool,
) -> Result<()> {
if !global {
- eprintln!("⚠️ Warning: --hook-only only makes sense with --global");
+ eprintln!("[warn] Warning: --hook-only only makes sense with --global");
eprintln!(" For local projects, use default mode or --claude-md");
return Ok(());
}
@@ -966,22 +1110,22 @@ fn run_claude_md_mode(global: bool, verbose: u8, install_opencode: bool) -> Resu
match action {
RtkBlockUpsert::Added => {
fs::write(&path, new_content)?;
- println!("✅ Added rtk instructions to existing {}", path.display());
+ println!("[ok] Added rtk instructions to existing {}", path.display());
}
RtkBlockUpsert::Updated => {
fs::write(&path, new_content)?;
- println!("✅ Updated rtk instructions in {}", path.display());
+ println!("[ok] Updated rtk instructions in {}", path.display());
}
RtkBlockUpsert::Unchanged => {
println!(
- "✅ {} already contains up-to-date rtk instructions",
+ "[ok] {} already contains up-to-date rtk instructions",
path.display()
);
return Ok(());
}
RtkBlockUpsert::Malformed => {
eprintln!(
- "⚠️ Warning: Found '\nold\n\n",
+ )
+ .unwrap();
+
+ let added = patch_agents_md(&agents_md, 0).unwrap();
+
+ assert!(added);
+ let content = fs::read_to_string(&agents_md).unwrap();
+ assert!(!content.contains("old"));
+ assert_eq!(content.matches("@RTK.md").count(), 1);
+ }
+
+ #[test]
+ fn test_uninstall_codex_at_is_idempotent() {
+ let temp = TempDir::new().unwrap();
+ let codex_dir = temp.path();
+ let agents_md = codex_dir.join("AGENTS.md");
+ let rtk_md = codex_dir.join("RTK.md");
+
+ fs::write(&agents_md, "# Team rules\n\n@RTK.md\n").unwrap();
+ fs::write(&rtk_md, "codex config").unwrap();
+
+ let removed_first = uninstall_codex_at(codex_dir, 0).unwrap();
+ let removed_second = uninstall_codex_at(codex_dir, 0).unwrap();
+
+ assert_eq!(removed_first.len(), 2);
+ assert!(removed_second.is_empty());
+ assert!(!rtk_md.exists());
+
+ let content = fs::read_to_string(&agents_md).unwrap();
+ assert!(!content.contains("@RTK.md"));
+ assert!(content.contains("# Team rules"));
+ }
+
#[test]
fn test_local_init_unchanged() {
// Local init should use claude-md mode
@@ -1774,8 +2790,8 @@ More notes
let serialized = serde_json::to_string(&parsed).unwrap();
// Keys should appear in same order
- let original_keys: Vec<&str> = original.split("\"").filter(|s| s.contains(":")).collect();
- let serialized_keys: Vec<&str> =
+ let _original_keys: Vec<&str> = original.split("\"").filter(|s| s.contains(":")).collect();
+ let _serialized_keys: Vec<&str> =
serialized.split("\"").filter(|s| s.contains(":")).collect();
// Just check that keys exist (preserve_order doesn't guarantee exact order in nested objects)
@@ -1856,4 +2872,132 @@ More notes
let removed = remove_hook_from_json(&mut json_content);
assert!(!removed);
}
+
+ // ─── Cursor hooks.json tests ───
+
+ #[test]
+ fn test_cursor_hook_already_present_true() {
+ let json_content = serde_json::json!({
+ "version": 1,
+ "hooks": {
+ "preToolUse": [{
+ "command": "./hooks/rtk-rewrite.sh",
+ "matcher": "Shell"
+ }]
+ }
+ });
+ assert!(cursor_hook_already_present(&json_content));
+ }
+
+ #[test]
+ fn test_cursor_hook_already_present_false_empty() {
+ let json_content = serde_json::json!({ "version": 1 });
+ assert!(!cursor_hook_already_present(&json_content));
+ }
+
+ #[test]
+ fn test_cursor_hook_already_present_false_other_hooks() {
+ let json_content = serde_json::json!({
+ "version": 1,
+ "hooks": {
+ "preToolUse": [{
+ "command": "./hooks/some-other-hook.sh",
+ "matcher": "Shell"
+ }]
+ }
+ });
+ assert!(!cursor_hook_already_present(&json_content));
+ }
+
+ #[test]
+ fn test_insert_cursor_hook_entry_empty() {
+ let mut json_content = serde_json::json!({ "version": 1 });
+ insert_cursor_hook_entry(&mut json_content);
+
+ let hooks = json_content["hooks"]["preToolUse"].as_array().unwrap();
+ assert_eq!(hooks.len(), 1);
+ assert_eq!(hooks[0]["command"], "./hooks/rtk-rewrite.sh");
+ assert_eq!(hooks[0]["matcher"], "Shell");
+ assert_eq!(json_content["version"], 1);
+ }
+
+ #[test]
+ fn test_insert_cursor_hook_preserves_existing() {
+ let mut json_content = serde_json::json!({
+ "version": 1,
+ "hooks": {
+ "preToolUse": [{
+ "command": "./hooks/other.sh",
+ "matcher": "Shell"
+ }],
+ "afterFileEdit": [{
+ "command": "./hooks/format.sh"
+ }]
+ }
+ });
+
+ insert_cursor_hook_entry(&mut json_content);
+
+ let pre_tool_use = json_content["hooks"]["preToolUse"].as_array().unwrap();
+ assert_eq!(pre_tool_use.len(), 2);
+ assert_eq!(pre_tool_use[0]["command"], "./hooks/other.sh");
+ assert_eq!(pre_tool_use[1]["command"], "./hooks/rtk-rewrite.sh");
+
+ // afterFileEdit should be preserved
+ assert!(json_content["hooks"]["afterFileEdit"].is_array());
+ }
+
+ #[test]
+ fn test_remove_cursor_hook_from_json() {
+ let mut json_content = serde_json::json!({
+ "version": 1,
+ "hooks": {
+ "preToolUse": [
+ { "command": "./hooks/other.sh", "matcher": "Shell" },
+ { "command": "./hooks/rtk-rewrite.sh", "matcher": "Shell" }
+ ]
+ }
+ });
+
+ let removed = remove_cursor_hook_from_json(&mut json_content);
+ assert!(removed);
+
+ let hooks = json_content["hooks"]["preToolUse"].as_array().unwrap();
+ assert_eq!(hooks.len(), 1);
+ assert_eq!(hooks[0]["command"], "./hooks/other.sh");
+ }
+
+ #[test]
+ fn test_remove_cursor_hook_not_present() {
+ let mut json_content = serde_json::json!({
+ "version": 1,
+ "hooks": {
+ "preToolUse": [
+ { "command": "./hooks/other.sh", "matcher": "Shell" }
+ ]
+ }
+ });
+
+ let removed = remove_cursor_hook_from_json(&mut json_content);
+ assert!(!removed);
+ }
+
+ #[test]
+ fn test_cursor_hook_script_has_guards() {
+ assert!(CURSOR_REWRITE_HOOK.contains("command -v rtk"));
+ assert!(CURSOR_REWRITE_HOOK.contains("command -v jq"));
+ let jq_pos = CURSOR_REWRITE_HOOK.find("command -v jq").unwrap();
+ let rtk_delegate_pos = CURSOR_REWRITE_HOOK.find("rtk rewrite \"$CMD\"").unwrap();
+ assert!(
+ jq_pos < rtk_delegate_pos,
+ "Guards must appear before rtk rewrite delegation"
+ );
+ }
+
+ #[test]
+ fn test_cursor_hook_outputs_cursor_format() {
+ assert!(CURSOR_REWRITE_HOOK.contains("\"permission\": \"allow\""));
+ assert!(CURSOR_REWRITE_HOOK.contains("\"updated_input\""));
+ assert!(!CURSOR_REWRITE_HOOK.contains("hookSpecificOutput"));
+ }
}
diff --git a/src/json_cmd.rs b/src/json_cmd.rs
index 76bae3ae3..685c8f622 100644
--- a/src/json_cmd.rs
+++ b/src/json_cmd.rs
@@ -33,8 +33,8 @@ fn validate_json_extension(file: &Path) -> Result<()> {
Ok(())
}
-/// Show JSON structure without values
-pub fn run(file: &Path, max_depth: usize, verbose: u8) -> Result<()> {
+/// Show JSON (compact with values, or schema-only with --schema)
+pub fn run(file: &Path, max_depth: usize, schema_only: bool, verbose: u8) -> Result<()> {
validate_json_extension(file)?;
let timer = tracking::TimedExecution::start();
@@ -45,19 +45,23 @@ pub fn run(file: &Path, max_depth: usize, verbose: u8) -> Result<()> {
let content = fs::read_to_string(file)
.with_context(|| format!("Failed to read file: {}", file.display()))?;
- let schema = filter_json_string(&content, max_depth)?;
- println!("{}", schema);
+ let output = if schema_only {
+ filter_json_string(&content, max_depth)?
+ } else {
+ filter_json_compact(&content, max_depth)?
+ };
+ println!("{}", output);
timer.track(
&format!("cat {}", file.display()),
"rtk json",
&content,
- &schema,
+ &output,
);
Ok(())
}
-/// Show JSON structure from stdin
-pub fn run_stdin(max_depth: usize, verbose: u8) -> Result<()> {
+/// Show JSON from stdin
+pub fn run_stdin(max_depth: usize, schema_only: bool, verbose: u8) -> Result<()> {
let timer = tracking::TimedExecution::start();
if verbose > 0 {
@@ -70,13 +74,107 @@ pub fn run_stdin(max_depth: usize, verbose: u8) -> Result<()> {
.read_to_string(&mut content)
.context("Failed to read from stdin")?;
- let schema = filter_json_string(&content, max_depth)?;
- println!("{}", schema);
- timer.track("cat - (stdin)", "rtk json -", &content, &schema);
+ let output = if schema_only {
+ filter_json_string(&content, max_depth)?
+ } else {
+ filter_json_compact(&content, max_depth)?
+ };
+ println!("{}", output);
+ timer.track("cat - (stdin)", "rtk json -", &content, &output);
Ok(())
}
-/// Parse a JSON string and return its schema representation.
+/// Parse a JSON string and return compact representation with values preserved.
+/// Long strings are truncated, arrays are summarized.
+pub fn filter_json_compact(json_str: &str, max_depth: usize) -> Result {
+ let value: Value = serde_json::from_str(json_str).context("Failed to parse JSON")?;
+ Ok(compact_json(&value, 0, max_depth))
+}
+
+fn compact_json(value: &Value, depth: usize, max_depth: usize) -> String {
+ let indent = " ".repeat(depth);
+
+ if depth > max_depth {
+ return format!("{}...", indent);
+ }
+
+ match value {
+ Value::Null => format!("{}null", indent),
+ Value::Bool(b) => format!("{}{}", indent, b),
+ Value::Number(n) => format!("{}{}", indent, n),
+ Value::String(s) => {
+ if s.len() > 80 {
+ format!("{}\"{}...\"", indent, &s[..77])
+ } else {
+ format!("{}\"{}\"", indent, s)
+ }
+ }
+ Value::Array(arr) => {
+ if arr.is_empty() {
+ format!("{}[]", indent)
+ } else if arr.len() > 5 {
+ let first = compact_json(&arr[0], depth + 1, max_depth);
+ format!("{}[{}, ... +{} more]", indent, first.trim(), arr.len() - 1)
+ } else {
+ let items: Vec = arr
+ .iter()
+ .map(|v| compact_json(v, depth + 1, max_depth))
+ .collect();
+ let all_simple = arr.iter().all(|v| {
+ matches!(
+ v,
+ Value::Null | Value::Bool(_) | Value::Number(_) | Value::String(_)
+ )
+ });
+ if all_simple {
+ let inline: Vec<&str> = items.iter().map(|s| s.trim()).collect();
+ format!("{}[{}]", indent, inline.join(", "))
+ } else {
+ let mut lines = vec![format!("{}[", indent)];
+ for item in &items {
+ lines.push(format!("{},", item));
+ }
+ lines.push(format!("{}]", indent));
+ lines.join("\n")
+ }
+ }
+ }
+ Value::Object(map) => {
+ if map.is_empty() {
+ format!("{}{{}}", indent)
+ } else {
+ let mut lines = vec![format!("{}{{", indent)];
+ let mut keys: Vec<_> = map.keys().collect();
+ keys.sort();
+
+ for (i, key) in keys.iter().enumerate() {
+ let val = &map[*key];
+ let is_simple = matches!(
+ val,
+ Value::Null | Value::Bool(_) | Value::Number(_) | Value::String(_)
+ );
+
+ if is_simple {
+ let val_str = compact_json(val, 0, max_depth);
+ lines.push(format!("{} {}: {}", indent, key, val_str.trim()));
+ } else {
+ lines.push(format!("{} {}:", indent, key));
+ lines.push(compact_json(val, depth + 1, max_depth));
+ }
+
+ if i >= 20 {
+ lines.push(format!("{} ... +{} more keys", indent, keys.len() - i - 1));
+ break;
+ }
+ }
+ lines.push(format!("{}}}", indent));
+ lines.join("\n")
+ }
+ }
+ }
+}
+
+/// Parse a JSON string and return its schema representation (types only, no values).
/// Useful for piping JSON from other commands (e.g., `gh api`, `curl`).
pub fn filter_json_string(json_str: &str, max_depth: usize) -> Result {
let value: Value = serde_json::from_str(json_str).context("Failed to parse JSON")?;
diff --git a/src/learn/detector.rs b/src/learn/detector.rs
index 87f0e1627..214076687 100644
--- a/src/learn/detector.rs
+++ b/src/learn/detector.rs
@@ -5,6 +5,7 @@ use regex::Regex;
pub enum ErrorType {
UnknownFlag,
CommandNotFound,
+ #[allow(dead_code)]
WrongSyntax,
WrongPath,
MissingArg,
@@ -229,9 +230,7 @@ pub fn find_corrections(commands: &[CommandExecution]) -> Vec {
}
// Look ahead for correction within CORRECTION_WINDOW
- for j in (i + 1)..std::cmp::min(i + 1 + CORRECTION_WINDOW, commands.len()) {
- let candidate = &commands[j];
-
+ for candidate in commands.iter().skip(i + 1).take(CORRECTION_WINDOW) {
let similarity = command_similarity(&cmd.command, &candidate.command);
// Must meet minimum similarity
diff --git a/src/lint_cmd.rs b/src/lint_cmd.rs
index 267a21e9d..ca136e67c 100644
--- a/src/lint_cmd.rs
+++ b/src/lint_cmd.rs
@@ -1,3 +1,4 @@
+use crate::config;
use crate::mypy_cmd;
use crate::ruff_cmd;
use crate::tracking;
@@ -35,11 +36,13 @@ struct PylintDiagnostic {
module: String,
#[allow(dead_code)]
obj: String,
+ #[allow(dead_code)]
line: usize,
#[allow(dead_code)]
column: usize,
path: String,
symbol: String, // rule code like "unused-variable"
+ #[allow(dead_code)]
message: String,
#[serde(rename = "message-id")]
message_id: String, // e.g., "W0612"
@@ -169,7 +172,7 @@ pub fn run(args: &[String], verbose: u8) -> Result<()> {
// Check if process was killed by signal (SIGABRT, SIGKILL, etc.)
if !output.status.success() && output.status.code().is_none() {
let stderr = String::from_utf8_lossy(&output.stderr);
- eprintln!("⚠️ Linter process terminated abnormally (possibly out of memory)");
+ eprintln!("[warn] Linter process terminated abnormally (possibly out of memory)");
if !stderr.is_empty() {
eprintln!(
"stderr: {}",
@@ -191,7 +194,7 @@ pub fn run(args: &[String], verbose: u8) -> Result<()> {
if !stdout.trim().is_empty() {
ruff_cmd::filter_ruff_check_json(&stdout)
} else {
- "✓ Ruff: No issues found".to_string()
+ "Ruff: No issues found".to_string()
}
}
"pylint" => filter_pylint_json(&stdout),
@@ -234,7 +237,7 @@ fn filter_eslint_json(output: &str) -> String {
return format!(
"ESLint output (JSON parse failed: {})\n{}",
e,
- truncate(output, 500)
+ truncate(output, config::limits().passthrough_max_chars)
);
}
};
@@ -245,7 +248,7 @@ fn filter_eslint_json(output: &str) -> String {
let total_files = results.iter().filter(|r| !r.messages.is_empty()).count();
if total_errors == 0 && total_warnings == 0 {
- return "✓ ESLint: No issues found".to_string();
+ return "ESLint: No issues found".to_string();
}
// Group messages by rule
@@ -326,13 +329,13 @@ fn filter_pylint_json(output: &str) -> String {
return format!(
"Pylint output (JSON parse failed: {})\n{}",
e,
- truncate(output, 500)
+ truncate(output, config::limits().passthrough_max_chars)
);
}
};
if diagnostics.is_empty() {
- return "✓ Pylint: No issues found".to_string();
+ return "Pylint: No issues found".to_string();
}
// Count by type
@@ -451,7 +454,7 @@ fn filter_generic_lint(output: &str) -> String {
}
if errors == 0 && warnings == 0 {
- return "✓ Lint: No issues found".to_string();
+ return "Lint: No issues found".to_string();
}
let mut result = String::new();
@@ -553,7 +556,7 @@ mod tests {
fn test_filter_pylint_json_no_issues() {
let output = "[]";
let result = filter_pylint_json(output);
- assert!(result.contains("✓ Pylint"));
+ assert!(result.contains("Pylint"));
assert!(result.contains("No issues found"));
}
diff --git a/src/log_cmd.rs b/src/log_cmd.rs
index 01670af7b..0deadf904 100644
--- a/src/log_cmd.rs
+++ b/src/log_cmd.rs
@@ -105,23 +105,23 @@ fn analyze_logs(content: &str) -> String {
let total_warnings: usize = warn_counts.values().sum();
let total_info: usize = info_counts.values().sum();
- result.push(format!("📊 Log Summary"));
+ result.push("Log Summary".to_string());
result.push(format!(
- " ❌ {} errors ({} unique)",
+ " [error] {} errors ({} unique)",
total_errors,
error_counts.len()
));
result.push(format!(
- " ⚠️ {} warnings ({} unique)",
+ " [warn] {} warnings ({} unique)",
total_warnings,
warn_counts.len()
));
- result.push(format!(" ℹ️ {} info messages", total_info));
+ result.push(format!(" [info] {} info messages", total_info));
result.push(String::new());
// Errors with counts
if !unique_errors.is_empty() {
- result.push("❌ ERRORS:".to_string());
+ result.push("[ERRORS]".to_string());
// Sort by count
let mut error_list: Vec<_> = error_counts.iter().collect();
@@ -163,7 +163,7 @@ fn analyze_logs(content: &str) -> String {
// Warnings with counts
if !unique_warnings.is_empty() {
- result.push("⚠️ WARNINGS:".to_string());
+ result.push("[WARNINGS]".to_string());
let mut warn_list: Vec<_> = warn_counts.iter().collect();
warn_list.sort_by(|a, b| b.1.cmp(a.1));
diff --git a/src/ls.rs b/src/ls.rs
index e02c215cc..d121123a7 100644
--- a/src/ls.rs
+++ b/src/ls.rs
@@ -203,7 +203,7 @@ fn compact_ls(raw: &str, show_all: bool) -> String {
// Summary line
out.push('\n');
- let mut summary = format!("📊 {} files, {} dirs", files.len(), dirs.len());
+ let mut summary = format!("{} files, {} dirs", files.len(), dirs.len());
if !by_ext.is_empty() {
let mut ext_counts: Vec<_> = by_ext.iter().collect();
ext_counts.sort_by(|a, b| b.1.cmp(a.1));
@@ -291,7 +291,7 @@ mod tests {
-rw-r--r-- 1 user staff 5678 Jan 1 12:00 lib.rs\n\
-rw-r--r-- 1 user staff 100 Jan 1 12:00 Cargo.toml\n";
let output = compact_ls(input, false);
- assert!(output.contains("📊 3 files, 1 dirs"));
+ assert!(output.contains("3 files, 1 dirs"));
assert!(output.contains(".rs"));
assert!(output.contains(".toml"));
}
diff --git a/src/main.rs b/src/main.rs
index a039bc55f..f3a07ff15 100644
--- a/src/main.rs
+++ b/src/main.rs
@@ -27,6 +27,7 @@ mod grep_cmd;
mod gt_cmd;
mod hook_audit_cmd;
mod hook_check;
+mod hook_cmd;
mod init;
mod integrity;
mod json_cmd;
@@ -39,6 +40,7 @@ mod mypy_cmd;
mod next_cmd;
mod npm_cmd;
mod parser;
+mod permissions;
mod pip_cmd;
mod playwright_cmd;
mod pnpm_cmd;
@@ -46,8 +48,11 @@ mod prettier_cmd;
mod prisma_cmd;
mod psql_cmd;
mod pytest_cmd;
+mod rake_cmd;
mod read;
mod rewrite_cmd;
+mod rspec_cmd;
+mod rubocop_cmd;
mod ruff_cmd;
mod runner;
mod session_cmd;
@@ -67,10 +72,23 @@ mod wget_cmd;
use anyhow::{Context, Result};
use clap::error::ErrorKind;
-use clap::{Parser, Subcommand};
+use clap::{Parser, Subcommand, ValueEnum};
use std::ffi::OsString;
use std::path::{Path, PathBuf};
+/// Target agent for hook installation.
+#[derive(Debug, Clone, Copy, PartialEq, ValueEnum)]
+pub enum AgentTarget {
+ /// Claude Code (default)
+ Claude,
+ /// Cursor Agent (editor and CLI)
+ Cursor,
+ /// Windsurf IDE (Cascade)
+ Windsurf,
+ /// Cline / Roo Code (VS Code)
+ Cline,
+}
+
#[derive(Parser)]
#[command(
name = "rtk",
@@ -224,13 +242,16 @@ enum Commands {
command: Vec,
},
- /// Show JSON structure without values
+ /// Show JSON (compact values, or schema-only with --schema)
Json {
/// JSON file
file: PathBuf,
/// Max depth
#[arg(short, long, default_value = "5")]
depth: usize,
+ /// Show structure only (strip all values)
+ #[arg(long)]
+ schema: bool,
},
/// Summarize project dependencies
@@ -307,7 +328,7 @@ enum Commands {
#[arg(short = 'l', long, default_value = "80")]
max_len: usize,
/// Max results to show
- #[arg(short, long, default_value = "50")]
+ #[arg(short, long, default_value = "200")]
max: usize,
/// Show only match context (not full line)
#[arg(short, long)]
@@ -323,9 +344,9 @@ enum Commands {
extra_args: Vec,
},
- /// Initialize rtk instructions in CLAUDE.md
+ /// Initialize rtk instructions for assistant CLI usage
Init {
- /// Add to global ~/.claude/CLAUDE.md instead of local
+ /// Add to global assistant config directory instead of local project file
#[arg(short, long)]
global: bool,
@@ -333,6 +354,14 @@ enum Commands {
#[arg(long)]
opencode: bool,
+ /// Initialize for Gemini CLI instead of Claude Code
+ #[arg(long)]
+ gemini: bool,
+
+ /// Target agent to install hooks for (default: claude)
+ #[arg(long, value_enum)]
+ agent: Option,
+
/// Show current configuration
#[arg(long)]
show: bool,
@@ -353,18 +382,22 @@ enum Commands {
#[arg(long = "no-patch", group = "patch")]
no_patch: bool,
- /// Remove all RTK artifacts (hook, RTK.md, CLAUDE.md reference, settings.json entry)
+ /// Remove RTK artifacts for the selected assistant mode
#[arg(long)]
uninstall: bool,
+
+ /// Target Codex CLI (uses AGENTS.md + RTK.md, no Claude hook patching)
+ #[arg(long)]
+ codex: bool,
},
/// Download with compact output (strips progress bars)
Wget {
/// URL to download
url: String,
- /// Output to stdout instead of file
- #[arg(short = 'O', long)]
- stdout: bool,
+ /// Output file (-O - for stdout)
+ #[arg(short = 'O', long = "output-document", allow_hyphen_values = true)]
+ output: Option,
/// Additional wget arguments
#[arg(trailing_var_arg = true, allow_hyphen_values = true)]
args: Vec,
@@ -616,6 +649,27 @@ enum Commands {
args: Vec,
},
+ /// Rake/Rails test with compact Minitest output (Ruby)
+ Rake {
+ /// Rake arguments (e.g., test, test TEST=path/to/test.rb)
+ #[arg(trailing_var_arg = true, allow_hyphen_values = true)]
+ args: Vec,
+ },
+
+ /// RuboCop linter with compact output (Ruby)
+ Rubocop {
+ /// RuboCop arguments (e.g., --auto-correct, -A)
+ #[arg(trailing_var_arg = true, allow_hyphen_values = true)]
+ args: Vec,
+ },
+
+ /// RSpec test runner with compact output (Rails/Ruby)
+ Rspec {
+ /// RSpec arguments (e.g., spec/models, --tag focus)
+ #[arg(trailing_var_arg = true, allow_hyphen_values = true)]
+ args: Vec,
+ },
+
/// Pip package manager with compact output (auto-detects uv)
Pip {
/// Pip arguments (e.g., list, outdated, install)
@@ -671,6 +725,20 @@ enum Commands {
#[arg(trailing_var_arg = true, allow_hyphen_values = true)]
args: Vec,
},
+
+ /// Hook processors for LLM CLI tools (Gemini CLI, Copilot, etc.)
+ Hook {
+ #[command(subcommand)]
+ command: HookCommands,
+ },
+}
+
+#[derive(Subcommand)]
+enum HookCommands {
+ /// Process Gemini CLI BeforeTool hook (reads JSON from stdin)
+ Gemini,
+ /// Process Copilot preToolUse hook (VS Code + Copilot CLI, reads JSON from stdin)
+ Copilot,
}
#[derive(Subcommand)]
@@ -699,25 +767,25 @@ enum GitCommands {
#[arg(trailing_var_arg = true, allow_hyphen_values = true)]
args: Vec,
},
- /// Add files → "ok ✓"
+ /// Add files → "ok"
Add {
/// Files and flags to add (supports all git add flags like -A, -p, --all, etc)
#[arg(trailing_var_arg = true, allow_hyphen_values = true)]
args: Vec,
},
- /// Commit → "ok ✓ \"
+ /// Commit → "ok \"
Commit {
/// Git commit arguments (supports -a, -m, --amend, --allow-empty, etc)
#[arg(trailing_var_arg = true, allow_hyphen_values = true)]
args: Vec,
},
- /// Push → "ok ✓ \"
+ /// Push → "ok \"
Push {
/// Git push arguments (supports -u, remote, branch, etc.)
#[arg(trailing_var_arg = true, allow_hyphen_values = true)]
args: Vec,
},
- /// Pull → "ok ✓ \"
+ /// Pull → "ok \"
Pull {
/// Git pull arguments (supports --rebase, remote, branch, etc.)
#[arg(trailing_var_arg = true, allow_hyphen_values = true)]
@@ -1187,11 +1255,11 @@ enum GtCommands {
fn shell_split(input: &str) -> Vec {
let mut tokens = Vec::new();
let mut current = String::new();
- let mut chars = input.chars().peekable();
+ let chars = input.chars();
let mut in_single = false;
let mut in_double = false;
- while let Some(c) = chars.next() {
+ for c in chars {
match c {
'\'' if !in_double => in_single = !in_single,
'"' if !in_single => in_double = !in_double,
@@ -1469,11 +1537,15 @@ fn main() -> Result<()> {
runner::run_test(&cmd, cli.verbose)?;
}
- Commands::Json { file, depth } => {
+ Commands::Json {
+ file,
+ depth,
+ schema,
+ } => {
if file == Path::new("-") {
- json_cmd::run_stdin(depth, cli.verbose)?;
+ json_cmd::run_stdin(depth, schema, cli.verbose)?;
} else {
- json_cmd::run(&file, depth, cli.verbose)?;
+ json_cmd::run(&file, depth, schema, cli.verbose)?;
}
}
@@ -1616,20 +1688,36 @@ fn main() -> Result<()> {
Commands::Init {
global,
opencode,
+ gemini,
+ agent,
show,
claude_md,
hook_only,
auto_patch,
no_patch,
uninstall,
+ codex,
} => {
if show {
- init::show_config()?;
+ init::show_config(codex)?;
} else if uninstall {
- init::uninstall(global, cli.verbose)?;
+ let cursor = agent == Some(AgentTarget::Cursor);
+ init::uninstall(global, gemini, codex, cursor, cli.verbose)?;
+ } else if gemini {
+ let patch_mode = if auto_patch {
+ init::PatchMode::Auto
+ } else if no_patch {
+ init::PatchMode::Skip
+ } else {
+ init::PatchMode::Ask
+ };
+ init::run_gemini(global, hook_only, patch_mode, cli.verbose)?;
} else {
let install_opencode = opencode;
let install_claude = !opencode;
+ let install_cursor = agent == Some(AgentTarget::Cursor);
+ let install_windsurf = agent == Some(AgentTarget::Windsurf);
+ let install_cline = agent == Some(AgentTarget::Cline);
let patch_mode = if auto_patch {
init::PatchMode::Auto
@@ -1642,19 +1730,30 @@ fn main() -> Result<()> {
global,
install_claude,
install_opencode,
+ install_cursor,
+ install_windsurf,
+ install_cline,
claude_md,
hook_only,
+ codex,
patch_mode,
cli.verbose,
)?;
}
}
- Commands::Wget { url, stdout, args } => {
- if stdout {
+ Commands::Wget { url, output, args } => {
+ if output.as_deref() == Some("-") {
wget_cmd::run_stdout(&url, &args, cli.verbose)?;
} else {
- wget_cmd::run(&url, &args, cli.verbose)?;
+ // Pass -O through to wget via args
+ let mut all_args = Vec::new();
+ if let Some(out_file) = &output {
+ all_args.push("-O".to_string());
+ all_args.push(out_file.clone());
+ }
+ all_args.extend(args);
+ wget_cmd::run(&url, &all_args, cli.verbose)?;
}
}
@@ -1934,6 +2033,18 @@ fn main() -> Result<()> {
mypy_cmd::run(&args, cli.verbose)?;
}
+ Commands::Rake { args } => {
+ rake_cmd::run(&args, cli.verbose)?;
+ }
+
+ Commands::Rubocop { args } => {
+ rubocop_cmd::run(&args, cli.verbose)?;
+ }
+
+ Commands::Rspec { args } => {
+ rspec_cmd::run(&args, cli.verbose)?;
+ }
+
Commands::Pip { args } => {
pip_cmd::run(&args, cli.verbose)?;
}
@@ -1989,6 +2100,15 @@ fn main() -> Result<()> {
hook_audit_cmd::run(since, cli.verbose)?;
}
+ Commands::Hook { command } => match command {
+ HookCommands::Gemini => {
+ hook_cmd::run_gemini()?;
+ }
+ HookCommands::Copilot => {
+ hook_cmd::run_copilot()?;
+ }
+ },
+
Commands::Rewrite { args } => {
let cmd = args.join(" ");
rewrite_cmd::run(&cmd)?;
@@ -2188,6 +2308,9 @@ fn is_operational_command(cmd: &Commands) -> bool {
| Commands::Curl { .. }
| Commands::Ruff { .. }
| Commands::Pytest { .. }
+ | Commands::Rake { .. }
+ | Commands::Rubocop { .. }
+ | Commands::Rspec { .. }
| Commands::Pip { .. }
| Commands::Go { .. }
| Commands::Gradle { .. }
diff --git a/src/next_cmd.rs b/src/next_cmd.rs
index 53564d22c..e958258d3 100644
--- a/src/next_cmd.rs
+++ b/src/next_cmd.rs
@@ -125,14 +125,14 @@ fn filter_next_build(output: &str) -> String {
// Build filtered output
let mut result = String::new();
- result.push_str("⚡ Next.js Build\n");
+ result.push_str("Next.js Build\n");
result.push_str("═══════════════════════════════════════\n");
if already_built && routes_total == 0 {
- result.push_str("✓ Already built (using cache)\n\n");
+ result.push_str("Already built (using cache)\n\n");
} else if routes_total > 0 {
result.push_str(&format!(
- "✓ {} routes ({} static, {} dynamic)\n\n",
+ "{} routes ({} static, {} dynamic)\n\n",
routes_total, routes_static, routes_dynamic
));
}
@@ -146,7 +146,7 @@ fn filter_next_build(output: &str) -> String {
for (route, size, pct_change) in bundles.iter().take(10) {
let warning_marker = if let Some(pct) = pct_change {
if *pct > 10.0 {
- format!(" ⚠️ (+{:.0}%)", pct)
+ format!(" [warn] (+{:.0}%)", pct)
} else {
String::new()
}
@@ -219,7 +219,7 @@ Route (app) Size First Load JS
✓ Built in 34.2s
"#;
let result = filter_next_build(output);
- assert!(result.contains("⚡ Next.js Build"));
+ assert!(result.contains("Next.js Build"));
assert!(result.contains("routes"));
assert!(!result.contains("Creating an optimized")); // Should filter verbose logs
}
diff --git a/src/npm_cmd.rs b/src/npm_cmd.rs
index e4316ad25..1d35d41f7 100644
--- a/src/npm_cmd.rs
+++ b/src/npm_cmd.rs
@@ -160,7 +160,7 @@ fn filter_npm_output(output: &str) -> String {
}
if result.is_empty() {
- "ok ✓".to_string()
+ "ok".to_string()
} else {
result.join("\n")
}
@@ -231,6 +231,6 @@ npm notice
fn test_filter_npm_output_empty() {
let output = "\n\n\n";
let result = filter_npm_output(output);
- assert_eq!(result, "ok ✓");
+ assert_eq!(result, "ok");
}
}
diff --git a/src/parser/README.md b/src/parser/README.md
index 6f0d2420a..db7b20ed7 100644
--- a/src/parser/README.md
+++ b/src/parser/README.md
@@ -70,7 +70,7 @@ impl OutputParser for VitestParser {
)
} else {
// Tier 3: Passthrough
- ParseResult::Passthrough(truncate_output(input, 500))
+ ParseResult::Passthrough(truncate_output(input, 2000))
}
}
}
@@ -152,7 +152,7 @@ For build tools (next, webpack, vite, cargo, etc.)
- Human-readable
### Ultra (verbosity=2+)
-- Symbols: ✓✗⚠📦⬆️
+- Symbols: ✓✗⚠ pkg: ^
- Ultra-compressed
- 30-50% token reduction
diff --git a/src/parser/error.rs b/src/parser/error.rs
index eee4f343c..e3e48f073 100644
--- a/src/parser/error.rs
+++ b/src/parser/error.rs
@@ -2,6 +2,7 @@
use thiserror::Error;
#[derive(Error, Debug)]
+#[allow(dead_code)]
pub enum ParseError {
#[error("JSON parse failed at line {line}, column {col}: {msg}")]
JsonError {
diff --git a/src/parser/formatter.rs b/src/parser/formatter.rs
index 12467b4a9..b41280e26 100644
--- a/src/parser/formatter.rs
+++ b/src/parser/formatter.rs
@@ -51,13 +51,9 @@ impl TokenFormatter for TestResult {
lines.push(String::new());
for (idx, failure) in self.failures.iter().enumerate().take(5) {
lines.push(format!("{}. {}", idx + 1, failure.test_name));
- let error_preview: String = failure
- .error_message
- .lines()
- .take(2)
- .collect::>()
- .join(" ");
- lines.push(format!(" {}", error_preview));
+ for line in failure.error_message.lines() {
+ lines.push(format!(" {}", line));
+ }
}
if self.failures.len() > 5 {
@@ -105,7 +101,7 @@ impl TokenFormatter for TestResult {
fn format_ultra(&self) -> String {
format!(
- "✓{} ✗{} ⊘{} ({}ms)",
+ "[ok]{} [x]{} [skip]{} ({}ms)",
self.passed,
self.failed,
self.skipped,
@@ -161,9 +157,9 @@ impl TokenFormatter for LintResult {
lines.push("\nIssues:".to_string());
for issue in self.issues.iter().take(20) {
let severity_symbol = match issue.severity {
- LintSeverity::Error => "✗",
- LintSeverity::Warning => "⚠",
- LintSeverity::Info => "ℹ",
+ LintSeverity::Error => "[x]",
+ LintSeverity::Warning => "[!]",
+ LintSeverity::Info => "[info]",
};
lines.push(format!(
"{} {}:{}:{} [{}] {}",
@@ -186,7 +182,7 @@ impl TokenFormatter for LintResult {
fn format_ultra(&self) -> String {
format!(
- "✗{} ⚠{} 📁{}",
+ "[x]{} [!]{} {}F",
self.errors, self.warnings, self.files_with_issues
)
}
@@ -195,7 +191,7 @@ impl TokenFormatter for LintResult {
impl TokenFormatter for DependencyState {
fn format_compact(&self) -> String {
if self.outdated_count == 0 {
- return "All packages up-to-date ✓".to_string();
+ return "All packages up-to-date".to_string();
}
let mut lines = vec![format!(
@@ -251,13 +247,13 @@ impl TokenFormatter for DependencyState {
}
fn format_ultra(&self) -> String {
- format!("📦{} ⬆️{}", self.total_packages, self.outdated_count)
+ format!("pkg:{} ^{}", self.total_packages, self.outdated_count)
}
}
impl TokenFormatter for BuildOutput {
fn format_compact(&self) -> String {
- let status = if self.success { "✓" } else { "✗" };
+ let status = if self.success { "[ok]" } else { "[x]" };
let mut lines = vec![format!(
"{} Build: {} errors, {} warnings",
status, self.errors, self.warnings
@@ -324,9 +320,9 @@ impl TokenFormatter for BuildOutput {
}
fn format_ultra(&self) -> String {
- let status = if self.success { "✓" } else { "✗" };
+ let status = if self.success { "[ok]" } else { "[x]" };
format!(
- "{} ✗{} ⚠{} ({}ms)",
+ "{} [x]{} [!]{} ({}ms)",
status,
self.errors,
self.warnings,
@@ -334,3 +330,88 @@ impl TokenFormatter for BuildOutput {
)
}
}
+
+#[cfg(test)]
+mod tests {
+ use super::*;
+ use crate::parser::types::{TestFailure, TestResult};
+
+ fn make_failure(name: &str, error: &str) -> TestFailure {
+ TestFailure {
+ test_name: name.to_string(),
+ file_path: "tests/e2e.spec.ts".to_string(),
+ error_message: error.to_string(),
+ stack_trace: None,
+ }
+ }
+
+ fn make_result(passed: usize, failures: Vec<TestFailure>) -> TestResult {
+ TestResult {
+ total: passed + failures.len(),
+ passed,
+ failed: failures.len(),
+ skipped: 0,
+ duration_ms: Some(1500),
+ failures,
+ }
+ }
+
+ // RED: format_compact must show the full error message, not just 2 lines.
+ // Playwright errors contain the expected/received diff and call log starting
+ // at line 3+. Truncating to 2 lines leaves the agent with no debug info.
+ #[test]
+ fn test_compact_shows_full_error_message() {
+ let error = "Error: expect(locator).toHaveText(expected)\n\nExpected: 'Submit'\nReceived: 'Loading'\n\nCall log:\n - waiting for getByRole('button', { name: 'Submit' })";
+ let result = make_result(5, vec![make_failure("should click submit", error)]);
+
+ let output = result.format_compact();
+
+ assert!(
+ output.contains("Expected: 'Submit'"),
+ "format_compact must preserve expected/received diff\nGot:\n{output}"
+ );
+ assert!(
+ output.contains("Received: 'Loading'"),
+ "format_compact must preserve received value\nGot:\n{output}"
+ );
+ assert!(
+ output.contains("Call log:"),
+ "format_compact must preserve call log\nGot:\n{output}"
+ );
+ }
+
+ // RED: summary line stays compact regardless of failure detail
+ #[test]
+ fn test_compact_summary_line_is_concise() {
+ let result = make_result(28, vec![make_failure("test", "some error")]);
+ let output = result.format_compact();
+ let first_line = output.lines().next().unwrap_or("");
+ assert!(
+ first_line.contains("28") && first_line.contains("1"),
+ "First line must show pass/fail counts, got: {first_line}"
+ );
+ }
+
+ // RED: all-pass output stays compact (no failure detail bloat)
+ #[test]
+ fn test_compact_all_pass_is_one_line() {
+ let result = make_result(10, vec![]);
+ let output = result.format_compact();
+ assert!(
+ output.lines().count() <= 3,
+ "All-pass output should be compact, got {} lines:\n{output}",
+ output.lines().count()
+ );
+ }
+
+ // RED: error_message with only 1 line still works (no trailing noise)
+ #[test]
+ fn test_compact_single_line_error_no_trailing_noise() {
+ let result = make_result(0, vec![make_failure("should work", "Timeout exceeded")]);
+ let output = result.format_compact();
+ assert!(
+ output.contains("Timeout exceeded"),
+ "Single-line error must appear\nGot:\n{output}"
+ );
+ }
+}
diff --git a/src/parser/mod.rs b/src/parser/mod.rs
index 5561ec68f..0af1de198 100644
--- a/src/parser/mod.rs
+++ b/src/parser/mod.rs
@@ -29,6 +29,7 @@ pub enum ParseResult<T> {
impl<T> ParseResult<T> {
/// Unwrap the parsed data, panicking on Passthrough
+ #[allow(dead_code)]
pub fn unwrap(self) -> T {
match self {
ParseResult::Full(data) => data,
@@ -38,6 +39,7 @@ impl<T> ParseResult<T> {
}
/// Get the tier level (1 = Full, 2 = Degraded, 3 = Passthrough)
+ #[allow(dead_code)]
pub fn tier(&self) -> u8 {
match self {
ParseResult::Full(_) => 1,
@@ -47,11 +49,13 @@ impl<T> ParseResult<T> {
}
/// Check if parsing succeeded (Full or Degraded)
+ #[allow(dead_code)]
pub fn is_ok(&self) -> bool {
!matches!(self, ParseResult::Passthrough(_))
}
/// Map the parsed data while preserving tier
+ #[allow(dead_code)]
pub fn map<U, F>(self, f: F) -> ParseResult<U>
where
F: FnOnce(T) -> U,
@@ -64,6 +68,7 @@ impl<T> ParseResult<T> {
}
/// Get warnings if Degraded tier
+ #[allow(dead_code)]
pub fn warnings(&self) -> Vec<String> {
match self {
ParseResult::Degraded(_, warnings) => warnings.clone(),
@@ -85,16 +90,23 @@ pub trait OutputParser: Sized {
fn parse(input: &str) -> ParseResult<Self>;
/// Parse with explicit tier preference (for testing/debugging)
+ #[allow(dead_code)]
fn parse_with_tier(input: &str, max_tier: u8) -> ParseResult<Self> {
let result = Self::parse(input);
if result.tier() > max_tier {
// Force degradation to passthrough if exceeds max tier
- return ParseResult::Passthrough(truncate_output(input, 500));
+ return ParseResult::Passthrough(truncate_passthrough(input));
}
result
}
}
+/// Truncate output using configured passthrough limit
+pub fn truncate_passthrough(output: &str) -> String {
+ let max_chars = crate::config::limits().passthrough_max_chars;
+ truncate_output(output, max_chars)
+}
+
/// Truncate output to max length with ellipsis
pub fn truncate_output(output: &str, max_chars: usize) -> String {
let chars: Vec<char> = output.chars().collect();
diff --git a/src/parser/types.rs b/src/parser/types.rs
index 2339e2d4d..4fa6b804f 100644
--- a/src/parser/types.rs
+++ b/src/parser/types.rs
@@ -23,6 +23,7 @@ pub struct TestFailure {
/// Linting result (eslint, biome, tsc, etc.)
#[derive(Debug, Clone, Serialize, Deserialize)]
+#[allow(dead_code)]
pub struct LintResult {
pub total_files: usize,
pub files_with_issues: usize,
@@ -33,6 +34,7 @@ pub struct LintResult {
}
#[derive(Debug, Clone, Serialize, Deserialize)]
+#[allow(dead_code)]
pub struct LintIssue {
pub file_path: String,
pub line: usize,
@@ -43,6 +45,7 @@ pub struct LintIssue {
}
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)]
+#[allow(dead_code)]
pub enum LintSeverity {
Error,
Warning,
@@ -68,6 +71,7 @@ pub struct Dependency {
/// Build output (next, webpack, vite, cargo, etc.)
#[derive(Debug, Clone, Serialize, Deserialize)]
+#[allow(dead_code)]
pub struct BuildOutput {
pub success: bool,
pub duration_ms: Option<u64>,
@@ -78,6 +82,7 @@ pub struct BuildOutput {
}
#[derive(Debug, Clone, Serialize, Deserialize)]
+#[allow(dead_code)]
pub struct BundleInfo {
pub name: String,
pub size_bytes: u64,
@@ -85,6 +90,7 @@ pub struct BundleInfo {
}
#[derive(Debug, Clone, Serialize, Deserialize)]
+#[allow(dead_code)]
pub struct RouteInfo {
pub path: String,
pub size_kb: f64,
@@ -93,6 +99,7 @@ pub struct RouteInfo {
/// Git operation result
#[derive(Debug, Clone, Serialize, Deserialize)]
+#[allow(dead_code)]
pub struct GitResult {
pub operation: String,
pub files_changed: usize,
@@ -102,6 +109,7 @@ pub struct GitResult {
}
#[derive(Debug, Clone, Serialize, Deserialize)]
+#[allow(dead_code)]
pub struct GitCommit {
pub hash: String,
pub author: String,
@@ -111,6 +119,7 @@ pub struct GitCommit {
/// Generic command output (for tools without specific types)
#[derive(Debug, Clone, Serialize, Deserialize)]
+#[allow(dead_code)]
pub struct GenericOutput {
pub exit_code: i32,
pub stdout: String,
diff --git a/src/permissions.rs b/src/permissions.rs
new file mode 100644
index 000000000..52fad6a48
--- /dev/null
+++ b/src/permissions.rs
@@ -0,0 +1,461 @@
+use serde_json::Value;
+use std::path::PathBuf;
+
+/// Verdict from checking a command against Claude Code's permission rules.
+#[derive(Debug, PartialEq, Eq, Clone)]
+pub enum PermissionVerdict {
+ /// No deny/ask rules matched — safe to auto-allow.
+ Allow,
+ /// A deny rule matched — pass through to Claude Code's native deny handling.
+ Deny,
+ /// An ask rule matched — rewrite the command but let Claude Code prompt the user.
+ Ask,
+}
+
+/// Check `cmd` against Claude Code's deny/ask permission rules.
+///
+/// Returns `Allow` when no rules match (preserves existing behavior),
+/// `Deny` when a deny rule matches, or `Ask` when an ask rule matches.
+/// Deny takes priority over Ask if both match the same command.
+pub fn check_command(cmd: &str) -> PermissionVerdict {
+ let (deny_rules, ask_rules) = load_deny_ask_rules();
+ check_command_with_rules(cmd, &deny_rules, &ask_rules)
+}
+
+/// Internal implementation allowing tests to inject rules without file I/O.
+pub(crate) fn check_command_with_rules(
+ cmd: &str,
+ deny_rules: &[String],
+ ask_rules: &[String],
+) -> PermissionVerdict {
+ let segments = split_compound_command(cmd);
+ let mut any_ask = false;
+
+ for segment in &segments {
+ let segment = segment.trim();
+ if segment.is_empty() {
+ continue;
+ }
+
+ for pattern in deny_rules {
+ if command_matches_pattern(segment, pattern) {
+ return PermissionVerdict::Deny;
+ }
+ }
+
+ if !any_ask {
+ for pattern in ask_rules {
+ if command_matches_pattern(segment, pattern) {
+ any_ask = true;
+ break;
+ }
+ }
+ }
+ }
+
+ if any_ask {
+ PermissionVerdict::Ask
+ } else {
+ PermissionVerdict::Allow
+ }
+}
+
+/// Load deny and ask Bash rules from all Claude Code settings files.
+///
+/// Files read (in order, later files do not override earlier ones — all are merged):
+/// 1. `$PROJECT_ROOT/.claude/settings.json`
+/// 2. `$PROJECT_ROOT/.claude/settings.local.json`
+/// 3. `~/.claude/settings.json`
+/// 4. `~/.claude/settings.local.json`
+///
+/// Missing files and malformed JSON are silently skipped.
+fn load_deny_ask_rules() -> (Vec<String>, Vec<String>) {
+ let mut deny_rules = Vec::new();
+ let mut ask_rules = Vec::new();
+
+ for path in get_settings_paths() {
+ let Ok(content) = std::fs::read_to_string(&path) else {
+ continue;
+ };
+ let Ok(json) = serde_json::from_str::<Value>(&content) else {
+ continue;
+ };
+ let Some(permissions) = json.get("permissions") else {
+ continue;
+ };
+
+ append_bash_rules(permissions.get("deny"), &mut deny_rules);
+ append_bash_rules(permissions.get("ask"), &mut ask_rules);
+ }
+
+ (deny_rules, ask_rules)
+}
+
+/// Extract Bash-scoped patterns from a JSON array and append them to `target`.
+///
+/// Only rules with a `Bash(...)` prefix are kept. Non-Bash rules (e.g. `Read(...)`) are ignored.
+fn append_bash_rules(rules_value: Option<&Value>, target: &mut Vec<String>) {
+ let Some(arr) = rules_value.and_then(|v| v.as_array()) else {
+ return;
+ };
+ for rule in arr {
+ if let Some(s) = rule.as_str() {
+ if s.starts_with("Bash(") {
+ target.push(extract_bash_pattern(s).to_string());
+ }
+ }
+ }
+}
+
+/// Return the ordered list of Claude Code settings file paths to check.
+fn get_settings_paths() -> Vec<PathBuf> {
+ let mut paths = Vec::new();
+
+ if let Some(root) = find_project_root() {
+ paths.push(root.join(".claude").join("settings.json"));
+ paths.push(root.join(".claude").join("settings.local.json"));
+ }
+ if let Some(home) = dirs::home_dir() {
+ paths.push(home.join(".claude").join("settings.json"));
+ paths.push(home.join(".claude").join("settings.local.json"));
+ }
+
+ paths
+}
+
+/// Locate the project root by walking up from CWD looking for `.claude/`.
+///
+/// Falls back to `git rev-parse --show-toplevel` if not found via directory walk.
+fn find_project_root() -> Option<PathBuf> {
+ // Fast path: walk up CWD looking for .claude/ — no subprocess needed.
+ let mut dir = std::env::current_dir().ok()?;
+ loop {
+ if dir.join(".claude").exists() {
+ return Some(dir);
+ }
+ if !dir.pop() {
+ break;
+ }
+ }
+
+ // Fallback: git (spawns a subprocess, slower but handles monorepo layouts).
+ let output = std::process::Command::new("git")
+ .args(["rev-parse", "--show-toplevel"])
+ .output()
+ .ok()?;
+
+ if output.status.success() {
+ let path = String::from_utf8(output.stdout).ok()?;
+ return Some(PathBuf::from(path.trim()));
+ }
+
+ None
+}
+
+/// Extract the pattern string from inside `Bash(pattern)`.
+///
+/// Returns the original string unchanged if it does not match the expected format.
+pub(crate) fn extract_bash_pattern(rule: &str) -> &str {
+ if let Some(inner) = rule.strip_prefix("Bash(") {
+ if let Some(pattern) = inner.strip_suffix(')') {
+ return pattern;
+ }
+ }
+ rule
+}
+
+/// Check if `cmd` matches a Claude Code permission pattern.
+///
+/// Pattern forms:
+/// - `*` → matches everything
+/// - `prefix:*` or `prefix *` (trailing `*`, no other wildcards) → prefix match with word boundary
+/// - `* suffix`, `pre * suf` → glob matching where `*` matches any sequence of characters
+/// - `pattern` → exact match or prefix match (cmd must equal pattern or start with `{pattern} `)
+pub(crate) fn command_matches_pattern(cmd: &str, pattern: &str) -> bool {
+ // 1. Global wildcard
+ if pattern == "*" {
+ return true;
+ }
+
+ // 2. Trailing-only wildcard: fast path with word-boundary preservation
+ // Handles: "git push*", "git push *", "sudo:*"
+ if let Some(p) = pattern.strip_suffix('*') {
+ let prefix = p.trim_end_matches(':').trim_end();
+ // Bug 2 fix: after stripping, if prefix is empty or just wildcards, match everything
+ if prefix.is_empty() || prefix == "*" {
+ return true;
+ }
+ // No other wildcards in prefix -> use word-boundary fast path
+ if !prefix.contains('*') {
+ return cmd == prefix || cmd.starts_with(&format!("{} ", prefix));
+ }
+ // Prefix still contains '*' -> fall through to glob matching
+ }
+
+ // 3. Complex wildcards (leading, middle, multiple): glob matching
+ if pattern.contains('*') {
+ return glob_matches(cmd, pattern);
+ }
+
+ // 4. No wildcard: exact match or prefix with word boundary
+ cmd == pattern || cmd.starts_with(&format!("{} ", pattern))
+}
+
+/// Glob-style matching where `*` matches any character sequence (including empty).
+///
+/// Colon syntax normalized: `sudo:*` treated as `sudo *` for word separation.
+fn glob_matches(cmd: &str, pattern: &str) -> bool {
+ // Normalize colon-wildcard syntax: "sudo:*" -> "sudo *", "*:rm" -> "* rm"
+ let normalized = pattern.replace(":*", " *").replace("*:", "* ");
+ let parts: Vec<&str> = normalized.split('*').collect();
+
+ // All-stars pattern (e.g. "***") matches everything
+ if parts.iter().all(|p| p.is_empty()) {
+ return true;
+ }
+
+ let mut search_from = 0;
+
+ for (i, part) in parts.iter().enumerate() {
+ if part.is_empty() {
+ continue;
+ }
+
+ if i == 0 {
+ // First segment: must be prefix (pattern doesn't start with *)
+ if !cmd.starts_with(part) {
+ return false;
+ }
+ search_from = part.len();
+ } else if i == parts.len() - 1 {
+ // Last segment: must be suffix (pattern doesn't end with *)
+ if !cmd[search_from..].ends_with(*part) {
+ return false;
+ }
+ } else {
+ // Middle segment: find next occurrence
+ match cmd[search_from..].find(*part) {
+ Some(pos) => search_from += pos + part.len(),
+ None => return false,
+ }
+ }
+ }
+
+ true
+}
+
+/// Split a compound shell command into individual segments.
+///
+/// Splits on `&&`, `||`, `|`, and `;`. Not a full shell parser — handles common cases.
+fn split_compound_command(cmd: &str) -> Vec<&str> {
+ cmd.split("&&")
+ .flat_map(|s| s.split("||"))
+ .flat_map(|s| s.split(['|', ';']))
+ .collect()
+}
+
+#[cfg(test)]
+mod tests {
+ use super::*;
+
+ #[test]
+ fn test_parse_bash_pattern() {
+ assert_eq!(
+ extract_bash_pattern("Bash(git push --force)"),
+ "git push --force"
+ );
+ assert_eq!(extract_bash_pattern("Bash(*)"), "*");
+ assert_eq!(extract_bash_pattern("Bash(sudo:*)"), "sudo:*");
+ assert_eq!(extract_bash_pattern("Read(**/.env*)"), "Read(**/.env*)"); // unchanged
+ }
+
+ #[test]
+ fn test_exact_match() {
+ assert!(command_matches_pattern(
+ "git push --force",
+ "git push --force"
+ ));
+ }
+
+ #[test]
+ fn test_wildcard_colon() {
+ assert!(command_matches_pattern("sudo rm -rf /", "sudo:*"));
+ }
+
+ #[test]
+ fn test_no_match() {
+ assert!(!command_matches_pattern("git status", "git push --force"));
+ }
+
+ #[test]
+ fn test_deny_precedence_over_ask() {
+ let deny = vec!["git push --force".to_string()];
+ let ask = vec!["git push --force".to_string()];
+ assert_eq!(
+ check_command_with_rules("git push --force", &deny, &ask),
+ PermissionVerdict::Deny
+ );
+ }
+
+ #[test]
+ fn test_non_bash_rules_ignored() {
+ // Non-Bash rules (e.g. Read, Write) must not match Bash commands.
+ // In load_deny_ask_rules, only Bash( rules are kept — we verify that
+ // extract_bash_pattern returns the original string for non-Bash rules.
+ assert_eq!(extract_bash_pattern("Read(**/.env*)"), "Read(**/.env*)");
+
+ // With empty rule sets (what you get after filtering out non-Bash rules),
+ // verdict is always Allow.
+ assert_eq!(
+ check_command_with_rules("cat .env", &[], &[]),
+ PermissionVerdict::Allow
+ );
+ }
+
+ #[test]
+ fn test_empty_permissions() {
+ assert_eq!(
+ check_command_with_rules("git push --force", &[], &[]),
+ PermissionVerdict::Allow
+ );
+ }
+
+ #[test]
+ fn test_prefix_match() {
+ assert!(command_matches_pattern(
+ "git push --force origin main",
+ "git push --force"
+ ));
+ }
+
+ #[test]
+ fn test_wildcard_all() {
+ assert!(command_matches_pattern("anything at all", "*"));
+ assert!(command_matches_pattern("", "*"));
+ }
+
+ #[test]
+ fn test_no_partial_word_match() {
+ // "git push --forceful" must NOT match pattern "git push --force".
+ assert!(!command_matches_pattern(
+ "git push --forceful",
+ "git push --force"
+ ));
+ }
+
+ #[test]
+ fn test_compound_command_deny() {
+ let deny = vec!["git push --force".to_string()];
+ assert_eq!(
+ check_command_with_rules("git status && git push --force", &deny, &[]),
+ PermissionVerdict::Deny
+ );
+ }
+
+ #[test]
+ fn test_compound_command_ask() {
+ let ask = vec!["git push".to_string()];
+ assert_eq!(
+ check_command_with_rules("git status && git push origin main", &[], &ask),
+ PermissionVerdict::Ask
+ );
+ }
+
+ #[test]
+ fn test_compound_command_deny_overrides_ask() {
+ let deny = vec!["git push --force".to_string()];
+ let ask = vec!["git status".to_string()];
+ // deny in compound cmd takes priority even if ask also matches a segment
+ assert_eq!(
+ check_command_with_rules("git status && git push --force", &deny, &ask),
+ PermissionVerdict::Deny
+ );
+ }
+
+ #[test]
+ fn test_ask_verdict() {
+ let ask = vec!["git push".to_string()];
+ assert_eq!(
+ check_command_with_rules("git push origin main", &[], &ask),
+ PermissionVerdict::Ask
+ );
+ }
+
+ #[test]
+ fn test_sudo_wildcard_no_false_positive() {
+ // "sudoedit" must NOT match "sudo:*" (word boundary respected).
+ assert!(!command_matches_pattern("sudoedit /etc/hosts", "sudo:*"));
+ }
+
+ // Bug 2: *:* catch-all must match everything
+ #[test]
+ fn test_star_colon_star_matches_everything() {
+ assert!(command_matches_pattern("rm -rf /", "*:*"));
+ assert!(command_matches_pattern("git push --force", "*:*"));
+ assert!(command_matches_pattern("anything", "*:*"));
+ }
+
+ // Bug 3: leading wildcard — positive
+ #[test]
+ fn test_leading_wildcard() {
+ assert!(command_matches_pattern("git push --force", "* --force"));
+ assert!(command_matches_pattern("npm run --force", "* --force"));
+ }
+
+ // Bug 3: leading wildcard — negative (suffix anchoring)
+ #[test]
+ fn test_leading_wildcard_no_partial() {
+ assert!(!command_matches_pattern("git push --forceful", "* --force"));
+ assert!(!command_matches_pattern("git push", "* --force"));
+ }
+
+ // Bug 3: middle wildcard — positive
+ #[test]
+ fn test_middle_wildcard() {
+ assert!(command_matches_pattern("git push main", "git * main"));
+ assert!(command_matches_pattern("git rebase main", "git * main"));
+ }
+
+ // Bug 3: middle wildcard — negative
+ #[test]
+ fn test_middle_wildcard_no_match() {
+ assert!(!command_matches_pattern("git push develop", "git * main"));
+ }
+
+ // Bug 3: multiple wildcards
+ #[test]
+ fn test_multiple_wildcards() {
+ assert!(command_matches_pattern(
+ "git push --force origin main",
+ "git * --force *"
+ ));
+ assert!(!command_matches_pattern(
+ "git pull origin main",
+ "git * --force *"
+ ));
+ }
+
+ // Integration: deny with leading wildcard
+ #[test]
+ fn test_deny_with_leading_wildcard() {
+ let deny = vec!["* --force".to_string()];
+ assert_eq!(
+ check_command_with_rules("git push --force", &deny, &[]),
+ PermissionVerdict::Deny
+ );
+ assert_eq!(
+ check_command_with_rules("git push", &deny, &[]),
+ PermissionVerdict::Allow
+ );
+ }
+
+ // Integration: deny *:* blocks everything
+ #[test]
+ fn test_deny_star_colon_star() {
+ let deny = vec!["*:*".to_string()];
+ assert_eq!(
+ check_command_with_rules("rm -rf /", &deny, &[]),
+ PermissionVerdict::Deny
+ );
+ }
+}
diff --git a/src/pip_cmd.rs b/src/pip_cmd.rs
index 359aef317..1c7dc9317 100644
--- a/src/pip_cmd.rs
+++ b/src/pip_cmd.rs
@@ -33,10 +33,8 @@ pub fn run(args: &[String], verbose: u8) -> Result<()> {
run_passthrough(base_cmd, args, verbose)?
}
_ => {
- anyhow::bail!(
- "rtk pip: unsupported subcommand '{}'\nSupported: list, outdated, install, uninstall, show",
- subcommand
- );
+ // Unknown subcommand: passthrough to pip/uv
+ run_passthrough(base_cmd, args, verbose)?
}
};
@@ -208,7 +206,7 @@ fn filter_pip_outdated(output: &str) -> String {
};
if packages.is_empty() {
- return "✓ pip outdated: All packages up to date".to_string();
+ return "pip outdated: All packages up to date".to_string();
}
let mut result = String::new();
@@ -216,11 +214,7 @@ fn filter_pip_outdated(output: &str) -> String {
result.push_str("═══════════════════════════════════════\n");
for (i, pkg) in packages.iter().take(20).enumerate() {
- let latest = pkg
- .latest_version
- .as_ref()
- .map(|v| v.as_str())
- .unwrap_or("unknown");
+ let latest = pkg.latest_version.as_deref().unwrap_or("unknown");
result.push_str(&format!(
"{}. {} ({} → {})\n",
i + 1,
@@ -234,7 +228,7 @@ fn filter_pip_outdated(output: &str) -> String {
result.push_str(&format!("\n... +{} more packages\n", packages.len() - 20));
}
- result.push_str("\n💡 Run `pip install --upgrade <package>` to update\n");
+ result.push_str("\n[hint] Run `pip install --upgrade <package>` to update\n");
result.trim().to_string()
}
@@ -269,7 +263,6 @@ mod tests {
fn test_filter_pip_outdated_none() {
let output = "[]";
let result = filter_pip_outdated(output);
- assert!(result.contains("✓"));
assert!(result.contains("All packages up to date"));
}
diff --git a/src/playwright_cmd.rs b/src/playwright_cmd.rs
index 0031ecc34..ce6f0fe78 100644
--- a/src/playwright_cmd.rs
+++ b/src/playwright_cmd.rs
@@ -5,8 +5,8 @@ use regex::Regex;
use serde::Deserialize;
use crate::parser::{
- emit_degradation_warning, emit_passthrough_warning, truncate_output, FormatMode, OutputParser,
- ParseResult, TestFailure, TestResult, TokenFormatter,
+ emit_degradation_warning, emit_passthrough_warning, truncate_passthrough, FormatMode,
+ OutputParser, ParseResult, TestFailure, TestResult, TokenFormatter,
};
/// Matches real Playwright JSON reporter output (suites → specs → tests → results)
@@ -110,7 +110,7 @@ impl OutputParser for PlaywrightParser {
}
None => {
// Tier 3: Passthrough
- ParseResult::Passthrough(truncate_output(input, 500))
+ ParseResult::Passthrough(truncate_passthrough(input))
}
}
}
@@ -314,7 +314,12 @@ pub fn run(args: &[String], verbose: u8) -> Result<()> {
}
};
- println!("{}", filtered);
+ let exit_code = output.status.code().unwrap_or(1);
+ if let Some(hint) = crate::tee::tee_and_hint(&raw, "playwright", exit_code) {
+ println!("{}\n{}", filtered, hint);
+ } else {
+ println!("{}", filtered);
+ }
timer.track(
&format!("playwright {}", args.join(" ")),
@@ -325,7 +330,7 @@ pub fn run(args: &[String], verbose: u8) -> Result<()> {
// Preserve exit code for CI/CD
if !output.status.success() {
- std::process::exit(output.status.code().unwrap_or(1));
+ std::process::exit(exit_code);
}
Ok(())
diff --git a/src/pnpm_cmd.rs b/src/pnpm_cmd.rs
index 50371763e..6690c16e6 100644
--- a/src/pnpm_cmd.rs
+++ b/src/pnpm_cmd.rs
@@ -6,7 +6,7 @@ use std::collections::HashMap;
use std::ffi::OsString;
use crate::parser::{
- emit_degradation_warning, emit_passthrough_warning, truncate_output, Dependency,
+ emit_degradation_warning, emit_passthrough_warning, truncate_passthrough, Dependency,
DependencyState, FormatMode, OutputParser, ParseResult, TokenFormatter,
};
@@ -75,7 +75,7 @@ impl OutputParser for PnpmListParser {
}
None => {
// Tier 3: Passthrough
- ParseResult::Passthrough(truncate_output(input, 500))
+ ParseResult::Passthrough(truncate_passthrough(input))
}
}
}
@@ -202,7 +202,7 @@ impl OutputParser for PnpmOutdatedParser {
}
None => {
// Tier 3: Passthrough
- ParseResult::Passthrough(truncate_output(input, 500))
+ ParseResult::Passthrough(truncate_passthrough(input))
}
}
}
@@ -307,7 +307,8 @@ fn run_list(depth: usize, args: &[String], verbose: u8) -> Result<()> {
if !output.status.success() {
let stderr = String::from_utf8_lossy(&output.stderr);
- anyhow::bail!("pnpm list failed: {}", stderr);
+ eprint!("{}", stderr);
+ std::process::exit(output.status.code().unwrap_or(1));
}
let stdout = String::from_utf8_lossy(&output.stdout);
@@ -388,7 +389,7 @@ fn run_outdated(args: &[String], verbose: u8) -> Result<()> {
};
if filtered.trim().is_empty() {
- println!("All packages up-to-date ✓");
+ println!("All packages up-to-date");
} else {
println!("{}", filtered);
}
@@ -431,7 +432,8 @@ fn run_install(packages: &[String], args: &[String], verbose: u8) -> Result<()>
let stderr = String::from_utf8_lossy(&output.stderr);
if !output.status.success() {
- anyhow::bail!("pnpm install failed: {}", stderr);
+ eprint!("{}", stderr);
+ std::process::exit(output.status.code().unwrap_or(1));
}
let combined = format!("{}{}", stdout, stderr);
@@ -482,7 +484,7 @@ fn filter_pnpm_install(output: &str) -> String {
}
if result.is_empty() {
- "ok ✓".to_string()
+ "ok".to_string()
} else {
result.join("\n")
}
diff --git a/src/prettier_cmd.rs b/src/prettier_cmd.rs
index 1c2bbfded..2bbc98fe5 100644
--- a/src/prettier_cmd.rs
+++ b/src/prettier_cmd.rs
@@ -112,7 +112,7 @@ pub fn filter_prettier_output(output: &str) -> String {
// Check if all files are formatted
if files_to_format.is_empty() && output.contains("All matched files use Prettier") {
- return "✓ Prettier: All files formatted correctly".to_string();
+ return "Prettier: All files formatted correctly".to_string();
}
// Check if files were written (write mode)
@@ -125,7 +125,7 @@ pub fn filter_prettier_output(output: &str) -> String {
if is_check_mode {
// Check mode: show files that need formatting
if files_to_format.is_empty() {
- result.push_str("✓ Prettier: All files formatted correctly\n");
+ result.push_str("Prettier: All files formatted correctly\n");
} else {
result.push_str(&format!(
"Prettier: {} files need formatting\n",
@@ -146,7 +146,7 @@ pub fn filter_prettier_output(output: &str) -> String {
if files_checked > 0 {
result.push_str(&format!(
- "\n✓ {} files already formatted\n",
+ "\n{} files already formatted\n",
files_checked - files_to_format.len()
));
}
@@ -154,7 +154,7 @@ pub fn filter_prettier_output(output: &str) -> String {
} else {
// Write mode: show what was formatted
result.push_str(&format!(
- "✓ Prettier: {} files formatted\n",
+ "Prettier: {} files formatted\n",
files_to_format.len()
));
}
@@ -173,7 +173,7 @@ Checking formatting...
All matched files use Prettier code style!
"#;
let result = filter_prettier_output(output);
- assert!(result.contains("✓ Prettier"));
+ assert!(result.contains("Prettier"));
assert!(result.contains("All files formatted correctly"));
}
diff --git a/src/prisma_cmd.rs b/src/prisma_cmd.rs
index 6cd6cc276..a82ece07c 100644
--- a/src/prisma_cmd.rs
+++ b/src/prisma_cmd.rs
@@ -221,7 +221,7 @@ fn filter_prisma_generate(output: &str) -> String {
}
let mut result = String::new();
- result.push_str("✓ Prisma Client generated\n");
+ result.push_str("Prisma Client generated\n");
if models > 0 || enums > 0 || types > 0 {
result.push_str(&format!(
@@ -283,7 +283,7 @@ fn filter_migrate_dev(output: &str) -> String {
let mut result = String::new();
if !migration_name.is_empty() {
- result.push_str(&format!("🗃️ Migration: {}\n", migration_name));
+ result.push_str(&format!("Migration: {}\n", migration_name));
result.push_str("═══════════════════════════════════════\n");
}
@@ -303,7 +303,7 @@ fn filter_migrate_dev(output: &str) -> String {
result.push('\n');
if applied {
- result.push_str("✓ Applied | Pending: 0\n");
+ result.push_str("Applied | Pending: 0\n");
}
result.trim().to_string()
@@ -360,9 +360,9 @@ fn filter_migrate_deploy(output: &str) -> String {
let mut result = String::new();
if errors.is_empty() {
- result.push_str(&format!("✓ {} migration(s) deployed\n", deployed));
+ result.push_str(&format!("{} migration(s) deployed\n", deployed));
} else {
- result.push_str("❌ Deployment failed:\n");
+ result.push_str("[FAIL] Deployment failed:\n");
for err in errors.iter().take(5) {
result.push_str(&format!(" {}\n", err));
}
@@ -390,7 +390,7 @@ fn filter_db_push(output: &str) -> String {
}
let mut result = String::new();
- result.push_str("✓ Schema pushed to database\n");
+ result.push_str("Schema pushed to database\n");
if tables_added > 0 || columns_modified > 0 || dropped > 0 {
result.push_str(&format!(
@@ -460,7 +460,7 @@ import { PrismaClient } from '@prisma/client'
42 models, 18 enums, 890 types generated
"#;
let result = filter_prisma_generate(output);
- assert!(result.contains("✓ Prisma Client generated"));
+ assert!(result.contains("Prisma Client generated"));
// Parser may not extract exact counts from this format, just check it doesn't crash
assert!(!result.contains("Prisma schema loaded"));
assert!(!result.contains("Start by importing"));
@@ -484,7 +484,7 @@ CREATE INDEX "session_status_idx" ON "Session"("status");
let result = filter_migrate_dev(output);
assert!(result.contains("20260128_add_sessions"));
assert!(result.contains("+ 1 table"));
- assert!(result.contains("✓ Applied"));
+ assert!(result.contains("Applied"));
}
#[test]
diff --git a/src/pytest_cmd.rs b/src/pytest_cmd.rs
index ad6942260..0b1b1f2c1 100644
--- a/src/pytest_cmd.rs
+++ b/src/pytest_cmd.rs
@@ -167,7 +167,7 @@ fn build_pytest_summary(summary: &str, _test_files: &[String], failures: &[Strin
let (passed, failed, skipped) = parse_summary_line(summary);
if failed == 0 && passed > 0 {
- return format!("✓ Pytest: {} passed", passed);
+ return format!("Pytest: {} passed", passed);
}
if passed == 0 && failed == 0 {
@@ -198,13 +198,13 @@ fn build_pytest_summary(summary: &str, _test_files: &[String], failures: &[Strin
if first_line.starts_with("___") {
// Extract test name between ___
let test_name = first_line.trim_matches('_').trim();
- result.push_str(&format!("{}. ❌ {}\n", i + 1, test_name));
+ result.push_str(&format!("{}. [FAIL] {}\n", i + 1, test_name));
} else if first_line.starts_with("FAILED") {
// Summary format: "FAILED tests/test_foo.py::test_bar - AssertionError"
let parts: Vec<&str> = first_line.split(" - ").collect();
if let Some(test_path) = parts.first() {
let test_name = test_path.trim_start_matches("FAILED ");
- result.push_str(&format!("{}. ❌ {}\n", i + 1, test_name));
+ result.push_str(&format!("{}. [FAIL] {}\n", i + 1, test_name));
}
if parts.len() > 1 {
result.push_str(&format!(" {}\n", truncate(parts[1], 100)));
@@ -288,7 +288,7 @@ tests/test_foo.py ..... [100%]
=== 5 passed in 0.50s ==="#;
let result = filter_pytest_output(output);
- assert!(result.contains("✓ Pytest"));
+ assert!(result.contains("Pytest"));
assert!(result.contains("5 passed"));
}
diff --git a/src/rake_cmd.rs b/src/rake_cmd.rs
new file mode 100644
index 000000000..e3fba68fa
--- /dev/null
+++ b/src/rake_cmd.rs
@@ -0,0 +1,552 @@
+//! Minitest output filter for `rake test` and `rails test`.
+//!
+//! Parses the standard Minitest output format produced by both `rake test` and
+//! `rails test`, filtering down to failures/errors and the summary line.
+//! Uses `ruby_exec("rake")` to auto-detect `bundle exec`.
+
+use crate::tracking;
+use crate::utils::{exit_code_from_output, ruby_exec, strip_ansi};
+use anyhow::{Context, Result};
+
+/// Decide whether to use `rake test` or `rails test` based on args.
+///
+/// `rake test` only supports a single file via `TEST=path` and ignores positional
+/// file args. When any positional test file paths are detected, we switch to
+/// `rails test` which handles single files, multiple files, and line-number
+/// syntax (`file.rb:15`) natively.
+fn select_runner(args: &[String]) -> (&'static str, Vec<String>) {
+ let has_test_subcommand = args.first().map_or(false, |a| a == "test");
+ if !has_test_subcommand {
+ return ("rake", args.to_vec());
+ }
+
+ let after_test: Vec<&String> = args[1..].iter().collect();
+
+ let positional_files: Vec<&&String> = after_test
+ .iter()
+ .filter(|a| !a.contains('=') && !a.starts_with('-'))
+ .filter(|a| looks_like_test_path(a))
+ .collect();
+
+ let needs_rails = !positional_files.is_empty();
+
+ if needs_rails {
+ ("rails", args.to_vec())
+ } else {
+ ("rake", args.to_vec())
+ }
+}
+
+fn looks_like_test_path(arg: &str) -> bool {
+ let path = arg.split(':').next().unwrap_or(arg);
+ path.ends_with(".rb")
+ || path.starts_with("test/")
+ || path.starts_with("spec/")
+ || path.contains("_test.rb")
+ || path.contains("_spec.rb")
+}
+
+pub fn run(args: &[String], verbose: u8) -> Result<()> {
+ let timer = tracking::TimedExecution::start();
+
+ let (tool, effective_args) = select_runner(args);
+ let mut cmd = ruby_exec(tool);
+ for arg in &effective_args {
+ cmd.arg(arg);
+ }
+
+ if verbose > 0 {
+ eprintln!(
+ "Running: {} {}",
+ cmd.get_program().to_string_lossy(),
+ effective_args.join(" ")
+ );
+ }
+
+ let output = cmd
+ .output()
+ .context("Failed to run rake. Is it installed? Try: gem install rake")?;
+
+ let stdout = String::from_utf8_lossy(&output.stdout);
+ let stderr = String::from_utf8_lossy(&output.stderr);
+ let raw = format!("{}\n{}", stdout, stderr);
+
+ let filtered = filter_minitest_output(&raw);
+
+ let exit_code = exit_code_from_output(&output, "rake");
+ if let Some(hint) = crate::tee::tee_and_hint(&raw, "rake", exit_code) {
+ println!("{}\n{}", filtered, hint);
+ } else {
+ println!("{}", filtered);
+ }
+
+ if !stderr.trim().is_empty() && verbose > 0 {
+ eprintln!("{}", stderr.trim());
+ }
+
+ timer.track(
+ &format!("rake {}", args.join(" ")),
+ &format!("rtk rake {}", args.join(" ")),
+ &raw,
+ &filtered,
+ );
+
+ if !output.status.success() {
+ std::process::exit(exit_code);
+ }
+
+ Ok(())
+}
+
+#[derive(Debug, PartialEq)]
+enum ParseState {
+ Header,
+ Running,
+ Failures,
+ #[allow(dead_code)]
+ Summary,
+}
+
+/// Parse Minitest output using a state machine.
+///
+/// Minitest produces output like:
+/// ```text
+/// Run options: --seed 12345
+///
+/// # Running:
+///
+/// ..F..E..
+///
+/// Finished in 0.123456s, 64.8 runs/s
+///
+/// 1) Failure:
+/// TestSomething#test_that_fails [/path/to/test.rb:15]:
+/// Expected: true
+/// Actual: false
+///
+/// 8 runs, 7 assertions, 1 failures, 1 errors, 0 skips
+/// ```
+fn filter_minitest_output(output: &str) -> String {
+ let clean = strip_ansi(output);
+ let mut state = ParseState::Header;
+ let mut failures: Vec<String> = Vec::new();
+ let mut current_failure: Vec<String> = Vec::new();
+ let mut summary_line = String::new();
+
+ for line in clean.lines() {
+ let trimmed = line.trim();
+
+ // Detect summary line anywhere (it's always last meaningful line)
+ // Handles both "N runs, N assertions, ..." and "N tests, N assertions, ..."
+ if (trimmed.contains(" runs,") || trimmed.contains(" tests,"))
+ && trimmed.contains(" assertions,")
+ {
+ summary_line = trimmed.to_string();
+ continue;
+ }
+
+ // State transitions — handle both standard Minitest and minitest-reporters
+ if trimmed == "# Running:" || trimmed.starts_with("Started with run options") {
+ state = ParseState::Running;
+ continue;
+ }
+
+ if trimmed.starts_with("Finished in ") {
+ state = ParseState::Failures;
+ continue;
+ }
+
+ match state {
+ ParseState::Header | ParseState::Running => {
+ // Skip seed line, blank lines, progress dots
+ continue;
+ }
+ ParseState::Failures => {
+ if is_failure_header(trimmed) {
+ if !current_failure.is_empty() {
+ failures.push(current_failure.join("\n"));
+ current_failure.clear();
+ }
+ current_failure.push(trimmed.to_string());
+ } else if trimmed.is_empty() && !current_failure.is_empty() {
+ failures.push(current_failure.join("\n"));
+ current_failure.clear();
+ } else if !trimmed.is_empty() {
+ current_failure.push(line.to_string());
+ }
+ }
+ ParseState::Summary => {}
+ }
+ }
+
+ // Save last failure if any
+ if !current_failure.is_empty() {
+ failures.push(current_failure.join("\n"));
+ }
+
+ build_minitest_summary(&summary_line, &failures)
+}
+
+fn is_failure_header(line: &str) -> bool {
+ lazy_static::lazy_static! {
+ static ref RE_FAILURE: regex::Regex =
+ regex::Regex::new(r"^\d+\)\s+(Failure|Error):$").unwrap();
+ }
+ RE_FAILURE.is_match(line)
+}
+
+fn build_minitest_summary(summary: &str, failures: &[String]) -> String {
+ let (runs, _assertions, fail_count, error_count, skips) = parse_minitest_summary(summary);
+
+ if runs == 0 && summary.is_empty() {
+ return "rake test: no tests ran".to_string();
+ }
+
+ if fail_count == 0 && error_count == 0 {
+ let mut msg = format!("ok rake test: {} runs, 0 failures", runs);
+ if skips > 0 {
+ msg.push_str(&format!(", {} skips", skips));
+ }
+ return msg;
+ }
+
+ let mut result = String::new();
+ result.push_str(&format!(
+ "rake test: {} runs, {} failures, {} errors",
+ runs, fail_count, error_count
+ ));
+ if skips > 0 {
+ result.push_str(&format!(", {} skips", skips));
+ }
+ result.push('\n');
+
+ if failures.is_empty() {
+ return result.trim().to_string();
+ }
+
+ result.push('\n');
+
+ for (i, failure) in failures.iter().take(10).enumerate() {
+ let lines: Vec<&str> = failure.lines().collect();
+ // First line is like " 1) Failure:" or " 1) Error:"
+ if let Some(header) = lines.first() {
+ result.push_str(&format!("{}. {}\n", i + 1, header.trim()));
+ }
+ // Remaining lines contain test name, file:line, assertion message
+ for line in lines.iter().skip(1).take(4) {
+ let trimmed = line.trim();
+ if !trimmed.is_empty() {
+ result.push_str(&format!(" {}\n", crate::utils::truncate(trimmed, 120)));
+ }
+ }
+ if i < failures.len().min(10) - 1 {
+ result.push('\n');
+ }
+ }
+
+ if failures.len() > 10 {
+ result.push_str(&format!("\n... +{} more failures\n", failures.len() - 10));
+ }
+
+ result.trim().to_string()
+}
+
+fn parse_minitest_summary(summary: &str) -> (usize, usize, usize, usize, usize) {
+ let mut runs = 0;
+ let mut assertions = 0;
+ let mut failures = 0;
+ let mut errors = 0;
+ let mut skips = 0;
+
+ for part in summary.split(',') {
+ let part = part.trim();
+ let words: Vec<&str> = part.split_whitespace().collect();
+ if words.len() >= 2 {
+ if let Ok(n) = words[0].parse::<usize>() {
+ match words[1].trim_end_matches(',') {
+ "runs" | "run" | "tests" | "test" => runs = n,
+ "assertions" | "assertion" => assertions = n,
+ "failures" | "failure" => failures = n,
+ "errors" | "error" => errors = n,
+ "skips" | "skip" => skips = n,
+ _ => {}
+ }
+ }
+ }
+ }
+
+ (runs, assertions, failures, errors, skips)
+}
+
+#[cfg(test)]
+mod tests {
+ use super::*;
+ use crate::utils::count_tokens;
+
+ #[test]
+ fn test_filter_minitest_all_pass() {
+ let output = r#"Run options: --seed 12345
+
+# Running:
+
+........
+
+Finished in 0.123456s, 64.8 runs/s, 72.9 assertions/s.
+
+8 runs, 9 assertions, 0 failures, 0 errors, 0 skips"#;
+
+ let result = filter_minitest_output(output);
+ assert!(result.contains("ok rake test"));
+ assert!(result.contains("8 runs"));
+ assert!(result.contains("0 failures"));
+ }
+
+ #[test]
+ fn test_filter_minitest_with_failures() {
+ let output = r#"Run options: --seed 54321
+
+# Running:
+
+..F....
+
+Finished in 0.234567s, 29.8 runs/s
+
+ 1) Failure:
+TestSomething#test_that_fails [/path/to/test.rb:15]:
+Expected: true
+ Actual: false
+
+7 runs, 7 assertions, 1 failures, 0 errors, 0 skips"#;
+
+ let result = filter_minitest_output(output);
+ assert!(result.contains("1 failures"));
+ assert!(result.contains("test_that_fails"));
+ assert!(result.contains("Expected: true"));
+ }
+
+ #[test]
+ fn test_filter_minitest_with_errors() {
+ let output = r#"Run options: --seed 99999
+
+# Running:
+
+.E....
+
+Finished in 0.345678s, 17.4 runs/s
+
+ 1) Error:
+TestOther#test_boom [/path/to/test.rb:42]:
+RuntimeError: something went wrong
+ /path/to/test.rb:42:in `test_boom'
+
+6 runs, 5 assertions, 0 failures, 1 errors, 0 skips"#;
+
+ let result = filter_minitest_output(output);
+ assert!(result.contains("1 errors"));
+ assert!(result.contains("test_boom"));
+ assert!(result.contains("RuntimeError"));
+ }
+
+ #[test]
+ fn test_filter_minitest_empty() {
+ let result = filter_minitest_output("");
+ assert!(result.contains("no tests ran"));
+ }
+
+ #[test]
+ fn test_filter_minitest_skip() {
+ let output = r#"Run options: --seed 11111
+
+# Running:
+
+..S..
+
+Finished in 0.100000s, 50.0 runs/s
+
+5 runs, 4 assertions, 0 failures, 0 errors, 1 skips"#;
+
+ let result = filter_minitest_output(output);
+ assert!(result.contains("ok rake test"));
+ assert!(result.contains("1 skips"));
+ }
+
+ #[test]
+ fn test_token_savings() {
+ let mut dots = String::new();
+ for _ in 0..20 {
+ dots.push_str(
+ "......................................................................\n",
+ );
+ }
+ let output = format!(
+ "Run options: --seed 12345\n\n\
+ # Running:\n\n\
+ {}\n\
+ Finished in 2.345678s, 213.4 runs/s, 428.7 assertions/s.\n\n\
+ 500 runs, 1003 assertions, 0 failures, 0 errors, 0 skips",
+ dots
+ );
+
+ let input_tokens = count_tokens(&output);
+ let result = filter_minitest_output(&output);
+ let output_tokens = count_tokens(&result);
+
+ let savings = 100.0 - (output_tokens as f64 / input_tokens as f64 * 100.0);
+ assert!(
+ savings >= 80.0,
+ "Expected >= 80% savings, got {:.1}% (input: {}, output: {})",
+ savings,
+ input_tokens,
+ output_tokens
+ );
+ }
+
+ #[test]
+ fn test_parse_minitest_summary() {
+ assert_eq!(
+ parse_minitest_summary("8 runs, 9 assertions, 0 failures, 0 errors, 0 skips"),
+ (8, 9, 0, 0, 0)
+ );
+ assert_eq!(
+ parse_minitest_summary("5 runs, 4 assertions, 1 failures, 1 errors, 2 skips"),
+ (5, 4, 1, 1, 2)
+ );
+ // minitest-reporters uses "tests" instead of "runs"
+ assert_eq!(
+ parse_minitest_summary("57 tests, 378 assertions, 0 failures, 0 errors, 0 skips"),
+ (57, 378, 0, 0, 0)
+ );
+ }
+
+ #[test]
+ fn test_filter_minitest_multiple_failures() {
+ let output = r#"Run options: --seed 77777
+
+# Running:
+
+.FF.E.
+
+Finished in 0.500000s, 12.0 runs/s
+
+ 1) Failure:
+TestFoo#test_alpha [/test.rb:10]:
+Expected: 1
+ Actual: 2
+
+ 2) Failure:
+TestFoo#test_beta [/test.rb:20]:
+Expected: "hello"
+ Actual: "world"
+
+ 3) Error:
+TestBar#test_gamma [/test.rb:30]:
+NoMethodError: undefined method `blah'
+
+6 runs, 5 assertions, 2 failures, 1 errors, 0 skips"#;
+
+ let result = filter_minitest_output(output);
+ assert!(result.contains("2 failures"));
+ assert!(result.contains("1 errors"));
+ assert!(result.contains("test_alpha"));
+ assert!(result.contains("test_beta"));
+ assert!(result.contains("test_gamma"));
+ }
+
+ #[test]
+ fn test_filter_minitest_reporters_format() {
+ let output = "Started with run options --seed 37764\n\n\
+ Progress: |========================================|\n\n\
+ Finished in 5.79938s\n\
+ 57 tests, 378 assertions, 0 failures, 0 errors, 0 skips";
+
+ let result = filter_minitest_output(output);
+ assert!(result.contains("ok rake test"));
+ assert!(result.contains("57 runs"));
+ assert!(result.contains("0 failures"));
+ }
+
+ #[test]
+ fn test_filter_minitest_with_ansi() {
+ let output = "\x1b[32mRun options: --seed 12345\x1b[0m\n\n\
+ # Running:\n\n\
+ \x1b[32m....\x1b[0m\n\n\
+ Finished in 0.1s, 40.0 runs/s\n\n\
+ 4 runs, 4 assertions, 0 failures, 0 errors, 0 skips";
+
+ let result = filter_minitest_output(output);
+ assert!(result.contains("ok rake test"));
+ assert!(result.contains("4 runs"));
+ }
+
+ // ── select_runner tests ─────────────────────────────
+
+ fn args(s: &str) -> Vec<String> {
+ s.split_whitespace().map(String::from).collect()
+ }
+
+ #[test]
+ fn test_select_runner_single_file_uses_rake() {
+ let (tool, _) = select_runner(&args("test TEST=test/models/post_test.rb"));
+ assert_eq!(tool, "rake");
+ }
+
+ #[test]
+ fn test_select_runner_no_files_uses_rake() {
+ let (tool, _) = select_runner(&args("test"));
+ assert_eq!(tool, "rake");
+ }
+
+ #[test]
+ fn test_select_runner_multiple_files_uses_rails() {
+ let (tool, a) = select_runner(&args(
+ "test test/models/post_test.rb test/models/user_test.rb",
+ ));
+ assert_eq!(tool, "rails");
+ assert_eq!(
+ a,
+ args("test test/models/post_test.rb test/models/user_test.rb")
+ );
+ }
+
+ #[test]
+ fn test_select_runner_line_number_uses_rails() {
+ let (tool, _) = select_runner(&args("test test/models/post_test.rb:15"));
+ assert_eq!(tool, "rails");
+ }
+
+ #[test]
+ fn test_select_runner_multiple_with_line_numbers() {
+ let (tool, _) = select_runner(&args(
+ "test test/models/post_test.rb:15 test/models/user_test.rb:30",
+ ));
+ assert_eq!(tool, "rails");
+ }
+
+ #[test]
+ fn test_select_runner_non_test_subcommand_uses_rake() {
+ let (tool, _) = select_runner(&args("db:migrate"));
+ assert_eq!(tool, "rake");
+ }
+
+ #[test]
+ fn test_select_runner_single_positional_file_uses_rails() {
+ let (tool, _) = select_runner(&args("test test/models/post_test.rb"));
+ assert_eq!(tool, "rails");
+ }
+
+ #[test]
+ fn test_select_runner_flags_not_counted_as_files() {
+ let (tool, _) = select_runner(&args("test --verbose --seed 12345"));
+ assert_eq!(tool, "rake");
+ }
+
+ #[test]
+ fn test_looks_like_test_path() {
+ assert!(looks_like_test_path("test/models/post_test.rb"));
+ assert!(looks_like_test_path("test/models/post_test.rb:15"));
+ assert!(looks_like_test_path("spec/models/post_spec.rb"));
+ assert!(looks_like_test_path("my_file.rb"));
+ assert!(!looks_like_test_path("--verbose"));
+ assert!(!looks_like_test_path("12345"));
+ }
+}
diff --git a/src/rewrite_cmd.rs b/src/rewrite_cmd.rs
index 754f51a9f..c64997b49 100644
--- a/src/rewrite_cmd.rs
+++ b/src/rewrite_cmd.rs
@@ -1,26 +1,47 @@
use crate::discover::registry;
+use crate::permissions::{check_command, PermissionVerdict};
+use std::io::Write;
/// Run the `rtk rewrite` command.
///
-/// Prints the RTK-rewritten command to stdout and exits 0.
-/// Exits 1 (without output) if the command has no RTK equivalent.
+/// Prints the RTK-rewritten command to stdout and exits with a code that tells
+/// the caller how to handle permissions:
///
-/// Used by shell hooks to rewrite commands transparently:
-/// ```bash
-/// REWRITTEN=$(rtk rewrite "$CMD") || exit 0
-/// [ "$CMD" = "$REWRITTEN" ] && exit 0 # already RTK, skip
-/// ```
+/// | Exit | Stdout | Meaning |
+/// |------|----------|--------------------------------------------------------------|
+/// | 0 | rewritten| Rewrite allowed — hook may auto-allow the rewritten command. |
+/// | 1 | (none) | No RTK equivalent — hook passes through unchanged. |
+/// | 2 | (none) | Deny rule matched — hook defers to Claude Code native deny. |
+/// | 3 | rewritten| Ask rule matched — hook rewrites but lets Claude Code prompt.|
pub fn run(cmd: &str) -> anyhow::Result<()> {
let excluded = crate::config::Config::load()
.map(|c| c.hooks.exclude_commands)
.unwrap_or_default();
+ // SECURITY: check deny/ask BEFORE rewrite so non-RTK commands are also covered.
+ let verdict = check_command(cmd);
+
+ if verdict == PermissionVerdict::Deny {
+ std::process::exit(2);
+ }
+
match registry::rewrite_command(cmd, &excluded) {
- Some(rewritten) => {
- print!("{}", rewritten);
- Ok(())
- }
+ Some(rewritten) => match verdict {
+ PermissionVerdict::Allow => {
+ print!("{}", rewritten);
+ let _ = std::io::stdout().flush();
+ Ok(())
+ }
+ PermissionVerdict::Ask => {
+ print!("{}", rewritten);
+ let _ = std::io::stdout().flush();
+ std::process::exit(3);
+ }
+ PermissionVerdict::Deny => unreachable!(),
+ },
None => {
+ // No RTK equivalent. Exit 1 = passthrough.
+ // Claude Code independently evaluates its own ask rules on the original cmd.
std::process::exit(1);
}
}
diff --git a/src/rspec_cmd.rs b/src/rspec_cmd.rs
new file mode 100644
index 000000000..3d8bf2c4b
--- /dev/null
+++ b/src/rspec_cmd.rs
@@ -0,0 +1,1046 @@
+//! RSpec test runner filter.
+//!
+//! Injects `--format json` to get structured output, parses it to show only
+//! failures. Falls back to a state-machine text parser when JSON is unavailable
+//! (e.g., user specified `--format documentation`) or when injected JSON output
+//! fails to parse.
+
+use crate::tracking;
+use crate::utils::{exit_code_from_output, fallback_tail, ruby_exec, truncate};
+use anyhow::{Context, Result};
+use lazy_static::lazy_static;
+use regex::Regex;
+use serde::Deserialize;
+
+// ── Noise-stripping regex patterns ──────────────────────────────────────────
+
+lazy_static! {
+ static ref RE_SPRING: Regex = Regex::new(r"(?i)running via spring preloader").unwrap();
+ static ref RE_SIMPLECOV: Regex =
+ Regex::new(r"(?i)(coverage report|simplecov|coverage/|\.simplecov|All Files.*Lines)")
+ .unwrap();
+ static ref RE_DEPRECATION: Regex = Regex::new(r"^DEPRECATION WARNING:").unwrap();
+ static ref RE_FINISHED_IN: Regex = Regex::new(r"^Finished in \d").unwrap();
+ static ref RE_SCREENSHOT: Regex = Regex::new(r"saved screenshot to (.+)").unwrap();
+ static ref RE_RSPEC_SUMMARY: Regex = Regex::new(r"(\d+) examples?, (\d+) failures?").unwrap();
+}
+
+// ── JSON structures matching RSpec's --format json output ───────────────────
+
+#[derive(Deserialize)]
+struct RspecOutput {
+ examples: Vec<RspecExample>,
+ summary: RspecSummary,
+}
+
+#[derive(Deserialize)]
+struct RspecExample {
+ full_description: String,
+ status: String,
+ file_path: String,
+ line_number: u32,
+ exception: Option<RspecException>,
+}
+
+#[derive(Deserialize)]
+struct RspecException {
+ class: String,
+ message: String,
+ #[serde(default)]
+ backtrace: Vec<String>,
+}
+
+#[derive(Deserialize)]
+struct RspecSummary {
+ duration: f64,
+ example_count: usize,
+ failure_count: usize,
+ pending_count: usize,
+ #[serde(default)]
+ errors_outside_of_examples_count: usize,
+}
+
+// ── Public entry point ───────────────────────────────────────────────────────
+
+pub fn run(args: &[String], verbose: u8) -> Result<()> {
+ let timer = tracking::TimedExecution::start();
+
+ let mut cmd = ruby_exec("rspec");
+
+ // Inject --format json unless the user already specified a format.
+ // Handles: --format, -f, --format=..., -fj, -fjson, -fdocumentation (from PR #534)
+ let has_format = args.iter().any(|a| {
+ a == "--format"
+ || a == "-f"
+ || a.starts_with("--format=")
+ || (a.starts_with("-f") && a.len() > 2 && !a.starts_with("--"))
+ });
+
+ if !has_format {
+ cmd.arg("--format").arg("json");
+ }
+
+ cmd.args(args);
+
+ if verbose > 0 {
+ let injected = if has_format { "" } else { " --format json" };
+ eprintln!("Running: rspec{} {}", injected, args.join(" "));
+ }
+
+ let output = cmd.output().context(
+ "Failed to run rspec. Is it installed? Try: gem install rspec or add it to your Gemfile",
+ )?;
+
+ let stdout = String::from_utf8_lossy(&output.stdout);
+ let stderr = String::from_utf8_lossy(&output.stderr);
+ let raw = format!("{}\n{}", stdout, stderr);
+
+ let exit_code = exit_code_from_output(&output, "rspec");
+
+ let filtered = if stdout.trim().is_empty() && !output.status.success() {
+ "RSpec: FAILED (no stdout, see stderr below)".to_string()
+ } else if has_format {
+ // User specified format — use text fallback on stripped output
+ let stripped = strip_noise(&stdout);
+ filter_rspec_text(&stripped)
+ } else {
+ filter_rspec_output(&stdout)
+ };
+
+ if let Some(hint) = crate::tee::tee_and_hint(&raw, "rspec", exit_code) {
+ println!("{}\n{}", filtered, hint);
+ } else {
+ println!("{}", filtered);
+ }
+
+ if !stderr.trim().is_empty() && (!output.status.success() || verbose > 0) {
+ eprintln!("{}", stderr.trim());
+ }
+
+ timer.track(
+ &format!("rspec {}", args.join(" ")),
+ &format!("rtk rspec {}", args.join(" ")),
+ &raw,
+ &filtered,
+ );
+
+ if !output.status.success() {
+ std::process::exit(exit_code);
+ }
+
+ Ok(())
+}
+
+// ── Noise stripping ─────────────────────────────────────────────────────────
+
+/// Remove noise lines: Spring preloader, SimpleCov, DEPRECATION warnings,
+/// "Finished in" timing line, and Capybara screenshot details (keep path only).
+fn strip_noise(output: &str) -> String {
+ let mut result = Vec::new();
+ let mut in_simplecov_block = false;
+
+ for line in output.lines() {
+ let trimmed = line.trim();
+
+ // Skip Spring preloader messages
+ if RE_SPRING.is_match(trimmed) {
+ continue;
+ }
+
+ // Skip lines starting with "DEPRECATION WARNING:" (single-line only)
+ if RE_DEPRECATION.is_match(trimmed) {
+ continue;
+ }
+
+ // Skip "Finished in N seconds" line
+ if RE_FINISHED_IN.is_match(trimmed) {
+ continue;
+ }
+
+ // SimpleCov block detection: once we see it, skip until blank line
+ if RE_SIMPLECOV.is_match(trimmed) {
+ in_simplecov_block = true;
+ continue;
+ }
+ if in_simplecov_block {
+ if trimmed.is_empty() {
+ in_simplecov_block = false;
+ }
+ continue;
+ }
+
+ // Capybara screenshots: keep only the path
+ if let Some(caps) = RE_SCREENSHOT.captures(trimmed) {
+ if let Some(path) = caps.get(1) {
+ result.push(format!("[screenshot: {}]", path.as_str().trim()));
+ continue;
+ }
+ }
+
+ result.push(line.to_string());
+ }
+
+ result.join("\n")
+}
+
+// ── Output filtering ─────────────────────────────────────────────────────────
+
+fn filter_rspec_output(output: &str) -> String {
+ if output.trim().is_empty() {
+ return "RSpec: No output".to_string();
+ }
+
+ // Try parsing as JSON first (happy path when --format json is injected)
+ if let Ok(rspec) = serde_json::from_str::<RspecOutput>(output) {
+ return build_rspec_summary(&rspec);
+ }
+
+ // Strip noise (Spring, SimpleCov, etc.) and retry JSON parse
+ let stripped = strip_noise(output);
+ match serde_json::from_str::<RspecOutput>(&stripped) {
+ Ok(rspec) => return build_rspec_summary(&rspec),
+ Err(e) => {
+ eprintln!(
+ "[rtk] rspec: JSON parse failed ({}), using text fallback",
+ e
+ );
+ }
+ }
+
+ filter_rspec_text(&stripped)
+}
+
+fn build_rspec_summary(rspec: &RspecOutput) -> String {
+ let s = &rspec.summary;
+
+ if s.example_count == 0 && s.errors_outside_of_examples_count == 0 {
+ return "RSpec: No examples found".to_string();
+ }
+
+ if s.example_count == 0 && s.errors_outside_of_examples_count > 0 {
+ return format!(
+ "RSpec: {} errors outside of examples ({:.2}s)",
+ s.errors_outside_of_examples_count, s.duration
+ );
+ }
+
+ if s.failure_count == 0 && s.errors_outside_of_examples_count == 0 {
+ let passed = s.example_count.saturating_sub(s.pending_count);
+ let mut result = format!("✓ RSpec: {} passed", passed);
+ if s.pending_count > 0 {
+ result.push_str(&format!(", {} pending", s.pending_count));
+ }
+ result.push_str(&format!(" ({:.2}s)", s.duration));
+ return result;
+ }
+
+ let passed = s
+ .example_count
+ .saturating_sub(s.failure_count + s.pending_count);
+ let mut result = format!("RSpec: {} passed, {} failed", passed, s.failure_count);
+ if s.pending_count > 0 {
+ result.push_str(&format!(", {} pending", s.pending_count));
+ }
+ result.push_str(&format!(" ({:.2}s)\n", s.duration));
+ result.push_str("═══════════════════════════════════════\n");
+
+ let failures: Vec<&RspecExample> = rspec
+ .examples
+ .iter()
+ .filter(|e| e.status == "failed")
+ .collect();
+
+ if failures.is_empty() {
+ return result.trim().to_string();
+ }
+
+ result.push_str("\nFailures:\n");
+
+ for (i, example) in failures.iter().take(5).enumerate() {
+ result.push_str(&format!(
+ "{}. ❌ {}\n {}:{}\n",
+ i + 1,
+ example.full_description,
+ example.file_path,
+ example.line_number
+ ));
+
+ if let Some(exc) = &example.exception {
+ let short_class = exc.class.split("::").last().unwrap_or(&exc.class);
+ let first_msg = exc.message.lines().next().unwrap_or("");
+ result.push_str(&format!(
+ " {}: {}\n",
+ short_class,
+ truncate(first_msg, 120)
+ ));
+
+ // First backtrace line not from gems/rspec internals
+ for bt in &exc.backtrace {
+ if !bt.contains("/gems/") && !bt.contains("lib/rspec") {
+ result.push_str(&format!(" {}\n", truncate(bt, 120)));
+ break;
+ }
+ }
+ }
+
+ if i < failures.len().min(5) - 1 {
+ result.push('\n');
+ }
+ }
+
+ if failures.len() > 5 {
+ result.push_str(&format!("\n... +{} more failures\n", failures.len() - 5));
+ }
+
+ result.trim().to_string()
+}
+
+/// State machine text fallback parser for when JSON is unavailable.
+fn filter_rspec_text(output: &str) -> String {
+ #[derive(PartialEq)]
+ enum State {
+ Header,
+ Failures,
+ FailedExamples,
+ Summary,
+ }
+
+ let mut state = State::Header;
+ let mut failures: Vec<String> = Vec::new();
+ let mut current_failure = String::new();
+ let mut summary_line = String::new();
+
+ for line in output.lines() {
+ let trimmed = line.trim();
+
+ match state {
+ State::Header => {
+ if trimmed == "Failures:" {
+ state = State::Failures;
+ } else if trimmed == "Failed examples:" {
+ state = State::FailedExamples;
+ } else if RE_RSPEC_SUMMARY.is_match(trimmed) {
+ summary_line = trimmed.to_string();
+ state = State::Summary;
+ }
+ }
+ State::Failures => {
+ // New failure block starts with numbered pattern like " 1) ..."
+ if is_numbered_failure(trimmed) {
+ if !current_failure.trim().is_empty() {
+ failures.push(compact_failure_block(&current_failure));
+ }
+ current_failure = trimmed.to_string();
+ current_failure.push('\n');
+ } else if trimmed == "Failed examples:" {
+ if !current_failure.trim().is_empty() {
+ failures.push(compact_failure_block(&current_failure));
+ }
+ current_failure.clear();
+ state = State::FailedExamples;
+ } else if RE_RSPEC_SUMMARY.is_match(trimmed) {
+ if !current_failure.trim().is_empty() {
+ failures.push(compact_failure_block(&current_failure));
+ }
+ current_failure.clear();
+ summary_line = trimmed.to_string();
+ state = State::Summary;
+ } else if !trimmed.is_empty() {
+ // Skip gem-internal backtrace lines
+ if is_gem_backtrace(trimmed) {
+ continue;
+ }
+ current_failure.push_str(trimmed);
+ current_failure.push('\n');
+ }
+ }
+ State::FailedExamples => {
+ if RE_RSPEC_SUMMARY.is_match(trimmed) {
+ summary_line = trimmed.to_string();
+ state = State::Summary;
+ }
+ // Skip "Failed examples:" section (just rspec commands to re-run)
+ }
+ State::Summary => {
+ break;
+ }
+ }
+ }
+
+ // Capture remaining failure
+ if !current_failure.trim().is_empty() && state == State::Failures {
+ failures.push(compact_failure_block(&current_failure));
+ }
+
+ // If we found a summary line, build result
+ if !summary_line.is_empty() {
+ if failures.is_empty() {
+ return format!("RSpec: {}", summary_line);
+ }
+ let mut result = format!("RSpec: {}\n", summary_line);
+ result.push_str("═══════════════════════════════════════\n\n");
+ for (i, failure) in failures.iter().take(5).enumerate() {
+ result.push_str(&format!("{}. ❌ {}\n", i + 1, failure));
+ if i < failures.len().min(5) - 1 {
+ result.push('\n');
+ }
+ }
+ if failures.len() > 5 {
+ result.push_str(&format!("\n... +{} more failures\n", failures.len() - 5));
+ }
+ return result.trim().to_string();
+ }
+
+ // Fallback: look for summary anywhere
+ for line in output.lines().rev() {
+ let t = line.trim();
+ if t.contains("example") && (t.contains("failure") || t.contains("pending")) {
+ return format!("RSpec: {}", t);
+ }
+ }
+
+ // Last resort: last 5 lines
+ fallback_tail(output, "rspec", 5)
+}
+
+/// Check if a line is a numbered failure like "1) User#full_name..."
+fn is_numbered_failure(line: &str) -> bool {
+ let trimmed = line.trim();
+ if let Some(pos) = trimmed.find(')') {
+ let prefix = &trimmed[..pos];
+ prefix.chars().all(|c| c.is_ascii_digit()) && !prefix.is_empty()
+ } else {
+ false
+ }
+}
+
+/// Check if a backtrace line is from gems/rspec internals.
+fn is_gem_backtrace(line: &str) -> bool {
+ line.contains("/gems/")
+ || line.contains("lib/rspec")
+ || line.contains("lib/ruby/")
+ || line.contains("vendor/bundle")
+}
+
+/// Compact a failure block: extract key info, strip verbose backtrace.
+fn compact_failure_block(block: &str) -> String {
+ let mut lines: Vec<&str> = block.lines().collect();
+
+ // Remove empty lines
+ lines.retain(|l| !l.trim().is_empty());
+
+ // Extract spec file:line (lines starting with # ./spec/ or # ./test/)
+ let mut spec_file = String::new();
+ let mut kept_lines: Vec<String> = Vec::new();
+
+ for line in &lines {
+ let t = line.trim();
+ if t.starts_with("# ./spec/") || t.starts_with("# ./test/") {
+ spec_file = t.trim_start_matches("# ").to_string();
+ } else if t.starts_with('#') && (t.contains("/gems/") || t.contains("lib/rspec")) {
+ // Skip gem backtrace
+ continue;
+ } else {
+ kept_lines.push(t.to_string());
+ }
+ }
+
+ let mut result = kept_lines.join("\n ");
+ if !spec_file.is_empty() {
+ result.push_str(&format!("\n {}", spec_file));
+ }
+ result
+}
+
+// ── Tests ────────────────────────────────────────────────────────────────────
+
+#[cfg(test)]
+mod tests {
+ use super::*;
+ use crate::utils::count_tokens;
+
+ fn all_pass_json() -> &'static str {
+ r#"{
+ "version": "3.12.0",
+ "examples": [
+ {
+ "id": "./spec/models/user_spec.rb[1:1]",
+ "description": "is valid with valid attributes",
+ "full_description": "User is valid with valid attributes",
+ "status": "passed",
+ "file_path": "./spec/models/user_spec.rb",
+ "line_number": 5,
+ "run_time": 0.001234,
+ "pending_message": null,
+ "exception": null
+ },
+ {
+ "id": "./spec/models/user_spec.rb[1:2]",
+ "description": "validates email format",
+ "full_description": "User validates email format",
+ "status": "passed",
+ "file_path": "./spec/models/user_spec.rb",
+ "line_number": 12,
+ "run_time": 0.0008,
+ "pending_message": null,
+ "exception": null
+ }
+ ],
+ "summary": {
+ "duration": 0.015,
+ "example_count": 2,
+ "failure_count": 0,
+ "pending_count": 0,
+ "errors_outside_of_examples_count": 0
+ },
+ "summary_line": "2 examples, 0 failures"
+ }"#
+ }
+
+ fn with_failures_json() -> &'static str {
+ r#"{
+ "version": "3.12.0",
+ "examples": [
+ {
+ "id": "./spec/models/user_spec.rb[1:1]",
+ "description": "is valid",
+ "full_description": "User is valid",
+ "status": "passed",
+ "file_path": "./spec/models/user_spec.rb",
+ "line_number": 5,
+ "run_time": 0.001,
+ "pending_message": null,
+ "exception": null
+ },
+ {
+ "id": "./spec/models/user_spec.rb[1:2]",
+ "description": "saves to database",
+ "full_description": "User saves to database",
+ "status": "failed",
+ "file_path": "./spec/models/user_spec.rb",
+ "line_number": 10,
+ "run_time": 0.002,
+ "pending_message": null,
+ "exception": {
+ "class": "RSpec::Expectations::ExpectationNotMetError",
+ "message": "expected true but got false",
+ "backtrace": [
+ "/usr/local/lib/ruby/gems/3.2.0/gems/rspec-expectations-3.12.0/lib/rspec/expectations/fail_with.rb:37:in `fail_with'",
+ "./spec/models/user_spec.rb:11:in `block (2 levels) in <top (required)>'"
+ ]
+ }
+ }
+ ],
+ "summary": {
+ "duration": 0.123,
+ "example_count": 2,
+ "failure_count": 1,
+ "pending_count": 0,
+ "errors_outside_of_examples_count": 0
+ },
+ "summary_line": "2 examples, 1 failure"
+ }"#
+ }
+
+ // Fixture: RSpec JSON for a run with one passed and one pending example.
+ fn with_pending_json() -> &'static str {
+ r#"{
+ "version": "3.12.0",
+ "examples": [
+ {
+ "id": "./spec/models/post_spec.rb[1:1]",
+ "description": "creates a post",
+ "full_description": "Post creates a post",
+ "status": "passed",
+ "file_path": "./spec/models/post_spec.rb",
+ "line_number": 4,
+ "run_time": 0.002,
+ "pending_message": null,
+ "exception": null
+ },
+ {
+ "id": "./spec/models/post_spec.rb[1:2]",
+ "description": "validates title",
+ "full_description": "Post validates title",
+ "status": "pending",
+ "file_path": "./spec/models/post_spec.rb",
+ "line_number": 8,
+ "run_time": 0.0,
+ "pending_message": "Not yet implemented",
+ "exception": null
+ }
+ ],
+ "summary": {
+ "duration": 0.05,
+ "example_count": 2,
+ "failure_count": 0,
+ "pending_count": 1,
+ "errors_outside_of_examples_count": 0
+ },
+ "summary_line": "2 examples, 0 failures, 1 pending"
+ }"#
+ }
+
+ // Fixture: 10-example all-pass suite (compact one-line examples), used by the
+ // token-savings test to measure compression on a larger payload.
+ fn large_suite_json() -> &'static str {
+ r#"{
+ "version": "3.12.0",
+ "examples": [
+ {"id":"1","description":"test1","full_description":"Suite test1","status":"passed","file_path":"./spec/a_spec.rb","line_number":1,"run_time":0.01,"pending_message":null,"exception":null},
+ {"id":"2","description":"test2","full_description":"Suite test2","status":"passed","file_path":"./spec/a_spec.rb","line_number":2,"run_time":0.01,"pending_message":null,"exception":null},
+ {"id":"3","description":"test3","full_description":"Suite test3","status":"passed","file_path":"./spec/a_spec.rb","line_number":3,"run_time":0.01,"pending_message":null,"exception":null},
+ {"id":"4","description":"test4","full_description":"Suite test4","status":"passed","file_path":"./spec/a_spec.rb","line_number":4,"run_time":0.01,"pending_message":null,"exception":null},
+ {"id":"5","description":"test5","full_description":"Suite test5","status":"passed","file_path":"./spec/a_spec.rb","line_number":5,"run_time":0.01,"pending_message":null,"exception":null},
+ {"id":"6","description":"test6","full_description":"Suite test6","status":"passed","file_path":"./spec/a_spec.rb","line_number":6,"run_time":0.01,"pending_message":null,"exception":null},
+ {"id":"7","description":"test7","full_description":"Suite test7","status":"passed","file_path":"./spec/a_spec.rb","line_number":7,"run_time":0.01,"pending_message":null,"exception":null},
+ {"id":"8","description":"test8","full_description":"Suite test8","status":"passed","file_path":"./spec/a_spec.rb","line_number":8,"run_time":0.01,"pending_message":null,"exception":null},
+ {"id":"9","description":"test9","full_description":"Suite test9","status":"passed","file_path":"./spec/a_spec.rb","line_number":9,"run_time":0.01,"pending_message":null,"exception":null},
+ {"id":"10","description":"test10","full_description":"Suite test10","status":"passed","file_path":"./spec/a_spec.rb","line_number":10,"run_time":0.01,"pending_message":null,"exception":null}
+ ],
+ "summary": {
+ "duration": 1.234,
+ "example_count": 10,
+ "failure_count": 0,
+ "pending_count": 0,
+ "errors_outside_of_examples_count": 0
+ },
+ "summary_line": "10 examples, 0 failures"
+ }"#
+ }
+
+ // All-pass run: expect the ✓ one-liner with counts and a duration.
+ #[test]
+ fn test_filter_rspec_all_pass() {
+ let result = filter_rspec_output(all_pass_json());
+ assert!(result.starts_with("✓ RSpec:"));
+ assert!(result.contains("2 passed"));
+ assert!(result.contains("0.01s") || result.contains("0.02s"));
+ }
+
+ // Failing run: counts, failed example name, location, and exception details
+ // must all survive filtering.
+ #[test]
+ fn test_filter_rspec_with_failures() {
+ let result = filter_rspec_output(with_failures_json());
+ assert!(result.contains("1 passed, 1 failed"));
+ assert!(result.contains("❌ User saves to database"));
+ assert!(result.contains("user_spec.rb:10"));
+ assert!(result.contains("ExpectationNotMetError"));
+ assert!(result.contains("expected true but got false"));
+ }
+
+ // Pending examples are reported but do not fail the summary.
+ #[test]
+ fn test_filter_rspec_with_pending() {
+ let result = filter_rspec_output(with_pending_json());
+ assert!(result.starts_with("✓ RSpec:"));
+ assert!(result.contains("1 passed"));
+ assert!(result.contains("1 pending"));
+ }
+
+ // Empty input must produce the fixed "No output" message, not panic.
+ #[test]
+ fn test_filter_rspec_empty_output() {
+ let result = filter_rspec_output("");
+ assert_eq!(result, "RSpec: No output");
+ }
+
+ // Zero examples with a clean summary → explicit "No examples found".
+ #[test]
+ fn test_filter_rspec_no_examples() {
+ let json = r#"{
+ "version": "3.12.0",
+ "examples": [],
+ "summary": {
+ "duration": 0.001,
+ "example_count": 0,
+ "failure_count": 0,
+ "pending_count": 0,
+ "errors_outside_of_examples_count": 0
+ }
+ }"#;
+ let result = filter_rspec_output(json);
+ assert_eq!(result, "RSpec: No examples found");
+ }
+
+ // Zero examples but errors_outside_of_examples_count > 0 (e.g. a spec file
+ // failed to load) must NOT be reported as a clean "no examples" run.
+ #[test]
+ fn test_filter_rspec_errors_outside_examples() {
+ let json = r#"{
+ "version": "3.12.0",
+ "examples": [],
+ "summary": {
+ "duration": 0.01,
+ "example_count": 0,
+ "failure_count": 0,
+ "pending_count": 0,
+ "errors_outside_of_examples_count": 1
+ }
+ }"#;
+ let result = filter_rspec_output(json);
+ // Should NOT say "No examples found" — there was an error outside examples
+ assert!(
+ !result.contains("No examples found"),
+ "errors outside examples should not be treated as 'no examples': {}",
+ result
+ );
+ }
+
+ // Non-JSON (progress-format) output goes through the text fallback and
+ // still surfaces the summary line and a failure marker.
+ #[test]
+ fn test_filter_rspec_text_fallback() {
+ let text = r#"
+..F.
+
+Failures:
+
+  1) User is valid
+     Failure/Error: expect(user).to be_valid
+       expected true got false
+     # ./spec/models/user_spec.rb:5
+
+4 examples, 1 failure
+"#;
+ let result = filter_rspec_output(text);
+ assert!(result.contains("RSpec:"));
+ assert!(result.contains("4 examples, 1 failure"));
+ assert!(result.contains("❌"), "should show failure marker");
+ }
+
+ // Text fallback must extract each numbered failure and prefer the project
+ // spec-file frame over gem-internal backtrace frames.
+ #[test]
+ fn test_filter_rspec_text_fallback_extracts_failures() {
+ let text = r#"Randomized with seed 12345
+..F...E..
+
+Failures:
+
+  1) User#full_name returns first and last name
+     Failure/Error: expect(user.full_name).to eq("John Doe")
+       expected: "John Doe"
+            got: "John D."
+     # /usr/local/lib/ruby/gems/3.2.0/gems/rspec-expectations-3.12.0/lib/rspec/expectations/fail_with.rb:37
+     # ./spec/models/user_spec.rb:15
+
+  2) Api::Controller#index fails
+     Failure/Error: get :index
+       expected 200 got 500
+     # ./spec/controllers/api_spec.rb:42
+
+9 examples, 2 failures
+"#;
+ let result = filter_rspec_text(text);
+ assert!(result.contains("2 failures"));
+ assert!(result.contains("❌"));
+ // Should show spec file path, not gem backtrace
+ assert!(result.contains("spec/models/user_spec.rb:15"));
+ }
+
+ // JSON path: gem frames in the exception backtrace are filtered out in
+ // favor of the ./spec frame.
+ #[test]
+ fn test_filter_rspec_backtrace_filters_gems() {
+ let result = filter_rspec_output(with_failures_json());
+ // Should show the spec file backtrace, not the gem one
+ assert!(result.contains("user_spec.rb:11"));
+ assert!(!result.contains("gems/rspec-expectations"));
+ }
+
+ // Exception class names are shortened to the final path segment.
+ #[test]
+ fn test_filter_rspec_exception_class_shortened() {
+ let result = filter_rspec_output(with_failures_json());
+ // Should show "ExpectationNotMetError" not "RSpec::Expectations::ExpectationNotMetError"
+ assert!(result.contains("ExpectationNotMetError"));
+ assert!(!result.contains("RSpec::Expectations::ExpectationNotMetError"));
+ }
+
+ // JSON path caps inline failures at 5 and reports the overflow count.
+ #[test]
+ fn test_filter_rspec_many_failures_caps_at_five() {
+ let json = r#"{
+ "version": "3.12.0",
+ "examples": [
+ {"id":"1","description":"test 1","full_description":"A test 1","status":"failed","file_path":"./spec/a_spec.rb","line_number":5,"run_time":0.001,"pending_message":null,"exception":{"class":"RuntimeError","message":"boom 1","backtrace":["./spec/a_spec.rb:6:in `block'"]}},
+ {"id":"2","description":"test 2","full_description":"A test 2","status":"failed","file_path":"./spec/a_spec.rb","line_number":10,"run_time":0.001,"pending_message":null,"exception":{"class":"RuntimeError","message":"boom 2","backtrace":["./spec/a_spec.rb:11:in `block'"]}},
+ {"id":"3","description":"test 3","full_description":"A test 3","status":"failed","file_path":"./spec/a_spec.rb","line_number":15,"run_time":0.001,"pending_message":null,"exception":{"class":"RuntimeError","message":"boom 3","backtrace":["./spec/a_spec.rb:16:in `block'"]}},
+ {"id":"4","description":"test 4","full_description":"A test 4","status":"failed","file_path":"./spec/a_spec.rb","line_number":20,"run_time":0.001,"pending_message":null,"exception":{"class":"RuntimeError","message":"boom 4","backtrace":["./spec/a_spec.rb:21:in `block'"]}},
+ {"id":"5","description":"test 5","full_description":"A test 5","status":"failed","file_path":"./spec/a_spec.rb","line_number":25,"run_time":0.001,"pending_message":null,"exception":{"class":"RuntimeError","message":"boom 5","backtrace":["./spec/a_spec.rb:26:in `block'"]}},
+ {"id":"6","description":"test 6","full_description":"A test 6","status":"failed","file_path":"./spec/a_spec.rb","line_number":30,"run_time":0.001,"pending_message":null,"exception":{"class":"RuntimeError","message":"boom 6","backtrace":["./spec/a_spec.rb:31:in `block'"]}}
+ ],
+ "summary": {
+ "duration": 0.05,
+ "example_count": 6,
+ "failure_count": 6,
+ "pending_count": 0,
+ "errors_outside_of_examples_count": 0
+ },
+ "summary_line": "6 examples, 6 failures"
+ }"#;
+ let result = filter_rspec_output(json);
+ assert!(result.contains("1. ❌"), "should show first failure");
+ assert!(result.contains("5. ❌"), "should show fifth failure");
+ assert!(!result.contains("6. ❌"), "should not show sixth inline");
+ assert!(
+ result.contains("+1 more"),
+ "should show overflow count: {}",
+ result
+ );
+ }
+
+ #[test]
+ fn test_filter_rspec_text_fallback_no_summary() {
+ // If no summary line, returns last 5 lines (does not panic)
+ let text = "some output\nwithout a summary line";
+ let result = filter_rspec_output(text);
+ assert!(!result.is_empty());
+ }
+
+ // Malformed JSON must degrade to the text fallback instead of panicking.
+ #[test]
+ fn test_filter_rspec_invalid_json_falls_back() {
+ let garbage = "not json at all { broken";
+ let result = filter_rspec_output(garbage);
+ assert!(!result.is_empty(), "should not panic on invalid JSON");
+ }
+
+ // ── Noise stripping tests ────────────────────────────────────────────────
+ // Each case feeds one category of non-signal output (preloader banners,
+ // coverage reports, deprecations, timing lines, screenshot paths) through
+ // strip_noise and checks the noise is gone while the summary survives.
+
+ #[test]
+ fn test_strip_noise_spring() {
+ let input = "Running via Spring preloader in process 12345\n...\n3 examples, 0 failures";
+ let result = strip_noise(input);
+ assert!(!result.contains("Spring"));
+ assert!(result.contains("3 examples"));
+ }
+
+ #[test]
+ fn test_strip_noise_simplecov() {
+ let input = "...\n\nCoverage report generated for RSpec to /app/coverage.\n142 / 200 LOC (71.0%) covered.\n\n3 examples, 0 failures";
+ let result = strip_noise(input);
+ assert!(!result.contains("Coverage report"));
+ assert!(!result.contains("LOC"));
+ assert!(result.contains("3 examples"));
+ }
+
+ #[test]
+ fn test_strip_noise_deprecation() {
+ let input = "DEPRECATION WARNING: Using `return` in before callbacks is deprecated.\n...\n3 examples, 0 failures";
+ let result = strip_noise(input);
+ assert!(!result.contains("DEPRECATION"));
+ assert!(result.contains("3 examples"));
+ }
+
+ #[test]
+ fn test_strip_noise_finished_in() {
+ let input = "...\nFinished in 12.34 seconds (files took 3.21 seconds to load)\n3 examples, 0 failures";
+ let result = strip_noise(input);
+ assert!(!result.contains("Finished in 12.34"));
+ assert!(result.contains("3 examples"));
+ }
+
+ // Capybara screenshot lines are rewritten to a compact [screenshot: …]
+ // form rather than dropped — the path is still useful for debugging.
+ #[test]
+ fn test_strip_noise_capybara_screenshot() {
+ let input = "...\n saved screenshot to /tmp/capybara/screenshots/2026_failed.png\n3 examples, 1 failure";
+ let result = strip_noise(input);
+ assert!(result.contains("[screenshot:"));
+ assert!(result.contains("failed.png"));
+ assert!(!result.contains("saved screenshot to"));
+ }
+
+ // ── Token savings tests ──────────────────────────────────────────────────
+ // These assert a minimum compression ratio (token count before vs after
+ // filtering) rather than exact output, so filter wording can evolve freely.
+
+ #[test]
+ fn test_token_savings_all_pass() {
+ let input = large_suite_json();
+ let output = filter_rspec_output(input);
+
+ let input_tokens = count_tokens(input);
+ let output_tokens = count_tokens(&output);
+ let savings = 100.0 - (output_tokens as f64 / input_tokens as f64 * 100.0);
+
+ assert!(
+ savings >= 60.0,
+ "RSpec all-pass: expected ≥60% savings, got {:.1}% (in={}, out={})",
+ savings,
+ input_tokens,
+ output_tokens
+ );
+ }
+
+ #[test]
+ fn test_token_savings_with_failures() {
+ let input = with_failures_json();
+ let output = filter_rspec_output(input);
+
+ let input_tokens = count_tokens(input);
+ let output_tokens = count_tokens(&output);
+ let savings = 100.0 - (output_tokens as f64 / input_tokens as f64 * 100.0);
+
+ assert!(
+ savings >= 60.0,
+ "RSpec failures: expected ≥60% savings, got {:.1}% (in={}, out={})",
+ savings,
+ input_tokens,
+ output_tokens
+ );
+ }
+
+ // Text fallback keeps more detail than JSON filtering, so the bar is lower
+ // (≥30% instead of ≥60%).
+ #[test]
+ fn test_token_savings_text_fallback() {
+ let input = r#"Running via Spring preloader in process 12345
+Randomized with seed 54321
+..F...E..F..
+
+Failures:
+
+  1) User#full_name returns first and last name
+     Failure/Error: expect(user.full_name).to eq("John Doe")
+       expected: "John Doe"
+            got: "John D."
+     # /usr/local/lib/ruby/gems/3.2.0/gems/rspec-expectations-3.12.0/lib/rspec/expectations/fail_with.rb:37
+     # ./spec/models/user_spec.rb:15
+     # /usr/local/lib/ruby/gems/3.2.0/gems/rspec-core-3.12.0/lib/rspec/core/example.rb:258
+
+  2) Api::Controller#index returns success
+     Failure/Error: get :index
+       expected 200 got 500
+     # /usr/local/lib/ruby/gems/3.2.0/gems/rspec-expectations-3.12.0/lib/rspec/expectations/fail_with.rb:37
+     # ./spec/controllers/api_spec.rb:42
+     # /usr/local/lib/ruby/gems/3.2.0/gems/rspec-core-3.12.0/lib/rspec/core/example.rb:258
+
+Failed examples:
+
+rspec ./spec/models/user_spec.rb:15 # User#full_name returns first and last name
+rspec ./spec/controllers/api_spec.rb:42 # Api::Controller#index returns success
+
+12 examples, 2 failures
+
+Coverage report generated for RSpec to /app/coverage.
+142 / 200 LOC (71.0%) covered.
+"#;
+ let output = filter_rspec_text(input);
+
+ let input_tokens = count_tokens(input);
+ let output_tokens = count_tokens(&output);
+ let savings = 100.0 - (output_tokens as f64 / input_tokens as f64 * 100.0);
+
+ assert!(
+ savings >= 30.0,
+ "RSpec text fallback: expected ≥30% savings, got {:.1}% (in={}, out={})",
+ savings,
+ input_tokens,
+ output_tokens
+ );
+ }
+
+ // ── ANSI handling tests ────────────────────────────────────────────────
+
+ #[test]
+ fn test_filter_rspec_ansi_wrapped_json() {
+ // ANSI codes around JSON should fall back to text, not panic
+ let input = "\x1b[32m{\"version\":\"3.12.0\"\x1b[0m broken json";
+ let result = filter_rspec_output(input);
+ assert!(!result.is_empty(), "should not panic on ANSI-wrapped JSON");
+ }
+
+ // ── Text fallback >5 failures truncation (Issue 9) ─────────────────────
+
+ // Text path mirrors the JSON path's cap: 5 inline failures + "+N more".
+ #[test]
+ fn test_filter_rspec_text_many_failures_caps_at_five() {
+ let text = r#"Randomized with seed 12345
+.......FFFFFFF
+
+Failures:
+
+  1) User#full_name fails
+     Failure/Error: expect(true).to eq(false)
+     # ./spec/models/user_spec.rb:5
+
+  2) Post#title fails
+     Failure/Error: expect(true).to eq(false)
+     # ./spec/models/post_spec.rb:10
+
+  3) Comment#body fails
+     Failure/Error: expect(true).to eq(false)
+     # ./spec/models/comment_spec.rb:15
+
+  4) Session#token fails
+     Failure/Error: expect(true).to eq(false)
+     # ./spec/models/session_spec.rb:20
+
+  5) Profile#avatar fails
+     Failure/Error: expect(true).to eq(false)
+     # ./spec/models/profile_spec.rb:25
+
+  6) Team#members fails
+     Failure/Error: expect(true).to eq(false)
+     # ./spec/models/team_spec.rb:30
+
+  7) Role#permissions fails
+     Failure/Error: expect(true).to eq(false)
+     # ./spec/models/role_spec.rb:35
+
+14 examples, 7 failures
+"#;
+ let result = filter_rspec_text(text);
+ assert!(result.contains("1. ❌"), "should show first failure");
+ assert!(result.contains("5. ❌"), "should show fifth failure");
+ assert!(!result.contains("6. ❌"), "should not show sixth inline");
+ assert!(
+ result.contains("+2 more"),
+ "should show overflow count: {}",
+ result
+ );
+ }
+
+ // ── Header -> FailedExamples transition (Issue 13) ──────────────────────
+
+ #[test]
+ fn test_filter_rspec_text_header_to_failed_examples() {
+ // Input that has "Failed examples:" directly (no "Failures:" block),
+ // followed by a summary line
+ let text = r#"..F..
+
+Failed examples:
+
+rspec ./spec/models/user_spec.rb:5 # User is valid
+
+5 examples, 1 failure
+"#;
+ let result = filter_rspec_text(text);
+ assert!(
+ result.contains("5 examples, 1 failure"),
+ "should contain summary: {}",
+ result
+ );
+ assert!(
+ result.contains("RSpec:"),
+ "should have RSpec prefix: {}",
+ result
+ );
+ }
+
+ // ── Format flag detection tests (from PR #534) ───────────────────────
+ // NOTE(review): these tests re-implement the detection predicate inline
+ // instead of calling the production helper, so they would keep passing even
+ // if the real implementation drifted — presumably the helper is private to
+ // the command module; consider exposing it (pub(crate)) and testing it
+ // directly. TODO confirm.
+
+ #[test]
+ fn test_has_format_flag_none() {
+ let args: &[String] = &[];
+ assert!(!args.iter().any(|a| {
+ a == "--format"
+ || a == "-f"
+ || a.starts_with("--format=")
+ || (a.starts_with("-f") && a.len() > 2 && !a.starts_with("--"))
+ }));
+ }
+
+ #[test]
+ fn test_has_format_flag_long() {
+ let args = ["--format".to_string(), "documentation".to_string()];
+ assert!(args.iter().any(|a| a == "--format"));
+ }
+
+ #[test]
+ fn test_has_format_flag_short_combined() {
+ // -fjson, -fj, -fdocumentation
+ for flag in &["-fjson", "-fj", "-fdocumentation"] {
+ let args = [flag.to_string()];
+ assert!(
+ args.iter()
+ .any(|a| a.starts_with("-f") && a.len() > 2 && !a.starts_with("--")),
+ "should detect {}",
+ flag
+ );
+ }
+ }
+
+ #[test]
+ fn test_has_format_flag_equals() {
+ let args = ["--format=json".to_string()];
+ assert!(args.iter().any(|a| a.starts_with("--format=")));
+ }
+}
diff --git a/src/rubocop_cmd.rs b/src/rubocop_cmd.rs
new file mode 100644
index 000000000..db2d0ac4f
--- /dev/null
+++ b/src/rubocop_cmd.rs
@@ -0,0 +1,659 @@
+//! RuboCop linter filter.
+//!
+//! Injects `--format json` for structured output, parses offenses grouped by
+//! file and sorted by severity. Falls back to text parsing for autocorrect mode,
+//! when the user specifies a custom format, or when injected JSON output fails
+//! to parse.
+
+use crate::tracking;
+use crate::utils::{exit_code_from_output, ruby_exec};
+use anyhow::{Context, Result};
+use serde::Deserialize;
+
+// ── JSON structures matching RuboCop's --format json output ─────────────────
+
+/// Root object of `rubocop --format json` output.
+#[derive(Deserialize)]
+struct RubocopOutput {
+    /// One entry per inspected file. Element type restored to `RubocopFile`:
+    /// the generic parameter had been stripped in transit and a bare `Vec`
+    /// does not compile.
+    files: Vec<RubocopFile>,
+    summary: RubocopSummary,
+}
+
+/// One inspected file and the offenses RuboCop found in it.
+#[derive(Deserialize)]
+struct RubocopFile {
+    path: String,
+    /// Element type restored to `RubocopOffense` (generic parameter had been
+    /// stripped in transit; bare `Vec` does not compile).
+    offenses: Vec<RubocopOffense>,
+}
+
+/// A single offense reported by RuboCop (cop name, severity, message,
+/// whether `-A` can fix it, and where it starts).
+#[derive(Deserialize)]
+struct RubocopOffense {
+ cop_name: String,
+ severity: String,
+ message: String,
+ correctable: bool,
+ location: RubocopLocation,
+}
+
+/// Source location of an offense; only the start line is used for display.
+#[derive(Deserialize)]
+struct RubocopLocation {
+ start_line: usize,
+}
+
+/// Run-level counters from the JSON `summary` object.
+#[derive(Deserialize)]
+struct RubocopSummary {
+ offense_count: usize,
+ #[allow(dead_code)]
+ target_file_count: usize,
+ inspected_file_count: usize,
+ // Absent on older RuboCop versions; serde defaults it to 0 then.
+ #[serde(default)]
+ correctable_offense_count: usize,
+}
+
+// ── Public entry point ───────────────────────────────────────────────────────
+
+/// Run `rubocop` with the given CLI args, print a condensed report, and exit
+/// with rubocop's own exit code when the run fails.
+///
+/// Injects `--format json` (for structured parsing) unless the user supplied
+/// their own format flag or requested autocorrect; those paths are parsed as
+/// plain text instead.
+pub fn run(args: &[String], verbose: u8) -> Result<()> {
+    let timer = tracking::TimedExecution::start();
+
+    let mut cmd = ruby_exec("rubocop");
+
+    // Detect autocorrect mode. Covers both the legacy --auto-correct
+    // spellings and the names used since RuboCop 1.30
+    // (--autocorrect / --autocorrect-all).
+    let is_autocorrect = args.iter().any(|a| {
+        a == "-a"
+            || a == "-A"
+            || a == "--auto-correct"
+            || a == "--auto-correct-all"
+            || a == "--autocorrect"
+            || a == "--autocorrect-all"
+    });
+
+    // Inject --format json unless the user already specified a format.
+    // A bare `starts_with("-f")` would also match unrelated long flags such
+    // as --fail-level or --force-exclusion, so long options only count when
+    // they are literally --format / --format=…  (same predicate shape as the
+    // rspec command's flag detection).
+    let has_format = args.iter().any(|a| {
+        a == "--format"
+            || a.starts_with("--format=")
+            || (a.starts_with("-f") && !a.starts_with("--"))
+    });
+
+    if !has_format && !is_autocorrect {
+        cmd.arg("--format").arg("json");
+    }
+
+    cmd.args(args);
+
+    if verbose > 0 {
+        eprintln!("Running: rubocop {}", args.join(" "));
+    }
+
+    let output = cmd.output().context(
+        "Failed to run rubocop. Is it installed? Try: gem install rubocop or add it to your Gemfile",
+    )?;
+
+    let stdout = String::from_utf8_lossy(&output.stdout);
+    let stderr = String::from_utf8_lossy(&output.stderr);
+    let raw = format!("{}\n{}", stdout, stderr);
+
+    let exit_code = exit_code_from_output(&output, "rubocop");
+
+    // NOTE(review): when the user passes `--format json` themselves we still
+    // take the text path below (and fall back to a raw tail) — confirm this
+    // is intended rather than parsing their JSON.
+    let filtered = if stdout.trim().is_empty() && !output.status.success() {
+        "RuboCop: FAILED (no stdout, see stderr below)".to_string()
+    } else if has_format || is_autocorrect {
+        filter_rubocop_text(&stdout)
+    } else {
+        filter_rubocop_json(&stdout)
+    };
+
+    if let Some(hint) = crate::tee::tee_and_hint(&raw, "rubocop", exit_code) {
+        println!("{}\n{}", filtered, hint);
+    } else {
+        println!("{}", filtered);
+    }
+
+    // Surface stderr on failure (or always when verbose).
+    if !stderr.trim().is_empty() && (!output.status.success() || verbose > 0) {
+        eprintln!("{}", stderr.trim());
+    }
+
+    timer.track(
+        &format!("rubocop {}", args.join(" ")),
+        &format!("rtk rubocop {}", args.join(" ")),
+        &raw,
+        &filtered,
+    );
+
+    if !output.status.success() {
+        std::process::exit(exit_code);
+    }
+
+    Ok(())
+}
+
+// ── JSON filtering ───────────────────────────────────────────────────────────
+
+/// Map a RuboCop severity string onto an ordering key: 0 is most severe,
+/// 3 is anything unrecognized.
+fn severity_rank(severity: &str) -> u8 {
+    if matches!(severity, "fatal" | "error") {
+        0
+    } else if severity == "warning" {
+        1
+    } else if matches!(severity, "convention" | "refactor" | "info") {
+        2
+    } else {
+        3
+    }
+}
+
+/// Condense `rubocop --format json` output: files sorted worst-severity
+/// first, at most 10 files × 5 offenses inline, plus a correctable-count
+/// hint. Falls back to a raw tail when the JSON does not parse.
+fn filter_rubocop_json(output: &str) -> String {
+    if output.trim().is_empty() {
+        return "RuboCop: No output".to_string();
+    }
+
+    // Explicit target type restored — the generic parameters had been
+    // stripped in transit; a bare `Result` annotation does not compile and
+    // gives serde nothing to deserialize into.
+    let parsed: Result<RubocopOutput, serde_json::Error> = serde_json::from_str(output);
+    let rubocop = match parsed {
+        Ok(r) => r,
+        Err(e) => {
+            eprintln!("[rtk] rubocop: JSON parse failed ({})", e);
+            return crate::utils::fallback_tail(output, "rubocop (JSON parse error)", 5);
+        }
+    };
+
+    let s = &rubocop.summary;
+
+    if s.offense_count == 0 {
+        return format!("ok ✓ rubocop ({} files)", s.inspected_file_count);
+    }
+
+    // When correctable_offense_count is 0, it could mean the field was absent
+    // (older RuboCop) or genuinely zero. Manual count as consistent fallback.
+    let correctable_count = if s.correctable_offense_count > 0 {
+        s.correctable_offense_count
+    } else {
+        rubocop
+            .files
+            .iter()
+            .flat_map(|f| &f.offenses)
+            .filter(|o| o.correctable)
+            .count()
+    };
+
+    let mut result = format!(
+        "rubocop: {} offenses ({} files)\n",
+        s.offense_count, s.inspected_file_count
+    );
+
+    // Build list of files with offenses, sorted by worst severity then file path
+    let mut files_with_offenses: Vec<&RubocopFile> = rubocop
+        .files
+        .iter()
+        .filter(|f| !f.offenses.is_empty())
+        .collect();
+
+    // Sort files: worst severity first, then alphabetically
+    files_with_offenses.sort_by(|a, b| {
+        let a_worst = a
+            .offenses
+            .iter()
+            .map(|o| severity_rank(&o.severity))
+            .min()
+            .unwrap_or(3);
+        let b_worst = b
+            .offenses
+            .iter()
+            .map(|o| severity_rank(&o.severity))
+            .min()
+            .unwrap_or(3);
+        a_worst.cmp(&b_worst).then(a.path.cmp(&b.path))
+    });
+
+    let max_files = 10;
+    let max_offenses_per_file = 5;
+
+    for file in files_with_offenses.iter().take(max_files) {
+        let short = compact_ruby_path(&file.path);
+        result.push_str(&format!("\n{}\n", short));
+
+        // Sort offenses within file: by severity rank, then by line number
+        let mut sorted_offenses: Vec<&RubocopOffense> = file.offenses.iter().collect();
+        sorted_offenses.sort_by(|a, b| {
+            severity_rank(&a.severity)
+                .cmp(&severity_rank(&b.severity))
+                .then(a.location.start_line.cmp(&b.location.start_line))
+        });
+
+        for offense in sorted_offenses.iter().take(max_offenses_per_file) {
+            // Multi-line messages are truncated to their first line.
+            let first_msg_line = offense.message.lines().next().unwrap_or("");
+            result.push_str(&format!(
+                "  :{} {} — {}\n",
+                offense.location.start_line, offense.cop_name, first_msg_line
+            ));
+        }
+        if sorted_offenses.len() > max_offenses_per_file {
+            result.push_str(&format!(
+                "  ... +{} more\n",
+                sorted_offenses.len() - max_offenses_per_file
+            ));
+        }
+    }
+
+    if files_with_offenses.len() > max_files {
+        result.push_str(&format!(
+            "\n... +{} more files\n",
+            files_with_offenses.len() - max_files
+        ));
+    }
+
+    if correctable_count > 0 {
+        result.push_str(&format!(
+            "\n({} correctable, run `rubocop -A`)",
+            correctable_count
+        ));
+    }
+
+    result.trim().to_string()
+}
+
+// ── Text fallback ────────────────────────────────────────────────────────────
+
+/// Text-mode fallback: summarize plain rubocop output when JSON was not
+/// injected (user-specified format or autocorrect run). Recognizes, in
+/// order: Ruby/Bundler load errors, the autocorrect summary line, and the
+/// plain inspection summary; otherwise returns the last 5 lines.
+fn filter_rubocop_text(output: &str) -> String {
+ // Check for Ruby/Bundler errors first -- show error, truncated to avoid excessive tokens
+ for line in output.lines() {
+ let t = line.trim();
+ if t.contains("cannot load such file")
+ || t.contains("Bundler::GemNotFound")
+ || t.contains("Gem::MissingSpecError")
+ || t.starts_with("rubocop: command not found")
+ || t.starts_with("rubocop: No such file")
+ {
+ let error_lines: Vec<&str> = output.trim().lines().take(20).collect();
+ let truncated = error_lines.join("\n");
+ let total_lines = output.trim().lines().count();
+ if total_lines > 20 {
+ return format!(
+ "RuboCop error:\n{}\n... ({} more lines)",
+ truncated,
+ total_lines - 20
+ );
+ }
+ return format!("RuboCop error:\n{}", truncated);
+ }
+ }
+
+ // Detect autocorrect summary: "N files inspected, M offenses detected, K offenses autocorrected"
+ // Scanned in reverse because the summary sits at the end of the output.
+ for line in output.lines().rev() {
+ let t = line.trim();
+ if t.contains("inspected") && t.contains("autocorrected") {
+ // Extract counts for compact autocorrect message
+ let files = extract_leading_number(t);
+ let corrected = extract_autocorrect_count(t);
+ if files > 0 && corrected > 0 {
+ return format!(
+ "ok ✓ rubocop -A ({} files, {} autocorrected)",
+ files, corrected
+ );
+ }
+ // Counts not parseable — fall back to echoing the summary line.
+ return format!("RuboCop: {}", t);
+ }
+ if t.contains("inspected") && (t.contains("offense") || t.contains("no offenses")) {
+ if t.contains("no offenses") {
+ let files = extract_leading_number(t);
+ if files > 0 {
+ return format!("ok ✓ rubocop ({} files)", files);
+ }
+ return "ok ✓ rubocop (no offenses)".to_string();
+ }
+ return format!("RuboCop: {}", t);
+ }
+ }
+ // Last resort: last 5 lines
+ crate::utils::fallback_tail(output, "rubocop", 5)
+}
+
+/// Parse the first whitespace-delimited word of `s` as a count, e.g. the
+/// "15" in "15 files inspected". Returns 0 when absent or non-numeric.
+fn extract_leading_number(s: &str) -> usize {
+    match s.split_whitespace().next() {
+        Some(first_word) => first_word.parse().unwrap_or(0),
+        None => 0,
+    }
+}
+
+/// Pull the autocorrect count out of a summary such as
+/// "15 files inspected, 3 offenses detected, 3 offenses autocorrected".
+/// Returns 0 when no autocorrect clause is present.
+fn extract_autocorrect_count(s: &str) -> usize {
+    // Scan comma-separated clauses from the end; the autocorrect clause
+    // sits last in RuboCop's summary line.
+    s.split(',')
+        .rev()
+        .map(str::trim)
+        .find(|clause| clause.contains("autocorrected"))
+        .map_or(0, extract_leading_number)
+}
+
+/// Compact a Ruby file path by locating the nearest Rails convention
+/// directory and stripping everything before it.
+///
+/// The convention prefix must start at a path-component boundary: a plain
+/// substring search would mis-split e.g. "/home/me/myapp/models/user.rb" at
+/// the "app/models/" inside "myapp/", silently dropping part of a directory
+/// name.
+fn compact_ruby_path(path: &str) -> String {
+    let path = path.replace('\\', "/");
+
+    for prefix in &[
+        "app/models/",
+        "app/controllers/",
+        "app/views/",
+        "app/helpers/",
+        "app/services/",
+        "app/jobs/",
+        "app/mailers/",
+        "lib/",
+        "spec/",
+        "test/",
+        "config/",
+    ] {
+        // Walk every occurrence; accept only matches at the start of the
+        // string or immediately after a '/'.
+        let mut search_from = 0;
+        while let Some(rel) = path[search_from..].find(prefix) {
+            let pos = search_from + rel;
+            if pos == 0 || path.as_bytes()[pos - 1] == b'/' {
+                return path[pos..].to_string();
+            }
+            search_from = pos + 1;
+        }
+    }
+
+    // Generic: strip up to last known directory marker
+    if let Some(pos) = path.rfind("/app/") {
+        return path[pos + 1..].to_string();
+    }
+    if let Some(pos) = path.rfind('/') {
+        return path[pos + 1..].to_string();
+    }
+    path
+}
+
+// ── Tests ────────────────────────────────────────────────────────────────────
+
+#[cfg(test)]
+mod tests {
+ use super::*;
+ use crate::utils::count_tokens;
+
+    // Fixture: clean run, 15 files inspected, zero offenses. Note the summary
+    // omits correctable_offense_count, exercising the serde(default) path.
+    fn no_offenses_json() -> &'static str {
+        r#"{
+  "metadata": {"rubocop_version": "1.60.0"},
+  "files": [],
+  "summary": {
+    "offense_count": 0,
+    "target_file_count": 0,
+    "inspected_file_count": 15
+  }
+}"#
+    }
+
+    // Fixture: two files with mixed severities (error, warning, convention);
+    // 3 of 5 offenses are correctable, and correctable_offense_count is again
+    // absent so the manual count fallback is exercised.
+    fn with_offenses_json() -> &'static str {
+        r#"{
+  "metadata": {"rubocop_version": "1.60.0"},
+  "files": [
+    {
+      "path": "app/models/user.rb",
+      "offenses": [
+        {
+          "severity": "convention",
+          "message": "Trailing whitespace detected.",
+          "cop_name": "Layout/TrailingWhitespace",
+          "correctable": true,
+          "location": {"start_line": 10, "start_column": 5, "last_line": 10, "last_column": 8, "length": 3, "line": 10, "column": 5}
+        },
+        {
+          "severity": "convention",
+          "message": "Missing frozen string literal comment.",
+          "cop_name": "Style/FrozenStringLiteralComment",
+          "correctable": true,
+          "location": {"start_line": 1, "start_column": 1, "last_line": 1, "last_column": 1, "length": 1, "line": 1, "column": 1}
+        },
+        {
+          "severity": "warning",
+          "message": "Useless assignment to variable - `x`.",
+          "cop_name": "Lint/UselessAssignment",
+          "correctable": false,
+          "location": {"start_line": 25, "start_column": 5, "last_line": 25, "last_column": 6, "length": 1, "line": 25, "column": 5}
+        }
+      ]
+    },
+    {
+      "path": "app/controllers/users_controller.rb",
+      "offenses": [
+        {
+          "severity": "convention",
+          "message": "Trailing whitespace detected.",
+          "cop_name": "Layout/TrailingWhitespace",
+          "correctable": true,
+          "location": {"start_line": 5, "start_column": 20, "last_line": 5, "last_column": 22, "length": 2, "line": 5, "column": 20}
+        },
+        {
+          "severity": "error",
+          "message": "Syntax error, unexpected end-of-input.",
+          "cop_name": "Lint/Syntax",
+          "correctable": false,
+          "location": {"start_line": 30, "start_column": 1, "last_line": 30, "last_column": 1, "length": 1, "line": 30, "column": 1}
+        }
+      ]
+    }
+  ],
+  "summary": {
+    "offense_count": 5,
+    "target_file_count": 2,
+    "inspected_file_count": 20
+  }
+}"#
+    }
+
+    // Clean run → single "ok ✓" line with the inspected-file count.
+    #[test]
+    fn test_filter_rubocop_no_offenses() {
+        let result = filter_rubocop_json(no_offenses_json());
+        assert_eq!(result, "ok ✓ rubocop (15 files)");
+    }
+
+    // Offense report: header counts plus per-file ":line Cop — message" rows.
+    #[test]
+    fn test_filter_rubocop_with_offenses_per_file() {
+        let result = filter_rubocop_json(with_offenses_json());
+        // Should show per-file offenses
+        assert!(result.contains("5 offenses (20 files)"));
+        // controllers file has error severity, should appear first
+        assert!(result.contains("app/controllers/users_controller.rb"));
+        assert!(result.contains("app/models/user.rb"));
+        // Per-file offense format: :line CopName — message
+        assert!(result.contains(":30 Lint/Syntax — Syntax error"));
+        assert!(result.contains(":10 Layout/TrailingWhitespace — Trailing whitespace"));
+        assert!(result.contains(":25 Lint/UselessAssignment — Useless assignment"));
+    }
+
+    // Files are sorted worst-severity-first, and so are offenses inside a file.
+    #[test]
+    fn test_filter_rubocop_severity_ordering() {
+        let result = filter_rubocop_json(with_offenses_json());
+        // File with error should come before file with only convention/warning
+        let ctrl_pos = result.find("users_controller.rb").unwrap();
+        let model_pos = result.find("app/models/user.rb").unwrap();
+        assert!(
+            ctrl_pos < model_pos,
+            "Error-file should appear before convention-file"
+        );
+
+        // Within users_controller.rb, error should come before convention
+        let error_pos = result.find(":30 Lint/Syntax").unwrap();
+        let conv_pos = result.find(":5 Layout/TrailingWhitespace").unwrap();
+        assert!(
+            error_pos < conv_pos,
+            "Error offense should appear before convention"
+        );
+    }
+
+    // Severity outranks line number within a file.
+    #[test]
+    fn test_filter_rubocop_within_file_line_ordering() {
+        let result = filter_rubocop_json(with_offenses_json());
+        // Within user.rb, warning (line 25) should come before conventions (line 1, 10)
+        let warning_pos = result.find(":25 Lint/UselessAssignment").unwrap();
+        let conv1_pos = result.find(":1 Style/FrozenStringLiteralComment").unwrap();
+        assert!(
+            warning_pos < conv1_pos,
+            "Warning should come before convention within same file"
+        );
+    }
+
+    // Correctable count comes from the manual fallback (summary field absent).
+    #[test]
+    fn test_filter_rubocop_correctable_hint() {
+        let result = filter_rubocop_json(with_offenses_json());
+        assert!(result.contains("3 correctable"));
+        assert!(result.contains("rubocop -A"));
+    }
+
+    // Text path: plain "no offenses detected" summary → ok one-liner.
+    #[test]
+    fn test_filter_rubocop_text_fallback() {
+        let text = r#"Inspecting 10 files
+..........
+
+10 files inspected, no offenses detected"#;
+        let result = filter_rubocop_text(text);
+        assert_eq!(result, "ok ✓ rubocop (10 files)");
+    }
+
+    // Text path: autocorrect summary → compact "-A" one-liner with counts.
+    #[test]
+    fn test_filter_rubocop_text_autocorrect() {
+        let text = r#"Inspecting 15 files
+...C..CC.......
+
+15 files inspected, 3 offenses detected, 3 offenses autocorrected"#;
+        let result = filter_rubocop_text(text);
+        assert_eq!(result, "ok ✓ rubocop -A (15 files, 3 autocorrected)");
+    }
+
+    #[test]
+    fn test_filter_rubocop_empty_output() {
+        let result = filter_rubocop_json("");
+        assert_eq!(result, "RuboCop: No output");
+    }
+
+    // Broken JSON degrades to the fallback tail, never panics.
+    #[test]
+    fn test_filter_rubocop_invalid_json_falls_back() {
+        let garbage = "some ruby warning\n{broken json";
+        let result = filter_rubocop_json(garbage);
+        assert!(!result.is_empty(), "should not panic on invalid JSON");
+    }
+
+    // Path compaction: absolute prefixes are stripped down to the Rails
+    // convention directory; already-relative paths pass through unchanged.
+    #[test]
+    fn test_compact_ruby_path() {
+        assert_eq!(
+            compact_ruby_path("/home/user/project/app/models/user.rb"),
+            "app/models/user.rb"
+        );
+        assert_eq!(
+            compact_ruby_path("app/controllers/users_controller.rb"),
+            "app/controllers/users_controller.rb"
+        );
+        assert_eq!(
+            compact_ruby_path("/project/spec/models/user_spec.rb"),
+            "spec/models/user_spec.rb"
+        );
+        assert_eq!(
+            compact_ruby_path("lib/tasks/deploy.rake"),
+            "lib/tasks/deploy.rake"
+        );
+    }
+
+    #[test]
+    fn test_filter_rubocop_caps_offenses_per_file() {
+        // File with 7 offenses should show 5 + overflow
+        let json = r#"{
+  "metadata": {"rubocop_version": "1.60.0"},
+  "files": [
+    {
+      "path": "app/models/big.rb",
+      "offenses": [
+        {"severity": "convention", "message": "msg1", "cop_name": "Cop/A", "correctable": false, "location": {"start_line": 1, "start_column": 1}},
+        {"severity": "convention", "message": "msg2", "cop_name": "Cop/B", "correctable": false, "location": {"start_line": 2, "start_column": 1}},
+        {"severity": "convention", "message": "msg3", "cop_name": "Cop/C", "correctable": false, "location": {"start_line": 3, "start_column": 1}},
+        {"severity": "convention", "message": "msg4", "cop_name": "Cop/D", "correctable": false, "location": {"start_line": 4, "start_column": 1}},
+        {"severity": "convention", "message": "msg5", "cop_name": "Cop/E", "correctable": false, "location": {"start_line": 5, "start_column": 1}},
+        {"severity": "convention", "message": "msg6", "cop_name": "Cop/F", "correctable": false, "location": {"start_line": 6, "start_column": 1}},
+        {"severity": "convention", "message": "msg7", "cop_name": "Cop/G", "correctable": false, "location": {"start_line": 7, "start_column": 1}}
+      ]
+    }
+  ],
+  "summary": {"offense_count": 7, "target_file_count": 1, "inspected_file_count": 5}
+}"#;
+        let result = filter_rubocop_json(json);
+        assert!(result.contains(":5 Cop/E"), "should show 5th offense");
+        assert!(!result.contains(":6 Cop/F"), "should not show 6th inline");
+        assert!(result.contains("+2 more"), "should show overflow");
+    }
+
+    // Environment errors (missing gem) are reported verbatim under a
+    // "RuboCop error:" header instead of being summarized.
+    #[test]
+    fn test_filter_rubocop_text_bundler_error() {
+        let text = "Bundler::GemNotFound: Could not find gem 'rubocop' in any sources.";
+        let result = filter_rubocop_text(text);
+        assert!(
+            result.starts_with("RuboCop error:"),
+            "should detect Bundler error: {}",
+            result
+        );
+        assert!(result.contains("GemNotFound"));
+    }
+
+    #[test]
+    fn test_filter_rubocop_text_load_error() {
+        let text =
+            "/usr/lib/ruby/3.2.0/rubygems.rb:250: cannot load such file -- rubocop (LoadError)";
+        let result = filter_rubocop_text(text);
+        assert!(
+            result.starts_with("RuboCop error:"),
+            "should detect load error: {}",
+            result
+        );
+    }
+
+ #[test]
+ fn test_filter_rubocop_text_with_offenses() {
+ let text = r#"Inspecting 5 files
+..C..
+
+5 files inspected, 1 offense detected"#;
+ let result = filter_rubocop_text(text);
+ assert_eq!(result, "RuboCop: 5 files inspected, 1 offense detected");
+ }
+
+ #[test]
+ fn test_severity_rank() {
+ assert!(severity_rank("error") < severity_rank("warning"));
+ assert!(severity_rank("warning") < severity_rank("convention"));
+ assert!(severity_rank("fatal") < severity_rank("warning"));
+ }
+
+ #[test]
+ fn test_token_savings() {
+ let input = with_offenses_json();
+ let output = filter_rubocop_json(input);
+
+ let input_tokens = count_tokens(input);
+ let output_tokens = count_tokens(&output);
+ let savings = 100.0 - (output_tokens as f64 / input_tokens as f64 * 100.0);
+
+ assert!(
+ savings >= 60.0,
+ "RuboCop: expected ≥60% savings, got {:.1}% (in={}, out={})",
+ savings,
+ input_tokens,
+ output_tokens
+ );
+ }
+
+ // ── ANSI handling test ──────────────────────────────────────────────────
+
+ #[test]
+ fn test_filter_rubocop_json_with_ansi_prefix() {
+ // ANSI codes before JSON should trigger fallback, not panic
+ let input = "\x1b[33mWarning: something\x1b[0m\n{\"broken\": true}";
+ let result = filter_rubocop_json(input);
+ assert!(!result.is_empty(), "should not panic on ANSI-prefixed JSON");
+ }
+
+ // ── 10-file cap test (Issue 12) ─────────────────────────────────────────
+
+ #[test]
+ fn test_filter_rubocop_caps_at_ten_files() {
+ // Build JSON with 12 files, each having 1 offense
+ let mut files_json = Vec::new();
+ for i in 1..=12 {
+ files_json.push(format!(
+ r#"{{"path": "app/models/model_{}.rb", "offenses": [{{"severity": "convention", "message": "msg{}", "cop_name": "Cop/X{}", "correctable": false, "location": {{"start_line": 1, "start_column": 1}}}}]}}"#,
+ i, i, i
+ ));
+ }
+ let json = format!(
+ r#"{{"metadata": {{"rubocop_version": "1.60.0"}}, "files": [{}], "summary": {{"offense_count": 12, "target_file_count": 12, "inspected_file_count": 12}}}}"#,
+ files_json.join(",")
+ );
+ let result = filter_rubocop_json(&json);
+ assert!(
+ result.contains("+2 more files"),
+ "should show +2 more files overflow: {}",
+ result
+ );
+ }
+}
diff --git a/src/ruff_cmd.rs b/src/ruff_cmd.rs
index 00df94d35..2cfc2dfaf 100644
--- a/src/ruff_cmd.rs
+++ b/src/ruff_cmd.rs
@@ -1,3 +1,4 @@
+use crate::config;
use crate::tracking;
use crate::utils::{resolved_command, truncate};
use anyhow::{Context, Result};
@@ -6,7 +7,9 @@ use std::collections::HashMap;
#[derive(Debug, Deserialize)]
struct RuffLocation {
+ #[allow(dead_code)]
row: usize,
+ #[allow(dead_code)]
column: usize,
}
@@ -19,7 +22,9 @@ struct RuffFix {
#[derive(Debug, Deserialize)]
struct RuffDiagnostic {
code: String,
+ #[allow(dead_code)]
message: String,
+ #[allow(dead_code)]
location: RuffLocation,
#[allow(dead_code)]
end_location: Option,
@@ -121,13 +126,13 @@ pub fn filter_ruff_check_json(output: &str) -> String {
return format!(
"Ruff check (JSON parse failed: {})\n{}",
e,
- truncate(output, 500)
+ truncate(output, config::limits().passthrough_max_chars)
);
}
};
if diagnostics.is_empty() {
- return "✓ Ruff: No issues found".to_string();
+ return "Ruff: No issues found".to_string();
}
let total_issues = diagnostics.len();
@@ -204,7 +209,7 @@ pub fn filter_ruff_check_json(output: &str) -> String {
if fixable_count > 0 {
result.push_str(&format!(
- "\n💡 Run `ruff check --fix` to auto-fix {} issues\n",
+ "\n[hint] Run `ruff check --fix` to auto-fix {} issues\n",
fixable_count
));
}
@@ -237,7 +242,7 @@ pub fn filter_ruff_format(output: &str) -> String {
for part in parts {
let part_lower = part.to_lowercase();
if part_lower.contains("left unchanged") {
- let words: Vec<&str> = part.trim().split_whitespace().collect();
+ let words: Vec<&str> = part.split_whitespace().collect();
// Look for number before "file" or "files"
for (i, word) in words.iter().enumerate() {
if (word == &"file" || word == &"files") && i > 0 {
@@ -257,7 +262,7 @@ pub fn filter_ruff_format(output: &str) -> String {
// Check if all files are formatted
if files_to_format.is_empty() && output_lower.contains("left unchanged") {
- return "✓ Ruff format: All files formatted correctly".to_string();
+ return "Ruff format: All files formatted correctly".to_string();
}
let mut result = String::new();
@@ -265,7 +270,7 @@ pub fn filter_ruff_format(output: &str) -> String {
if output_lower.contains("would reformat") {
// Check mode: show files that need formatting
if files_to_format.is_empty() {
- result.push_str("✓ Ruff format: All files formatted correctly\n");
+ result.push_str("Ruff format: All files formatted correctly\n");
} else {
result.push_str(&format!(
"Ruff format: {} files need formatting\n",
@@ -285,10 +290,10 @@ pub fn filter_ruff_format(output: &str) -> String {
}
if files_checked > 0 {
- result.push_str(&format!("\n✓ {} files already formatted\n", files_checked));
+ result.push_str(&format!("\n{} files already formatted\n", files_checked));
}
- result.push_str("\n💡 Run `ruff format` to format these files\n");
+ result.push_str("\n[hint] Run `ruff format` to format these files\n");
}
} else {
// Write mode or other output - show summary
@@ -323,7 +328,7 @@ mod tests {
fn test_filter_ruff_check_no_issues() {
let output = "[]";
let result = filter_ruff_check_json(output);
- assert!(result.contains("✓ Ruff"));
+ assert!(result.contains("Ruff"));
assert!(result.contains("No issues found"));
}
@@ -369,7 +374,7 @@ mod tests {
fn test_filter_ruff_format_all_formatted() {
let output = "5 files left unchanged";
let result = filter_ruff_format(output);
- assert!(result.contains("✓ Ruff format"));
+ assert!(result.contains("Ruff format"));
assert!(result.contains("All files formatted correctly"));
}
diff --git a/src/runner.rs b/src/runner.rs
index 6ae6599ef..1a2eceed7 100644
--- a/src/runner.rs
+++ b/src/runner.rs
@@ -34,10 +34,10 @@ pub fn run_err(command: &str, verbose: u8) -> Result<()> {
if filtered.is_empty() {
if output.status.success() {
- rtk.push_str("✅ Command completed successfully (no errors)");
+ rtk.push_str("[ok] Command completed successfully (no errors)");
} else {
rtk.push_str(&format!(
- "❌ Command failed (exit code: {:?})\n",
+ "[FAIL] Command failed (exit code: {:?})\n",
output.status.code()
));
let lines: Vec<&str> = raw.lines().collect();
@@ -228,7 +228,7 @@ fn extract_test_summary(output: &str, command: &str) -> String {
let mut output = String::new();
if !failures.is_empty() {
- output.push_str("❌ FAILURES:\n");
+ output.push_str("[FAIL] FAILURES:\n");
for f in failures.iter().take(10) {
output.push_str(&format!(" {}\n", f));
}
@@ -239,13 +239,13 @@ fn extract_test_summary(output: &str, command: &str) -> String {
}
if !result.is_empty() {
- output.push_str("📊 SUMMARY:\n");
+ output.push_str("SUMMARY:\n");
for r in &result {
output.push_str(&format!(" {}\n", r));
}
} else {
// Fallback: show last few lines
- output.push_str("📊 OUTPUT (last 5 lines):\n");
+ output.push_str("OUTPUT (last 5 lines):\n");
let start = lines.len().saturating_sub(5);
for line in &lines[start..] {
if !line.trim().is_empty() {
diff --git a/src/summary.rs b/src/summary.rs
index bea9fe28e..a295b73d6 100644
--- a/src/summary.rs
+++ b/src/summary.rs
@@ -42,7 +42,7 @@ fn summarize_output(output: &str, command: &str, success: bool) -> String {
let mut result = Vec::new();
// Status
- let status_icon = if success { "✅" } else { "❌" };
+ let status_icon = if success { "[ok]" } else { "[FAIL]" };
result.push(format!(
"{} Command: {}",
status_icon,
@@ -96,10 +96,11 @@ fn detect_output_type(output: &str, command: &str) -> OutputType {
OutputType::JsonOutput
} else if output.lines().all(|l| {
l.len() < 200
- && !l
- .contains('\t')
- .then_some(true)
- .unwrap_or(l.split_whitespace().count() < 10)
+ && if l.contains('\t') {
+ false
+ } else {
+ l.split_whitespace().count() < 10
+ }
}) {
OutputType::ListOutput
} else {
@@ -108,7 +109,7 @@ fn detect_output_type(output: &str, command: &str) -> OutputType {
}
fn summarize_tests(output: &str, result: &mut Vec) {
- result.push("📋 Test Results:".to_string());
+ result.push("Test Results:".to_string());
let mut passed = 0;
let mut failed = 0;
@@ -125,7 +126,7 @@ fn summarize_tests(output: &str, result: &mut Vec) {
passed += 1;
}
}
- if lower.contains("failed") || lower.contains("✗") || lower.contains("fail") {
+ if lower.contains("failed") || lower.contains("[x]") || lower.contains("fail") {
if let Some(n) = extract_number(&lower, "failed") {
failed = n;
}
@@ -141,12 +142,12 @@ fn summarize_tests(output: &str, result: &mut Vec) {
}
}
- result.push(format!(" ✅ {} passed", passed));
+ result.push(format!(" [ok] {} passed", passed));
if failed > 0 {
- result.push(format!(" ❌ {} failed", failed));
+ result.push(format!(" [FAIL] {} failed", failed));
}
if skipped > 0 {
- result.push(format!(" ⏭️ {} skipped", skipped));
+ result.push(format!(" skip {} skipped", skipped));
}
if !failures.is_empty() {
@@ -159,7 +160,7 @@ fn summarize_tests(output: &str, result: &mut Vec) {
}
fn summarize_build(output: &str, result: &mut Vec) {
- result.push("🔨 Build Summary:".to_string());
+ result.push("Build Summary:".to_string());
let mut errors = 0;
let mut warnings = 0;
@@ -183,16 +184,16 @@ fn summarize_build(output: &str, result: &mut Vec) {
}
if compiled > 0 {
- result.push(format!(" 📦 {} crates/files compiled", compiled));
+ result.push(format!(" {} crates/files compiled", compiled));
}
if errors > 0 {
- result.push(format!(" ❌ {} errors", errors));
+ result.push(format!(" [error] {} errors", errors));
}
if warnings > 0 {
- result.push(format!(" ⚠️ {} warnings", warnings));
+ result.push(format!(" [warn] {} warnings", warnings));
}
if errors == 0 && warnings == 0 {
- result.push(" ✅ Build successful".to_string());
+ result.push(" [ok] Build successful".to_string());
}
if !error_msgs.is_empty() {
@@ -205,7 +206,7 @@ fn summarize_build(output: &str, result: &mut Vec) {
}
fn summarize_logs_quick(output: &str, result: &mut Vec) {
- result.push("📝 Log Summary:".to_string());
+ result.push("Log Summary:".to_string());
let mut errors = 0;
let mut warnings = 0;
@@ -222,14 +223,14 @@ fn summarize_logs_quick(output: &str, result: &mut Vec) {
}
}
- result.push(format!(" ❌ {} errors", errors));
- result.push(format!(" ⚠️ {} warnings", warnings));
- result.push(format!(" ℹ️ {} info", info));
+ result.push(format!(" [error] {} errors", errors));
+ result.push(format!(" [warn] {} warnings", warnings));
+ result.push(format!(" [info] {} info", info));
}
fn summarize_list(output: &str, result: &mut Vec) {
let lines: Vec<&str> = output.lines().filter(|l| !l.trim().is_empty()).collect();
- result.push(format!("📋 List ({} items):", lines.len()));
+ result.push(format!("List ({} items):", lines.len()));
for line in lines.iter().take(10) {
result.push(format!(" • {}", truncate(line, 70)));
@@ -240,7 +241,7 @@ fn summarize_list(output: &str, result: &mut Vec) {
}
fn summarize_json(output: &str, result: &mut Vec) {
- result.push("📋 JSON Output:".to_string());
+ result.push("JSON Output:".to_string());
// Try to parse and show structure
if let Ok(value) = serde_json::from_str::(output) {
@@ -269,7 +270,7 @@ fn summarize_json(output: &str, result: &mut Vec) {
fn summarize_generic(output: &str, result: &mut Vec) {
let lines: Vec<&str> = output.lines().collect();
- result.push("📋 Output:".to_string());
+ result.push("Output:".to_string());
// First few lines
for line in lines.iter().take(5) {
diff --git a/src/tee.rs b/src/tee.rs
index 90fef5233..1dbbe4e84 100644
--- a/src/tee.rs
+++ b/src/tee.rs
@@ -182,20 +182,15 @@ pub fn tee_and_hint(raw: &str, command_slug: &str, exit_code: i32) -> Option Self {
- Self::Failures
- }
-}
-
/// Configuration for the tee feature.
#[derive(Debug, Clone, serde::Serialize, serde::Deserialize)]
pub struct TeeConfig {
diff --git a/src/toml_filter.rs b/src/toml_filter.rs
index f72f3c870..0f571626b 100644
--- a/src/toml_filter.rs
+++ b/src/toml_filter.rs
@@ -574,7 +574,7 @@ pub fn run_filter_tests(filter_name_opt: Option<&str>) -> VerifyResults {
.into_iter()
.filter(|name| {
// When a specific filter is requested, only report that one as missing tests
- filter_name_opt.map_or(true, |f| name == f)
+ filter_name_opt.is_none_or(|f| name == f)
})
.filter(|name| !tested_filter_names.contains(name))
.collect();
@@ -1610,8 +1610,8 @@ match_command = "^make\\b"
let filters = make_filters(BUILTIN_TOML);
assert_eq!(
filters.len(),
- 47,
- "Expected exactly 47 built-in filters, got {}. \
+ 58,
+ "Expected exactly 58 built-in filters, got {}. \
Update this count when adding/removing filters in src/filters/.",
filters.len()
);
@@ -1668,11 +1668,11 @@ expected = "output line 1\noutput line 2"
let combined = format!("{}\n\n{}", BUILTIN_TOML, new_filter);
let filters = make_filters(&combined);
- // All 47 existing filters still present + 1 new = 48
+ // All 58 existing filters still present + 1 new = 59
assert_eq!(
filters.len(),
- 48,
- "Expected 48 filters after concat (47 built-in + 1 new)"
+ 59,
+ "Expected 59 filters after concat (58 built-in + 1 new)"
);
// New filter is discoverable
diff --git a/src/tracking.rs b/src/tracking.rs
index 66363a6dc..dd73788a0 100644
--- a/src/tracking.rs
+++ b/src/tracking.rs
@@ -221,6 +221,9 @@ pub struct MonthStats {
pub avg_time_ms: u64,
}
+/// Type alias for command statistics tuple: (command, count, saved_tokens, avg_savings_pct, avg_time_ms)
+type CommandStats = (String, usize, usize, f64, u64);
+
impl Tracker {
/// Create a new tracker instance.
///
@@ -251,6 +254,12 @@ impl Tracker {
}
let conn = Connection::open(&db_path)?;
+ // WAL mode + busy_timeout for concurrent access (multiple Claude Code instances).
+ // Non-fatal: NFS/read-only filesystems may not support WAL.
+ let _ = conn.execute_batch(
+ "PRAGMA journal_mode=WAL;
+ PRAGMA busy_timeout=5000;",
+ );
conn.execute(
"CREATE TABLE IF NOT EXISTS commands (
id INTEGER PRIMARY KEY,
@@ -488,6 +497,7 @@ impl Tracker {
/// summary.total_saved, summary.avg_savings_pct);
/// # Ok::<(), anyhow::Error>(())
/// ```
+ #[allow(dead_code)]
pub fn get_summary(&self) -> Result {
self.get_summary_filtered(None) // delegate to filtered variant
}
@@ -560,7 +570,7 @@ impl Tracker {
fn get_by_command(
&self,
project_path: Option<&str>, // added
- ) -> Result> {
+ ) -> Result> {
let (project_exact, project_glob) = project_filter_params(project_path); // added
let mut stmt = self.conn.prepare(
"SELECT rtk_cmd, COUNT(*), SUM(saved_tokens), AVG(savings_pct), AVG(exec_time_ms)
@@ -851,6 +861,7 @@ impl Tracker {
/// }
/// # Ok::<(), anyhow::Error>(())
/// ```
+ #[allow(dead_code)]
pub fn get_recent(&self, limit: usize) -> Result> {
self.get_recent_filtered(limit, None) // delegate to filtered variant
}
@@ -971,6 +982,7 @@ fn get_db_path() -> Result {
pub struct ParseFailureRecord {
pub timestamp: String,
pub raw_command: String,
+ #[allow(dead_code)]
pub error_message: String,
pub fallback_succeeded: bool,
}
@@ -1175,6 +1187,7 @@ pub fn args_display(args: &[OsString]) -> String {
/// timer.track("ls -la", "rtk ls", "input", "output");
/// ```
#[deprecated(note = "Use TimedExecution instead")]
+#[allow(dead_code)]
pub fn track(original_cmd: &str, rtk_cmd: &str, input: &str, output: &str) {
let input_tokens = estimate_tokens(input);
let output_tokens = estimate_tokens(output);
diff --git a/src/tree.rs b/src/tree.rs
index 39c5ece93..4727a740a 100644
--- a/src/tree.rs
+++ b/src/tree.rs
@@ -125,7 +125,7 @@ fn filter_tree_output(raw: &str) -> String {
}
// Remove trailing empty lines
- while filtered_lines.last().map_or(false, |l| l.trim().is_empty()) {
+ while filtered_lines.last().is_some_and(|l| l.trim().is_empty()) {
filtered_lines.pop();
}
diff --git a/src/trust.rs b/src/trust.rs
index ffe7c83fb..c30f977fc 100644
--- a/src/trust.rs
+++ b/src/trust.rs
@@ -140,6 +140,7 @@ pub fn check_trust(filter_path: &Path) -> Result {
}
/// Store current SHA-256 hash as trusted (computes hash from file).
+#[allow(dead_code)]
pub fn trust_filter(filter_path: &Path) -> Result<()> {
let hash = integrity::compute_hash(filter_path)
.with_context(|| format!("Failed to hash: {}", filter_path.display()))?;
@@ -267,13 +268,13 @@ fn print_risk_summary(content: &str) {
println!(" Filters: {}", filter_count);
if has_replace {
- println!(" ⚠ Contains 'replace' rules (can rewrite output)");
+ println!(" [!] Contains 'replace' rules (can rewrite output)");
}
if has_match_output {
- println!(" ⚠ Contains 'match_output' rules (can replace entire output)");
+ println!(" [!] Contains 'match_output' rules (can replace entire output)");
}
if has_dot_pattern {
- println!(" ⚠ Contains catch-all pattern '.' (matches everything)");
+ println!(" [!] Contains catch-all pattern '.' (matches everything)");
}
if !has_replace && !has_match_output && !has_dot_pattern {
println!(" No high-risk patterns detected.");
diff --git a/src/tsc_cmd.rs b/src/tsc_cmd.rs
index ad4658e1d..0758a149a 100644
--- a/src/tsc_cmd.rs
+++ b/src/tsc_cmd.rs
@@ -109,7 +109,7 @@ fn filter_tsc_output(output: &str) -> String {
if errors.is_empty() {
if output.contains("Found 0 errors") {
- return "✓ TypeScript: No errors found".to_string();
+ return "TypeScript: No errors found".to_string();
}
return "TypeScript compilation completed".to_string();
}
diff --git a/src/utils.rs b/src/utils.rs
index ff84961cc..c1882fa81 100644
--- a/src/utils.rs
+++ b/src/utils.rs
@@ -207,6 +207,58 @@ pub fn ok_confirmation(action: &str, detail: &str) -> String {
}
}
+/// Extract exit code from a process output. Returns the actual exit code, or
+/// `128 + signal` per Unix convention when terminated by a signal (no exit code
+/// available). Falls back to 1 on non-Unix platforms.
+pub fn exit_code_from_output(output: &std::process::Output, label: &str) -> i32 {
+ match output.status.code() {
+ Some(code) => code,
+ None => {
+ #[cfg(unix)]
+ {
+ use std::os::unix::process::ExitStatusExt;
+ if let Some(sig) = output.status.signal() {
+ eprintln!("[rtk] {}: process terminated by signal {}", label, sig);
+ return 128 + sig;
+ }
+ }
+ eprintln!("[rtk] {}: process terminated by signal", label);
+ 1
+ }
+ }
+}
+
+/// Return the last `n` lines of output with a label, for use as a fallback
+/// when filter parsing fails. Logs a diagnostic to stderr.
+pub fn fallback_tail(output: &str, label: &str, n: usize) -> String {
+ eprintln!(
+ "[rtk] {}: output format not recognized, showing last {} lines",
+ label, n
+ );
+ let lines: Vec<&str> = output.lines().collect();
+ let start = lines.len().saturating_sub(n);
+ lines[start..].join("\n")
+}
+
+/// Build a Command for Ruby tools, auto-detecting bundle exec.
+/// Uses `bundle exec ` when a Gemfile exists (transitive deps like rake
+/// won't appear in the Gemfile but still need bundler for version isolation).
+pub fn ruby_exec(tool: &str) -> Command {
+ if std::path::Path::new("Gemfile").exists() {
+ let mut c = Command::new("bundle");
+ c.arg("exec").arg(tool);
+ return c;
+ }
+ Command::new(tool)
+}
+
+/// Count whitespace-delimited tokens in text. Used by filter tests to verify
+/// token savings claims.
+#[cfg(test)]
+pub fn count_tokens(text: &str) -> usize {
+ text.split_whitespace().count()
+}
+
/// Detect the package manager used in the current directory.
/// Returns "pnpm", "yarn", or "npm" based on lockfile presence.
///
diff --git a/src/vitest_cmd.rs b/src/vitest_cmd.rs
index e9c24be34..2d8adb315 100644
--- a/src/vitest_cmd.rs
+++ b/src/vitest_cmd.rs
@@ -3,7 +3,7 @@ use regex::Regex;
use serde::Deserialize;
use crate::parser::{
- emit_degradation_warning, emit_passthrough_warning, extract_json_object, truncate_output,
+ emit_degradation_warning, emit_passthrough_warning, extract_json_object, truncate_passthrough,
FormatMode, OutputParser, ParseResult, TestFailure, TestResult, TokenFormatter,
};
use crate::tracking;
@@ -88,7 +88,7 @@ impl OutputParser for VitestParser {
}
None => {
// Tier 3: Passthrough
- ParseResult::Passthrough(truncate_output(input, 500))
+ ParseResult::Passthrough(truncate_passthrough(input))
}
}
}
@@ -182,7 +182,7 @@ fn extract_failures_regex(output: &str) -> Vec {
while i < lines.len() {
let line = lines[i];
- if line.contains('✗') || line.contains("FAIL") {
+ if line.contains("[x]") || line.contains("FAIL") {
let mut error_lines = vec![line.to_string()];
i += 1;
diff --git a/src/wc_cmd.rs b/src/wc_cmd.rs
index 6ac161929..7cd019988 100644
--- a/src/wc_cmd.rs
+++ b/src/wc_cmd.rs
@@ -167,7 +167,7 @@ fn format_single_line(line: &str, mode: &WcMode) -> String {
WcMode::Mixed => {
// Strip file path, keep numbers only
if parts.len() >= 2 {
- let last_is_path = parts.last().map_or(false, |p| p.parse::().is_err());
+ let last_is_path = parts.last().is_some_and(|p| p.parse::().is_err());
if last_is_path {
parts[..parts.len() - 1].join(" ")
} else {
@@ -202,7 +202,7 @@ fn format_multi_line(lines: &[&str], mode: &WcMode) -> String {
continue;
}
- let is_total = parts.last().map_or(false, |p| *p == "total");
+ let is_total = parts.last().is_some_and(|p| *p == "total");
match mode {
WcMode::Lines | WcMode::Words | WcMode::Bytes | WcMode::Chars => {
@@ -236,7 +236,7 @@ fn format_multi_line(lines: &[&str], mode: &WcMode) -> String {
let nums: Vec<&str> = parts[..parts.len() - 1].to_vec();
result.push(format!("Σ {}", nums.join(" ")));
} else if parts.len() >= 2 {
- let last_is_path = parts.last().map_or(false, |p| p.parse::().is_err());
+ let last_is_path = parts.last().is_some_and(|p| p.parse::().is_err());
if last_is_path {
let name = strip_prefix(parts.last().unwrap_or(&""), &common_prefix);
let nums: Vec<&str> = parts[..parts.len() - 1].to_vec();
diff --git a/src/wget_cmd.rs b/src/wget_cmd.rs
index 548f94a81..722f88f1a 100644
--- a/src/wget_cmd.rs
+++ b/src/wget_cmd.rs
@@ -33,7 +33,7 @@ pub fn run(url: &str, args: &[String], verbose: u8) -> Result<()> {
let filename = extract_filename_from_output(&stderr, url, args);
let size = get_file_size(&filename);
let msg = format!(
- "⬇️ {} ok | {} | {}",
+ "{} ok | {} | {}",
compact_url(url),
filename,
format_size(size)
@@ -42,9 +42,10 @@ pub fn run(url: &str, args: &[String], verbose: u8) -> Result<()> {
timer.track(&format!("wget {}", url), "rtk wget", &raw_output, &msg);
} else {
let error = parse_error(&stderr, &stdout);
- let msg = format!("⬇️ {} FAILED: {}", compact_url(url), error);
+ let msg = format!("{} FAILED: {}", compact_url(url), error);
println!("{}", msg);
timer.track(&format!("wget {}", url), "rtk wget", &raw_output, &msg);
+ std::process::exit(output.status.code().unwrap_or(1));
}
Ok(())
@@ -78,7 +79,7 @@ pub fn run_stdout(url: &str, args: &[String], verbose: u8) -> Result<()> {
let mut rtk_output = String::new();
if total > 20 {
rtk_output.push_str(&format!(
- "⬇️ {} ok | {} lines | {}\n",
+ "{} ok | {} lines | {}\n",
compact_url(url),
total,
format_size(output.stdout.len() as u64)
@@ -89,7 +90,7 @@ pub fn run_stdout(url: &str, args: &[String], verbose: u8) -> Result<()> {
}
rtk_output.push_str(&format!("... +{} more lines", total - 10));
} else {
- rtk_output.push_str(&format!("⬇️ {} ok | {} lines\n", compact_url(url), total));
+ rtk_output.push_str(&format!("{} ok | {} lines\n", compact_url(url), total));
for line in &lines {
rtk_output.push_str(&format!("{}\n", line));
}
@@ -104,9 +105,10 @@ pub fn run_stdout(url: &str, args: &[String], verbose: u8) -> Result<()> {
} else {
let stderr = String::from_utf8_lossy(&output.stderr);
let error = parse_error(&stderr, "");
- let msg = format!("⬇️ {} FAILED: {}", compact_url(url), error);
+ let msg = format!("{} FAILED: {}", compact_url(url), error);
println!("{}", msg);
timer.track(&format!("wget -O - {}", url), "rtk wget -o", &stderr, &msg);
+ std::process::exit(output.status.code().unwrap_or(1));
}
Ok(())
@@ -206,6 +208,7 @@ fn compact_url(url: &str) -> String {
}
}
+#[allow(dead_code)]
fn parse_error(stderr: &str, stdout: &str) -> String {
// Common wget error patterns
let combined = format!("{}\n{}", stderr, stdout);
diff --git a/tests/fixtures/golangci_v2_json.txt b/tests/fixtures/golangci_v2_json.txt
new file mode 100644
index 000000000..959b27f49
--- /dev/null
+++ b/tests/fixtures/golangci_v2_json.txt
@@ -0,0 +1,144 @@
+{
+ "Issues": [
+ {
+ "FromLinter": "errcheck",
+ "Text": "Error return value of `foo` is not checked",
+ "Severity": "error",
+ "SourceLines": [
+ " if err := foo(); err != nil {",
+ " return err",
+ " }"
+ ],
+ "Pos": {
+ "Filename": "pkg/handler/server.go",
+ "Line": 42,
+ "Column": 5,
+ "Offset": 1024
+ },
+ "Replacement": null,
+ "ExpectNoLint": false,
+ "ExpectedNoLintLinter": ""
+ },
+ {
+ "FromLinter": "errcheck",
+ "Text": "Error return value of `bar` is not checked",
+ "Severity": "error",
+ "SourceLines": [
+ " bar()",
+ " return nil",
+ "}"
+ ],
+ "Pos": {
+ "Filename": "pkg/handler/server.go",
+ "Line": 55,
+ "Column": 2,
+ "Offset": 2048
+ },
+ "Replacement": null,
+ "ExpectNoLint": false,
+ "ExpectedNoLintLinter": ""
+ },
+ {
+ "FromLinter": "gosimple",
+ "Text": "S1003: should replace strings.Index with strings.Contains",
+ "Severity": "warning",
+ "SourceLines": [
+ " if strings.Index(s, sub) >= 0 {",
+ " return true",
+ " }"
+ ],
+ "Pos": {
+ "Filename": "pkg/utils/strings.go",
+ "Line": 15,
+ "Column": 2,
+ "Offset": 512
+ },
+ "Replacement": null,
+ "ExpectNoLint": false,
+ "ExpectedNoLintLinter": ""
+ },
+ {
+ "FromLinter": "govet",
+ "Text": "printf: Sprintf format %s has arg of wrong type int",
+ "Severity": "error",
+ "SourceLines": [
+ " fmt.Sprintf(\"%s\", 42)"
+ ],
+ "Pos": {
+ "Filename": "cmd/main/main.go",
+ "Line": 10,
+ "Column": 3,
+ "Offset": 256
+ },
+ "Replacement": null,
+ "ExpectNoLint": false,
+ "ExpectedNoLintLinter": ""
+ },
+ {
+ "FromLinter": "unused",
+ "Text": "func `unusedHelper` is unused",
+ "Severity": "warning",
+ "SourceLines": [
+ "func unusedHelper() {",
+ " // implementation",
+ "}"
+ ],
+ "Pos": {
+ "Filename": "internal/helpers.go",
+ "Line": 100,
+ "Column": 1,
+ "Offset": 4096
+ },
+ "Replacement": null,
+ "ExpectNoLint": false,
+ "ExpectedNoLintLinter": ""
+ },
+ {
+ "FromLinter": "errcheck",
+ "Text": "Error return value of `close` is not checked",
+ "Severity": "error",
+ "SourceLines": [
+ " defer file.Close()"
+ ],
+ "Pos": {
+ "Filename": "pkg/handler/server.go",
+ "Line": 120,
+ "Column": 10,
+ "Offset": 3072
+ },
+ "Replacement": null,
+ "ExpectNoLint": false,
+ "ExpectedNoLintLinter": ""
+ },
+ {
+ "FromLinter": "gosimple",
+ "Text": "S1005: should omit nil check",
+ "Severity": "warning",
+ "SourceLines": [
+ " if m != nil {",
+ " for k, v := range m {",
+ " process(k, v)",
+ " }",
+ " }"
+ ],
+ "Pos": {
+ "Filename": "pkg/utils/strings.go",
+ "Line": 45,
+ "Column": 1,
+ "Offset": 1536
+ },
+ "Replacement": null,
+ "ExpectNoLint": false,
+ "ExpectedNoLintLinter": ""
+ }
+ ],
+ "Report": {
+ "Warnings": [],
+ "Linters": [
+ {"Name": "errcheck", "Enabled": true, "EnabledByDefault": true},
+ {"Name": "gosimple", "Enabled": true, "EnabledByDefault": true},
+ {"Name": "govet", "Enabled": true, "EnabledByDefault": true},
+ {"Name": "unused", "Enabled": true, "EnabledByDefault": true}
+ ]
+ }
+}