From 7b0f5e1ba9b83ff6ce214cd481f7c9eac1d5f87b Mon Sep 17 00:00:00 2001 From: aesoft <43991222+aeppling@users.noreply.github.com> Date: Tue, 31 Mar 2026 15:53:41 +0200 Subject: [PATCH 01/44] feat(core): binary hook w/ native cmd exec + streaming Co-Authored-By: ahundt --- src/cmd/analysis.rs | 693 +++++++++++++++ src/cmd/builtins.rs | 258 ++++++ src/cmd/exec.rs | 472 ++++++++++ src/cmd/filters.rs | 326 +++++++ src/cmd/hook/claude.rs | 539 ++++++++++++ src/cmd/hook/mod.rs | 1536 +++++++++++++++++++++++++++++++++ src/cmd/mod.rs | 19 + src/cmd/predicates.rs | 75 ++ src/cmd/test_helpers.rs | 32 + src/cmds/git/git.rs | 53 +- src/cmds/go/go_cmd.rs | 6 +- src/cmds/js/tsc_cmd.rs | 3 +- src/cmds/python/pytest_cmd.rs | 3 +- src/cmds/rust/cargo_cmd.rs | 3 +- src/cmds/system/pipe_cmd.rs | 450 ++++++++++ src/core/mod.rs | 1 + src/core/stream.rs | 418 +++++++++ src/main.rs | 153 +++- 18 files changed, 5011 insertions(+), 29 deletions(-) create mode 100644 src/cmd/analysis.rs create mode 100644 src/cmd/builtins.rs create mode 100644 src/cmd/exec.rs create mode 100644 src/cmd/filters.rs create mode 100644 src/cmd/hook/claude.rs create mode 100644 src/cmd/hook/mod.rs create mode 100644 src/cmd/mod.rs create mode 100644 src/cmd/predicates.rs create mode 100644 src/cmd/test_helpers.rs create mode 100644 src/cmds/system/pipe_cmd.rs create mode 100644 src/core/stream.rs diff --git a/src/cmd/analysis.rs b/src/cmd/analysis.rs new file mode 100644 index 000000000..114e1ba98 --- /dev/null +++ b/src/cmd/analysis.rs @@ -0,0 +1,693 @@ +use super::lexer::{strip_quotes, ParsedToken, TokenKind}; + +#[derive(Debug, Clone, PartialEq)] +pub struct NativeCommand { + pub binary: String, + pub args: Vec, + pub operator: Option, +} + +pub fn split_safe_suffix(mut tokens: Vec) -> (Vec, String) { + let mut suffixes: Vec = Vec::new(); + + loop { + let n = tokens.len(); + let mut matched_len: usize = 0; + let mut matched_suffix = String::new(); + + if n >= 4 { + let t = &tokens[n - 3..]; + if 
matches!(t[0].kind, TokenKind::Pipe) + && matches!(t[1].kind, TokenKind::Arg) + && t[1].value == "tee" + && matches!(t[2].kind, TokenKind::Arg) + { + matched_suffix = format!("| tee {}", t[2].value); + matched_len = 3; + } + } + + if matched_len == 0 && n >= 4 { + let t = &tokens[n - 3..]; + if matches!(t[0].kind, TokenKind::Pipe) + && matches!(t[1].kind, TokenKind::Arg) + && matches!(t[1].value.as_str(), "head" | "tail") + && matches!(t[2].kind, TokenKind::Arg) + { + matched_suffix = format!("| {} {}", t[1].value, t[2].value); + matched_len = 3; + } + } + + if matched_len == 0 && n >= 3 { + let t = &tokens[n - 2..]; + if matches!(t[0].kind, TokenKind::Redirect) + && t[0].value.starts_with('2') + && t[0].value.contains('>') + && !t[0].value.contains('&') + && matches!(t[1].kind, TokenKind::Arg) + && t[1].value == "/dev/null" + { + matched_suffix = format!("{}{}", t[0].value, t[1].value); + matched_len = 2; + } + } + + if matched_len == 0 && n >= 3 { + let t = &tokens[n - 2..]; + if matches!(t[0].kind, TokenKind::Pipe) + && matches!(t[1].kind, TokenKind::Arg) + && t[1].value == "cat" + { + matched_suffix = "| cat".to_string(); + matched_len = 2; + } + } + + if matched_len == 0 && n >= 3 { + let t = &tokens[n - 2..]; + if matches!(t[0].kind, TokenKind::Redirect) + && t[0].value == ">" + && matches!(t[1].kind, TokenKind::Arg) + && t[1].value == "/dev/null" + { + matched_suffix = "> /dev/null".to_string(); + matched_len = 2; + } + } + + if matched_len == 0 && n >= 3 { + let t = &tokens[n - 2..]; + if matches!(t[0].kind, TokenKind::Redirect) + && t[0].value == ">>" + && matches!(t[1].kind, TokenKind::Arg) + { + matched_suffix = format!(">> {}", t[1].value); + matched_len = 2; + } + } + + if matched_len == 0 && n >= 2 { + let last = &tokens[n - 1]; + if matches!(last.kind, TokenKind::Redirect) && last.value.contains(">&") { + matched_suffix = last.value.clone(); + matched_len = 1; + } + } + + if matched_len == 0 && n >= 2 { + let last = &tokens[n - 1]; + if 
matches!(last.kind, TokenKind::Shellism) && last.value == "&" { + matched_suffix = "&".to_string(); + matched_len = 1; + } + } + + if matched_len == 0 { + break; + } + + tokens.truncate(n - matched_len); + suffixes.push(matched_suffix); + } + + suffixes.reverse(); + let suffix = suffixes.join(" "); + (tokens, suffix) +} + +pub fn needs_shell(tokens: &[ParsedToken]) -> bool { + tokens.iter().any(|t| { + matches!( + t.kind, + TokenKind::Shellism | TokenKind::Pipe | TokenKind::Redirect + ) + }) +} + +pub fn parse_chain(tokens: Vec) -> Result, String> { + let mut commands = Vec::new(); + let mut current_args = Vec::new(); + + for token in tokens { + match token.kind { + TokenKind::Arg => { + current_args.push(strip_quotes(&token.value)); + } + TokenKind::Operator => { + if current_args.is_empty() { + return Err(format!( + "Syntax error: operator {} with no command", + token.value + )); + } + let binary = current_args.remove(0); + commands.push(NativeCommand { + binary, + args: current_args.clone(), + operator: Some(token.value.clone()), + }); + current_args.clear(); + } + TokenKind::Pipe | TokenKind::Redirect | TokenKind::Shellism => { + return Err(format!( + "Unexpected {:?} in native mode - use passthrough", + token.kind + )); + } + } + } + + if !current_args.is_empty() { + let binary = current_args.remove(0); + commands.push(NativeCommand { + binary, + args: current_args, + operator: None, + }); + } + + Ok(commands) +} + +pub fn should_run(operator: Option<&str>, last_success: bool) -> bool { + match operator { + Some("&&") => last_success, + Some("||") => !last_success, + Some(";") | None => true, + _ => true, + } +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::cmd::lexer::tokenize; + + #[test] + fn test_split_suffix_2_redirect() { + let tokens = tokenize("cargo test 2>&1"); + let (core, suffix) = split_safe_suffix(tokens); + assert_eq!(suffix, "2>&1"); + assert!(!needs_shell(&core)); + let cmds = parse_chain(core).unwrap(); + 
assert_eq!(cmds[0].binary, "cargo"); + assert_eq!(cmds[0].args, vec!["test"]); + } + + #[test] + fn test_split_suffix_dev_null() { + let tokens = tokenize("cargo test 2>/dev/null"); + let (core, suffix) = split_safe_suffix(tokens); + assert_eq!(suffix, "2>/dev/null"); + let cmds = parse_chain(core).unwrap(); + assert_eq!(cmds[0].binary, "cargo"); + } + + #[test] + fn test_split_suffix_stdout_dev_null() { + let tokens = tokenize("cargo test > /dev/null"); + let (core, suffix) = split_safe_suffix(tokens); + assert_eq!(suffix, "> /dev/null"); + let cmds = parse_chain(core).unwrap(); + assert_eq!(cmds[0].binary, "cargo"); + } + + #[test] + fn test_split_suffix_pipe_tee() { + let tokens = tokenize("cargo test | tee /tmp/log.txt"); + let (core, suffix) = split_safe_suffix(tokens); + assert!(suffix.starts_with("| tee"), "suffix: {suffix}"); + assert!(suffix.contains("/tmp/log.txt"), "suffix: {suffix}"); + let cmds = parse_chain(core).unwrap(); + assert_eq!(cmds[0].binary, "cargo"); + } + + #[test] + fn test_split_suffix_pipe_head() { + let tokens = tokenize("git log | head -20"); + let (core, suffix) = split_safe_suffix(tokens); + assert!(suffix.starts_with("| head"), "suffix: {suffix}"); + let cmds = parse_chain(core).unwrap(); + assert_eq!(cmds[0].binary, "git"); + } + + #[test] + fn test_split_suffix_pipe_tail() { + let tokens = tokenize("git log | tail -10"); + let (_core, suffix) = split_safe_suffix(tokens); + assert!(suffix.starts_with("| tail"), "suffix: {suffix}"); + } + + #[test] + fn test_split_suffix_pipe_cat() { + let tokens = tokenize("ls --color | cat"); + let (core, suffix) = split_safe_suffix(tokens); + assert_eq!(suffix, "| cat"); + let cmds = parse_chain(core).unwrap(); + assert_eq!(cmds[0].binary, "ls"); + } + + #[test] + fn test_split_suffix_append_redirect() { + let tokens = tokenize("cargo build >> /tmp/build.log"); + let (core, suffix) = split_safe_suffix(tokens); + assert!(suffix.starts_with(">>"), "suffix: {suffix}"); + let cmds = 
parse_chain(core).unwrap(); + assert_eq!(cmds[0].binary, "cargo"); + } + + #[test] + fn test_split_suffix_none() { + let tokens = tokenize("cargo test"); + let n = tokens.len(); + let (core, suffix) = split_safe_suffix(tokens); + assert!(suffix.is_empty(), "no suffix expected, got: {suffix}"); + assert_eq!(core.len(), n); + } + + #[test] + fn test_split_suffix_glob_core_stays_shellism() { + let tokens = tokenize("ls *.rs 2>&1"); + let (core, suffix) = split_safe_suffix(tokens); + assert_eq!(suffix, "2>&1"); + assert!(needs_shell(&core)); + } + + #[test] + fn test_split_suffix_requires_core_token() { + let tokens = tokenize("2>&1"); + let (core, suffix) = split_safe_suffix(tokens); + assert!( + suffix.is_empty() || core.is_empty(), + "bare suffix with no core should not produce a valid split" + ); + } + + #[test] + fn test_needs_shell_simple() { + let tokens = tokenize("git status"); + assert!(!needs_shell(&tokens)); + } + + #[test] + fn test_needs_shell_with_glob() { + let tokens = tokenize("ls *.rs"); + assert!(needs_shell(&tokens)); + } + + #[test] + fn test_needs_shell_with_pipe() { + let tokens = tokenize("cat file | grep x"); + assert!(needs_shell(&tokens)); + } + + #[test] + fn test_needs_shell_with_redirect() { + let tokens = tokenize("cmd > file"); + assert!(needs_shell(&tokens)); + } + + #[test] + fn test_needs_shell_with_chain() { + let tokens = tokenize("cd dir && git status"); + assert!(!needs_shell(&tokens)); + } + + #[test] + fn test_parse_simple_command() { + let tokens = tokenize("git status"); + let cmds = parse_chain(tokens).unwrap(); + assert_eq!(cmds.len(), 1); + assert_eq!(cmds[0].binary, "git"); + assert_eq!(cmds[0].args, vec!["status"]); + assert_eq!(cmds[0].operator, None); + } + + #[test] + fn test_parse_command_with_multiple_args() { + let tokens = tokenize("git commit -m message"); + let cmds = parse_chain(tokens).unwrap(); + assert_eq!(cmds.len(), 1); + assert_eq!(cmds[0].binary, "git"); + assert_eq!(cmds[0].args, vec!["commit", "-m", 
"message"]); + } + + #[test] + fn test_parse_chained_and() { + let tokens = tokenize("cd dir && git status"); + let cmds = parse_chain(tokens).unwrap(); + assert_eq!(cmds.len(), 2); + assert_eq!(cmds[0].binary, "cd"); + assert_eq!(cmds[0].args, vec!["dir"]); + assert_eq!(cmds[0].operator, Some("&&".to_string())); + assert_eq!(cmds[1].binary, "git"); + assert_eq!(cmds[1].args, vec!["status"]); + assert_eq!(cmds[1].operator, None); + } + + #[test] + fn test_parse_chained_or() { + let tokens = tokenize("cmd1 || cmd2"); + let cmds = parse_chain(tokens).unwrap(); + assert_eq!(cmds.len(), 2); + assert_eq!(cmds[0].operator, Some("||".to_string())); + } + + #[test] + fn test_parse_chained_semicolon() { + let tokens = tokenize("cmd1 ; cmd2 ; cmd3"); + let cmds = parse_chain(tokens).unwrap(); + assert_eq!(cmds.len(), 3); + assert_eq!(cmds[0].operator, Some(";".to_string())); + assert_eq!(cmds[1].operator, Some(";".to_string())); + assert_eq!(cmds[2].operator, None); + } + + #[test] + fn test_parse_triple_chain() { + let tokens = tokenize("a && b && c"); + let cmds = parse_chain(tokens).unwrap(); + assert_eq!(cmds.len(), 3); + } + + #[test] + fn test_parse_operator_at_start() { + let tokens = tokenize("&& cmd"); + let result = parse_chain(tokens); + assert!(result.is_err()); + } + + #[test] + fn test_parse_operator_at_end() { + let tokens = tokenize("cmd &&"); + let cmds = parse_chain(tokens).unwrap(); + assert_eq!(cmds.len(), 1); + assert_eq!(cmds[0].operator, Some("&&".to_string())); + } + + #[test] + fn test_parse_quoted_arg() { + let tokens = tokenize("git commit -m \"Fix && Bug\""); + let cmds = parse_chain(tokens).unwrap(); + assert_eq!(cmds.len(), 1); + assert_eq!(cmds[0].args.len(), 3); + assert_eq!(cmds[0].args[2], "Fix && Bug"); + } + + #[test] + fn test_parse_empty() { + let tokens = tokenize(""); + let cmds = parse_chain(tokens).unwrap(); + assert!(cmds.is_empty()); + } + + #[test] + fn test_needs_shell_find_piped_to_grep() { + let tokens = tokenize("find . 
-name \"*.rs\" | grep pattern"); + assert!(needs_shell(&tokens)); + } + + #[test] + fn test_needs_shell_rg_piped_to_head() { + let tokens = tokenize("rg pattern src/ | head -20"); + assert!(needs_shell(&tokens)); + } + + #[test] + fn test_needs_shell_grep_with_redirect() { + let tokens = tokenize("grep -r pattern . > results.txt"); + assert!(needs_shell(&tokens)); + } + + #[test] + fn test_needs_shell_find_with_glob_arg() { + let tokens = tokenize("find . -name *.rs"); + assert!(needs_shell(&tokens)); + } + + #[test] + fn test_needs_shell_quoted_pipe_in_grep_arg_no_shell() { + let tokens = tokenize("grep \"a|b\" src/"); + assert!(!needs_shell(&tokens)); + } + + #[test] + fn test_parse_chain_find_with_quoted_name() { + let tokens = tokenize("find . -name \"*.rs\""); + assert!(!needs_shell(&tokens)); + let cmds = parse_chain(tokens).unwrap(); + assert_eq!(cmds[0].binary, "find"); + assert!(cmds[0].args.contains(&"-name".to_string())); + assert!( + cmds[0].args.iter().any(|a| a == "*.rs"), + "quoted glob stripped to bare glob in args: {:?}", + cmds[0].args + ); + } + + #[test] + fn test_parse_chain_grep_native_no_pipe() { + let tokens = tokenize("grep pattern file.rs"); + assert!(!needs_shell(&tokens)); + let cmds = parse_chain(tokens).unwrap(); + assert_eq!(cmds[0].binary, "grep"); + assert_eq!(cmds[0].args, vec!["pattern", "file.rs"]); + } + + #[test] + fn test_should_run_and_success() { + assert!(should_run(Some("&&"), true)); + } + + #[test] + fn test_should_run_and_failure() { + assert!(!should_run(Some("&&"), false)); + } + + #[test] + fn test_should_run_or_success() { + assert!(!should_run(Some("||"), true)); + } + + #[test] + fn test_should_run_or_failure() { + assert!(should_run(Some("||"), false)); + } + + #[test] + fn test_should_run_semicolon() { + assert!(should_run(Some(";"), true)); + assert!(should_run(Some(";"), false)); + } + + #[test] + fn test_should_run_none() { + assert!(should_run(None, true)); + assert!(should_run(None, false)); + } + + #[test] 
+ fn test_needs_shell_redirect_to_dev_null() { + let tokens = tokenize("cmd > /dev/null"); + assert!(needs_shell(&tokens)); + } + + #[test] + fn test_needs_shell_stderr_to_dev_null() { + let tokens = tokenize("cmd 2>/dev/null"); + assert!(needs_shell(&tokens)); + } + + #[test] + fn test_needs_shell_stderr_to_dev_null_spaced() { + let tokens = tokenize("cmd 2> /dev/null"); + assert!(needs_shell(&tokens)); + } + + #[test] + fn test_needs_shell_stderr_to_stdout() { + let tokens = tokenize("cmd 2>&1"); + assert!(needs_shell(&tokens)); + } + + #[test] + fn test_needs_shell_stdout_to_stderr() { + let tokens = tokenize("cmd 1>&2"); + assert!(needs_shell(&tokens)); + } + + #[test] + fn test_needs_shell_combined_redirect_chain() { + let tokens = tokenize("cmd > /dev/null 2>&1"); + assert!(needs_shell(&tokens)); + } + + #[test] + fn test_needs_shell_redirect_append() { + let tokens = tokenize("cmd >> /tmp/output.txt"); + assert!(needs_shell(&tokens)); + } + + #[test] + fn test_needs_shell_stderr_redirect_to_file() { + let tokens = tokenize("cmd 2> /tmp/err.log"); + assert!(needs_shell(&tokens)); + } + + #[test] + fn test_needs_shell_pipe_to_tail() { + let tokens = tokenize("git log | tail -20"); + assert!(needs_shell(&tokens)); + } + + #[test] + fn test_needs_shell_pipe_to_cat() { + let tokens = tokenize("ls --color | cat"); + assert!(needs_shell(&tokens)); + } + + #[test] + fn test_needs_shell_pipe_to_tee() { + let tokens = tokenize("cargo build 2>&1 | tee /tmp/build.log"); + assert!(needs_shell(&tokens)); + } + + #[test] + fn test_needs_shell_pipe_to_wc() { + let tokens = tokenize("find . 
-name '*.rs' | wc -l"); + assert!(needs_shell(&tokens)); + } + + #[test] + fn test_operator_and_does_not_trigger_shell() { + let tokens = tokenize("cargo fmt && cargo clippy"); + assert!(!needs_shell(&tokens)); + } + + #[test] + fn test_operator_or_does_not_trigger_shell() { + let tokens = tokenize("cargo test || true"); + assert!(!needs_shell(&tokens)); + } + + #[test] + fn test_operator_semicolon_does_not_trigger_shell() { + let tokens = tokenize("true ; false"); + assert!(!needs_shell(&tokens)); + } + + #[test] + fn test_redirect_suffix_is_passed_through_verbatim() { + let raw = "cargo test 2>&1 | tee /tmp/test.log"; + let tokens = tokenize(raw); + assert!(needs_shell(&tokens)); + } + + #[test] + fn test_background_job_suffix_simple() { + let tokens = tokenize("cargo build &"); + let (core, suffix) = split_safe_suffix(tokens); + assert_eq!(suffix, "&"); + assert_eq!(core.len(), 2); + assert!(!needs_shell(&core)); + } + + #[test] + fn test_background_job_suffix_git_status() { + let tokens = tokenize("git status &"); + let (core, suffix) = split_safe_suffix(tokens); + assert_eq!(suffix, "&"); + assert_eq!(core.len(), 2); + assert!(!needs_shell(&core)); + } + + #[test] + fn test_background_job_suffix_with_fd_redirect() { + // With the current lexer, 2>&1 is a single Redirect token (no Shellism), + // so both 2>&1 and & are safely stripped as independent suffixes + let tokens = tokenize("cargo build 2>&1 &"); + let (core, suffix) = split_safe_suffix(tokens); + assert_eq!(suffix, "2>&1 &"); + assert!(!needs_shell(&core)); + } + + #[test] + fn test_background_job_suffix_single_token_not_stripped() { + let tokens = tokenize("&"); + let (core, suffix) = split_safe_suffix(tokens); + assert!(suffix.is_empty()); + assert_eq!(core.len(), 1); + } + + #[test] + fn test_cargo_test_pipe_grep_is_not_safe_suffix() { + let tokens = tokenize("cargo test | grep FAILED"); + let (_core, suffix) = split_safe_suffix(tokens.clone()); + assert!(suffix.is_empty()); + 
assert!(needs_shell(&tokens)); + } + + #[test] + fn test_nohup_background_strips_ampersand() { + let tokens = tokenize("nohup cargo build &"); + let (core, suffix) = split_safe_suffix(tokens); + assert_eq!(suffix, "&"); + assert_eq!(core[0].value, "nohup"); + assert_eq!(core.len(), 3); + assert!(!needs_shell(&core)); + } + + #[test] + fn test_split_suffix_compound_redirect_pipe_tail() { + let tokens = tokenize("cargo test 2>&1 | tail -50"); + let (core, suffix) = split_safe_suffix(tokens); + assert_eq!(suffix, "2>&1 | tail -50"); + assert!(!needs_shell(&core)); + let cmds = parse_chain(core).expect("core must parse"); + assert_eq!(cmds[0].binary, "cargo"); + assert_eq!(cmds[0].args, vec!["test"]); + } + + #[test] + fn test_split_suffix_compound_devnull_redirect() { + let tokens = tokenize("cmd > /dev/null 2>&1"); + let (core, suffix) = split_safe_suffix(tokens); + assert_eq!(suffix, "> /dev/null 2>&1"); + assert!(!needs_shell(&core)); + assert_eq!(core.len(), 1); + } + + #[test] + fn test_split_suffix_compound_redirect_pipe_tee() { + let tokens = tokenize("cargo build 2>&1 | tee /tmp/log"); + let (core, suffix) = split_safe_suffix(tokens); + assert_eq!(suffix, "2>&1 | tee /tmp/log"); + assert!(!needs_shell(&core)); + } + + #[test] + fn test_split_suffix_triple_compound() { + let tokens = tokenize("cmd >> /tmp/log 2>&1 | tail -5"); + let (core, suffix) = split_safe_suffix(tokens); + assert_eq!(suffix, ">> /tmp/log 2>&1 | tail -5"); + assert!(!needs_shell(&core)); + assert_eq!(core.len(), 1); + } + + #[test] + fn test_split_suffix_unsafe_pipe_with_redirect_not_stripped() { + let tokens = tokenize("cargo test | grep FAILED 2>&1"); + let (core, suffix) = split_safe_suffix(tokens); + assert_eq!(suffix, "2>&1"); + assert!(needs_shell(&core)); + } + + #[test] + fn test_split_suffix_devnull_background() { + let tokens = tokenize("cargo build > /dev/null &"); + let (core, suffix) = split_safe_suffix(tokens); + assert_eq!(suffix, "> /dev/null &"); + 
assert!(!needs_shell(&core)); + } +} diff --git a/src/cmd/builtins.rs b/src/cmd/builtins.rs new file mode 100644 index 000000000..17ec4d1b9 --- /dev/null +++ b/src/cmd/builtins.rs @@ -0,0 +1,258 @@ +use super::predicates::{expand_tilde, get_home}; +use anyhow::{Context, Result}; + +pub fn builtin_cd(args: &[String]) -> Result { + let target = args + .first() + .map(|s| expand_tilde(s)) + .unwrap_or_else(get_home); + + std::env::set_current_dir(&target) + .with_context(|| format!("cd: {}: No such file or directory", target))?; + + Ok(true) +} + +fn is_valid_env_name(name: &str) -> bool { + let mut chars = name.chars(); + matches!(chars.next(), Some(c) if c.is_ascii_alphabetic() || c == '_') + && chars.all(|c| c.is_ascii_alphanumeric() || c == '_') +} + +pub fn builtin_export(args: &[String]) -> Result { + for arg in args { + if let Some((key, value)) = arg.split_once('=') { + if !is_valid_env_name(key) { + continue; + } + let clean_value = value + .strip_prefix('"') + .and_then(|v| v.strip_suffix('"')) + .or_else(|| value.strip_prefix('\'').and_then(|v| v.strip_suffix('\''))) + .unwrap_or(value); + std::env::set_var(key, clean_value); + } + } + Ok(true) +} + +pub fn is_builtin(binary: &str) -> bool { + matches!( + binary, + "cd" | "export" | "pwd" | "echo" | "true" | "false" | ":" + ) +} + +pub fn execute(binary: &str, args: &[String]) -> Result { + match binary { + "cd" => builtin_cd(args), + "export" => builtin_export(args), + "pwd" => { + println!("{}", std::env::current_dir()?.display()); + Ok(true) + } + "echo" => { + let (print_args, no_newline) = if args.first().map(|s| s.as_str()) == Some("-n") { + (&args[1..], true) + } else { + (args, false) + }; + print!("{}", print_args.join(" ")); + if !no_newline { + println!(); + } + Ok(true) + } + "true" | ":" => Ok(true), + "false" => Ok(false), + _ => anyhow::bail!("Unknown builtin: {}", binary), + } +} + +#[cfg(test)] +mod tests { + use super::*; + use std::env; + + #[test] + fn test_cd_all_cases() { + let 
original = env::current_dir().unwrap(); + let home = get_home(); + + let result = builtin_cd(&["/tmp".to_string()]).unwrap(); + assert!(result); + let new_dir = env::current_dir().unwrap(); + let canon_tmp = std::fs::canonicalize("/tmp").unwrap(); + let canon_new = std::fs::canonicalize(&new_dir).unwrap(); + assert_eq!(canon_new, canon_tmp, "cd /tmp should land in /tmp"); + + let result = builtin_cd(&["/nonexistent/path/xyz".to_string()]); + assert!(result.is_err()); + assert_eq!( + std::fs::canonicalize(env::current_dir().unwrap()).unwrap(), + canon_tmp + ); + + let result = builtin_cd(&[]).unwrap(); + assert!(result); + let cwd = env::current_dir().unwrap(); + let canon_home = std::fs::canonicalize(&home).unwrap(); + let canon_cwd = std::fs::canonicalize(&cwd).unwrap(); + assert_eq!(canon_cwd, canon_home, "cd with no args should go home"); + + let _ = env::set_current_dir("/tmp"); + let result = builtin_cd(&["~".to_string()]).unwrap(); + assert!(result); + let cwd = std::fs::canonicalize(env::current_dir().unwrap()).unwrap(); + assert_eq!(cwd, canon_home, "cd ~ should go home"); + + let _ = builtin_cd(&["~/nonexistent_rtk_test_subpath_xyz".to_string()]); + + let _ = env::set_current_dir(&original); + } + + #[test] + fn test_export_simple() { + builtin_export(&["RTK_TEST_SIMPLE=value".to_string()]).unwrap(); + assert_eq!(env::var("RTK_TEST_SIMPLE").unwrap(), "value"); + env::remove_var("RTK_TEST_SIMPLE"); + } + + #[test] + fn test_export_with_equals_in_value() { + builtin_export(&["RTK_TEST_EQUALS=key=value".to_string()]).unwrap(); + assert_eq!(env::var("RTK_TEST_EQUALS").unwrap(), "key=value"); + env::remove_var("RTK_TEST_EQUALS"); + } + + #[test] + fn test_export_quoted_value() { + builtin_export(&["RTK_TEST_QUOTED=\"hello world\"".to_string()]).unwrap(); + assert_eq!(env::var("RTK_TEST_QUOTED").unwrap(), "hello world"); + env::remove_var("RTK_TEST_QUOTED"); + } + + #[test] + fn test_export_multiple() { + builtin_export(&["RTK_TEST_A=1".to_string(), 
"RTK_TEST_B=2".to_string()]).unwrap(); + assert_eq!(env::var("RTK_TEST_A").unwrap(), "1"); + assert_eq!(env::var("RTK_TEST_B").unwrap(), "2"); + env::remove_var("RTK_TEST_A"); + env::remove_var("RTK_TEST_B"); + } + + #[test] + fn test_export_no_equals() { + let result = builtin_export(&["NO_EQUALS_HERE".to_string()]).unwrap(); + assert!(result); + } + + #[test] + fn test_export_invalid_identifier_ignored() { + let result = builtin_export(&["123=x".to_string()]).unwrap(); + assert!( + result, + "builtin_export must succeed even with invalid identifier" + ); + assert!( + env::var("123").is_err(), + "var with numeric-start name must not be set" + ); + } + + #[test] + fn test_export_empty_name_ignored() { + let result = builtin_export(&["=x".to_string()]).unwrap(); + assert!(result); + } + + #[test] + fn test_is_valid_env_name() { + assert!(is_valid_env_name("FOO")); + assert!(is_valid_env_name("_FOO")); + assert!(is_valid_env_name("foo_bar_123")); + assert!(!is_valid_env_name("123foo")); + assert!(!is_valid_env_name("")); + assert!(!is_valid_env_name("foo-bar")); + assert!(!is_valid_env_name("foo bar")); + } + + #[test] + fn test_is_builtin_cd() { + assert!(is_builtin("cd")); + } + + #[test] + fn test_is_builtin_export() { + assert!(is_builtin("export")); + } + + #[test] + fn test_is_builtin_pwd() { + assert!(is_builtin("pwd")); + } + + #[test] + fn test_is_builtin_echo() { + assert!(is_builtin("echo")); + } + + #[test] + fn test_is_builtin_true() { + assert!(is_builtin("true")); + } + + #[test] + fn test_is_builtin_false() { + assert!(is_builtin("false")); + } + + #[test] + fn test_is_builtin_external() { + assert!(!is_builtin("git")); + assert!(!is_builtin("ls")); + assert!(!is_builtin("cargo")); + } + + #[test] + fn test_execute_pwd() { + let result = execute("pwd", &[]).unwrap(); + assert!(result); + } + + #[test] + fn test_execute_echo() { + let result = execute("echo", &["hello".to_string(), "world".to_string()]).unwrap(); + assert!(result); + } + + #[test] + fn 
test_execute_true() { + let result = execute("true", &[]).unwrap(); + assert!(result); + } + + #[test] + fn test_execute_false() { + let result = execute("false", &[]).unwrap(); + assert!(!result); + } + + #[test] + fn test_execute_unknown_builtin() { + let result = execute("notabuiltin", &[]); + assert!(result.is_err()); + } + + #[test] + fn test_execute_echo_n_flag() { + let result = execute("echo", &["-n".to_string(), "hello".to_string()]).unwrap(); + assert!(result); + } + + #[test] + fn test_execute_echo_empty_args() { + let result = execute("echo", &[]).unwrap(); + assert!(result); + } +} diff --git a/src/cmd/exec.rs b/src/cmd/exec.rs new file mode 100644 index 000000000..89901568e --- /dev/null +++ b/src/cmd/exec.rs @@ -0,0 +1,472 @@ +use anyhow::{Context, Result}; +use std::process::Command; + +use super::{analysis, builtins, filters, lexer}; +use crate::core::stream::{FilterMode, LineFilter, StdinMode}; +use crate::core::tracking; + +fn is_rtk_active() -> bool { + std::env::var("RTK_ACTIVE").is_ok() +} + +struct RtkActiveGuard; + +impl RtkActiveGuard { + fn new() -> Self { + std::env::set_var("RTK_ACTIVE", "1"); + RtkActiveGuard + } +} + +impl Drop for RtkActiveGuard { + fn drop(&mut self) { + std::env::remove_var("RTK_ACTIVE"); + } +} + +pub fn execute(raw: &str, verbose: u8) -> Result { + if is_rtk_active() { + if verbose > 0 { + eprintln!("rtk: Recursion detected, passing through"); + } + return run_passthrough(raw, verbose); + } + + if raw.trim().is_empty() { + return Ok(0); + } + + let _guard = RtkActiveGuard::new(); + execute_inner(raw, verbose) +} + +fn execute_inner(raw: &str, verbose: u8) -> Result { + let tokens = lexer::tokenize(raw); + + if analysis::needs_shell(&tokens) { + return run_passthrough(raw, verbose); + } + + let commands = + analysis::parse_chain(tokens).map_err(|e| anyhow::anyhow!("Parse error: {}", e))?; + + run_native(&commands, verbose) +} + +fn run_native(commands: &[analysis::NativeCommand], verbose: u8) -> Result { + let mut 
last_exit: i32 = 0; + let mut prev_operator: Option<&str> = None; + + for cmd in commands { + if !analysis::should_run(prev_operator, last_exit == 0) { + prev_operator = cmd.operator.as_deref(); + continue; + } + + // ISSUE #917: flatten nested rtk run to prevent recursion + if cmd.binary == "rtk" && cmd.args.first().map(|s| s.as_str()) == Some("run") { + let inner = if cmd.args.get(1).map(|s| s.as_str()) == Some("-c") { + cmd.args.get(2).cloned().unwrap_or_default() + } else { + cmd.args.get(1).cloned().unwrap_or_default() + }; + if verbose > 0 { + eprintln!("rtk: Flattening nested rtk run"); + } + return execute(&inner, verbose); + } + + if builtins::is_builtin(&cmd.binary) { + let ok = builtins::execute(&cmd.binary, &cmd.args)?; + last_exit = if ok { 0 } else { 1 }; + prev_operator = cmd.operator.as_deref(); + continue; + } + + last_exit = spawn_with_filter(&cmd.binary, &cmd.args, verbose)?; + prev_operator = cmd.operator.as_deref(); + } + + Ok(last_exit) +} + +fn spawn_with_filter(binary: &str, args: &[String], verbose: u8) -> Result { + let timer = tracking::TimedExecution::start(); + + if verbose > 1 { + eprintln!( + "[rtk exec] binary={} interactive={} unstaged={}", + binary, + super::predicates::is_interactive(), + super::predicates::has_unstaged_changes(), + ); + } + + let binary_path = match which::which(binary) { + Ok(path) => path, + Err(_) => { + eprintln!("rtk: {}: command not found", binary); + return Ok(127); + } + }; + + let mut cmd = Command::new(&binary_path); + cmd.args(args); + + let mode = filters::get_filter_mode(binary); + let result = crate::core::stream::run_streaming(&mut cmd, StdinMode::Inherit, mode) + .with_context(|| format!("Failed to execute: {}", binary))?; + + let orig_cmd = if args.is_empty() { + binary.to_string() + } else { + format!("{} {}", binary, args.join(" ")) + }; + + let rtk_cmd = if binary == "rtk" { + if args.is_empty() { + "rtk".to_string() + } else { + format!("rtk {}", args.join(" ")) + } + } else { + let 
native_cmd = analysis::NativeCommand { + binary: binary.to_string(), + args: args.to_vec(), + operator: None, + }; + match super::hook::try_route_native_command(&native_cmd, &orig_cmd) { + Some(routed) => routed, + None => format!("rtk run {}", orig_cmd), + } + }; + timer.track(&orig_cmd, &rtk_cmd, &result.raw, &result.filtered); + + Ok(result.exit_code) +} + +pub fn run_passthrough(raw: &str, verbose: u8) -> Result { + if verbose > 0 { + eprintln!("rtk: Passthrough mode for complex command"); + } + + let timer = tracking::TimedExecution::start(); + + let shell = if cfg!(windows) { "cmd" } else { "sh" }; + let flag = if cfg!(windows) { "/C" } else { "-c" }; + + let mut cmd = Command::new(shell); + cmd.arg(flag).arg(raw); + + let filter = LineFilter::new(|l| Some(format!("{}\n", crate::core::utils::strip_ansi(l)))); + let result = crate::core::stream::run_streaming( + &mut cmd, + StdinMode::Inherit, + FilterMode::Streaming(Box::new(filter)), + ) + .context("Failed to execute passthrough")?; + + timer.track( + raw, + &format!("rtk passthrough {}", raw), + &result.raw, + &result.filtered, + ); + + Ok(result.exit_code) +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::cmd::hook; + use crate::cmd::test_helpers::EnvGuard; + + #[test] + fn test_is_rtk_active_default() { + let _env = EnvGuard::new(); + assert!(!is_rtk_active()); + } + + #[test] + fn test_raii_guard_sets_and_clears() { + let _env = EnvGuard::new(); + { + let _guard = RtkActiveGuard::new(); + assert!(is_rtk_active()); + } + assert!( + !is_rtk_active(), + "RTK_ACTIVE must be cleared when guard drops" + ); + } + + #[test] + fn test_raii_guard_clears_on_panic() { + let _env = EnvGuard::new(); + let result = std::panic::catch_unwind(|| { + let _guard = RtkActiveGuard::new(); + assert!(is_rtk_active()); + panic!("simulated panic"); + }); + assert!(result.is_err()); + assert!( + !is_rtk_active(), + "RTK_ACTIVE must be cleared even after panic" + ); + } + + #[test] + fn test_execute_empty() { + 
assert_eq!(execute("", 0).unwrap(), 0); + } + + #[test] + fn test_execute_whitespace_only() { + assert_eq!(execute(" ", 0).unwrap(), 0); + } + + #[test] + fn test_execute_simple_command() { + assert_eq!(execute("echo hello", 0).unwrap(), 0); + } + + #[test] + fn test_execute_builtin_cd() { + let original = std::env::current_dir().unwrap(); + assert_eq!(execute("cd /tmp", 0).unwrap(), 0); + let _ = std::env::set_current_dir(&original); + } + + #[test] + fn test_execute_builtin_pwd() { + assert_eq!(execute("pwd", 0).unwrap(), 0); + } + + #[test] + fn test_execute_builtin_true() { + assert_eq!(execute("true", 0).unwrap(), 0); + } + + #[test] + fn test_execute_builtin_false() { + assert_ne!(execute("false", 0).unwrap(), 0); + } + + #[test] + fn test_execute_chain_and_success() { + assert_eq!(execute("true && echo success", 0).unwrap(), 0); + } + + #[test] + fn test_execute_chain_and_failure() { + assert_ne!(execute("false && echo should_not_run", 0).unwrap(), 0); + } + + #[test] + fn test_execute_chain_or_success() { + assert_eq!(execute("true || echo should_not_run", 0).unwrap(), 0); + } + + #[test] + fn test_execute_chain_or_failure() { + assert_eq!(execute("false || echo fallback", 0).unwrap(), 0); + } + + #[test] + fn test_execute_chain_semicolon() { + assert_ne!(execute("true ; false", 0).unwrap(), 0); + } + + #[test] + fn test_execute_passthrough_for_glob() { + assert_eq!(execute("echo *", 0).unwrap(), 0); + } + + #[test] + fn test_execute_passthrough_for_pipe() { + assert_eq!(execute("echo hello | cat", 0).unwrap(), 0); + } + + #[test] + fn test_execute_quoted_operator() { + assert_eq!(execute(r#"echo "hello && world""#, 0).unwrap(), 0); + } + + #[test] + fn test_execute_binary_not_found() { + assert_eq!(execute("nonexistent_command_xyz_123", 0).unwrap(), 127); + } + + #[test] + fn test_execute_chain_and_three_commands() { + assert_ne!(execute("true && false && true", 0).unwrap(), 0); + } + + #[test] + fn test_execute_chain_semicolon_last_wins() { + 
assert_eq!(execute("false ; true", 0).unwrap(), 0); + } + + #[test] + fn test_chain_mixed_operators() { + assert_eq!(execute("false || true && echo works", 0).unwrap(), 0); + } + + #[test] + fn test_passthrough_redirect() { + assert_eq!(execute("echo test > /dev/null", 0).unwrap(), 0); + } + + #[test] + fn test_integration_cd_tilde() { + let original = std::env::current_dir().unwrap(); + assert_eq!(execute("cd ~", 0).unwrap(), 0); + let _ = std::env::set_current_dir(&original); + } + + #[test] + fn test_integration_export() { + assert_eq!(execute("export TEST_VAR=value", 0).unwrap(), 0); + std::env::remove_var("TEST_VAR"); + } + + #[test] + fn test_integration_env_prefix() { + let result = execute("TEST=1 echo hello", 0); + assert!(result.is_ok()); + } + + #[test] + fn test_integration_dash_args() { + assert_eq!(execute("echo --help -v --version", 0).unwrap(), 0); + } + + #[test] + fn test_integration_quoted_empty() { + assert_eq!(execute(r#"echo """#, 0).unwrap(), 0); + } + + #[test] + fn test_execute_rtk_recursion() { + let result = execute("rtk run \"echo hello\"", 0); + assert!(result.is_ok()); + } + + #[test] + fn test_execute_returns_real_exit_code() { + let code = execute("sh -c \"exit 42\"", 0).unwrap(); + assert_eq!(code, 42, "exit code must be propagated exactly"); + } + + #[test] + fn test_execute_success_returns_zero() { + assert_eq!(execute("true", 0).unwrap(), 0); + } + + #[test] + fn test_run_native_and_chain_exit_code() { + assert_ne!(execute("true && false", 0).unwrap(), 0); + } + + fn compute_rtk_cmd_label(binary: &str, args: &[&str]) -> String { + let native_cmd = analysis::NativeCommand { + binary: binary.to_string(), + args: args.iter().map(|s| s.to_string()).collect(), + operator: None, + }; + let orig_cmd = if args.is_empty() { + binary.to_string() + } else { + format!("{} {}", binary, args.join(" ")) + }; + + if binary == "rtk" { + if args.is_empty() { + "rtk".to_string() + } else { + format!("rtk {}", args.join(" ")) + } + } else { + match 
hook::try_route_native_command(&native_cmd, &orig_cmd) { + Some(routed) => routed, + None => format!("rtk run {}", orig_cmd), + } + } + } + + #[test] + fn test_tracking_routed_command_uses_rtk_prefix() { + let label = compute_rtk_cmd_label("ls", &["-F"]); + assert!( + label == "rtk ls -F", + "Expected 'rtk ls -F', got '{}'", + label + ); + } + + #[test] + fn test_tracking_git_status_uses_rtk_git() { + let label = compute_rtk_cmd_label("git", &["status"]); + assert!( + label == "rtk git status", + "Expected 'rtk git status', got '{}'", + label + ); + } + + #[test] + fn test_tracking_cargo_test_uses_rtk_cargo() { + let label = compute_rtk_cmd_label("cargo", &["test"]); + assert!( + label == "rtk cargo test", + "Expected 'rtk cargo test', got '{}'", + label + ); + } + + #[test] + fn test_tracking_unknown_command_uses_rtk_run() { + let label = compute_rtk_cmd_label("python3", &["--version"]); + assert!( + label == "rtk run python3 --version", + "Expected 'rtk run python3 --version', got '{}'", + label + ); + } + + #[test] + fn test_tracking_rtk_self_reference_no_double_rtk() { + let label = compute_rtk_cmd_label("rtk", &["git", "status"]); + assert!( + label == "rtk git status", + "Expected 'rtk git status', got '{}'", + label + ); + assert!( + !label.contains("rtk run rtk"), + "Should NOT contain 'rtk run rtk', got '{}'", + label + ); + } + + #[test] + fn test_tracking_find_uses_rtk_run() { + let label = compute_rtk_cmd_label("find", &[".", "-name", "*.rs"]); + assert!( + label.starts_with("rtk run"), + "Expected 'rtk run ...' 
(find not in ROUTES), got '{}'", + label + ); + } + + #[test] + fn test_tracking_grep_uses_rtk_grep() { + let label = compute_rtk_cmd_label("grep", &["-r", "pattern"]); + assert!( + label.starts_with("rtk grep"), + "Expected 'rtk grep ...', got '{}'", + label + ); + } +} diff --git a/src/cmd/filters.rs b/src/cmd/filters.rs new file mode 100644 index 000000000..d16eb295c --- /dev/null +++ b/src/cmd/filters.rs @@ -0,0 +1,326 @@ +use crate::core::stream::{FilterMode, LineFilter}; +use crate::core::utils; + +fn filter_cargo_output(output: &str) -> String { + output + .lines() + .filter(|line| { + let line = line.trim(); + !line.starts_with("Compiling ") || line.contains("error") || line.contains("warning") + }) + .collect::>() + .join("\n") +} + +fn filter_test_output(output: &str) -> String { + output + .lines() + .filter(|line| { + let line = line.trim(); + line.contains("FAILED") + || line.contains("error") + || line.contains("Error") + || line.contains("failed") + || line.contains("test result:") + || line.starts_with("----") + }) + .collect::>() + .join("\n") +} + +pub fn get_filter_mode(binary: &str) -> FilterMode { + match binary { + "ls" | "find" | "grep" | "rg" | "fd" => { + FilterMode::Streaming(Box::new(LineFilter::new(|l| { + let stripped = utils::strip_ansi(l); + let truncated = if stripped.len() > 120 { + format!("{}...", &stripped[..117]) + } else { + stripped + }; + Some(format!("{}\n", truncated)) + }))) + } + "cargo" => FilterMode::Buffered(filter_cargo_output), + "pytest" | "jest" | "mocha" | "vitest" | "mypy" | "ruff" | "golangci-lint" => { + FilterMode::Buffered(filter_test_output) + } + "git" => FilterMode::Streaming(Box::new(LineFilter::new(|l| { + Some(format!("{}\n", utils::strip_ansi(l))) + }))), + "npm" | "npx" | "pnpm" => FilterMode::Streaming(Box::new(LineFilter::new(|l| { + Some(format!("{}\n", utils::strip_ansi(l))) + }))), + _ => FilterMode::Passthrough, + } +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn 
test_strip_ansi_no_codes() { + assert_eq!(utils::strip_ansi("hello world"), "hello world"); + } + + #[test] + fn test_strip_ansi_color() { + assert_eq!(utils::strip_ansi("\x1b[32mgreen\x1b[0m"), "green"); + } + + #[test] + fn test_strip_ansi_bold() { + assert_eq!(utils::strip_ansi("\x1b[1mbold\x1b[0m"), "bold"); + } + + #[test] + fn test_strip_ansi_multiple() { + assert_eq!( + utils::strip_ansi("\x1b[31mred\x1b[0m \x1b[32mgreen\x1b[0m"), + "red green" + ); + } + + #[test] + fn test_strip_ansi_complex() { + assert_eq!( + utils::strip_ansi("\x1b[1;31;42mbold red on green\x1b[0m"), + "bold red on green" + ); + } + + #[test] + fn test_filter_cargo_keeps_errors() { + let input = "Compiling dep1\nerror: something wrong\nCompiling dep2"; + let output = filter_cargo_output(input); + assert!(output.contains("error")); + assert!(!output.contains("Compiling dep1")); + } + + #[test] + fn test_filter_cargo_keeps_warnings() { + let input = "Compiling dep1\nwarning: unused variable\nCompiling dep2"; + let output = filter_cargo_output(input); + assert!(output.contains("warning")); + } + + #[test] + fn test_filter_test_keeps_failures() { + let input = "test foo ... ok\ntest bar ... FAILED\ntest result: 1 passed; 1 failed"; + let output = filter_test_output(input); + assert!(output.contains("FAILED")); + assert!(output.contains("test result:")); + assert!(!output.contains("test foo")); + } + + fn truncate_lines(output: &str, max_lines: usize) -> String { + let lines: Vec<&str> = output.lines().collect(); + if lines.len() <= max_lines { + output.to_string() + } else { + let truncated: Vec<&str> = lines.iter().take(max_lines).copied().collect(); + format!( + "{}\n... 
({} more lines)", + truncated.join("\n"), + lines.len() - max_lines + ) + } + } + + #[test] + fn test_truncate_short() { + let input = "line1\nline2\nline3"; + let output = truncate_lines(input, 10); + assert_eq!(output, input); + } + + #[test] + fn test_truncate_long() { + let input = "line1\nline2\nline3\nline4\nline5"; + let output = truncate_lines(input, 3); + assert!(output.contains("line3")); + assert!(!output.contains("line4")); + assert!(output.contains("2 more lines")); + } + + #[test] + fn test_get_filter_mode_grep_is_streaming() { + assert!(matches!(get_filter_mode("grep"), FilterMode::Streaming(_))); + } + + #[test] + fn test_get_filter_mode_rg_is_streaming() { + assert!(matches!(get_filter_mode("rg"), FilterMode::Streaming(_))); + } + + #[test] + fn test_get_filter_mode_find_is_streaming() { + assert!(matches!(get_filter_mode("find"), FilterMode::Streaming(_))); + } + + #[test] + fn test_get_filter_mode_fd_is_streaming() { + assert!(matches!(get_filter_mode("fd"), FilterMode::Streaming(_))); + } + + #[test] + fn test_get_filter_mode_ls_is_streaming() { + assert!(matches!(get_filter_mode("ls"), FilterMode::Streaming(_))); + } + + #[test] + fn test_get_filter_mode_cargo_is_buffered() { + assert!(matches!(get_filter_mode("cargo"), FilterMode::Buffered(_))); + } + + #[test] + fn test_get_filter_mode_mypy_is_buffered() { + assert!(matches!(get_filter_mode("mypy"), FilterMode::Buffered(_))); + } + + #[test] + fn test_get_filter_mode_ruff_is_buffered() { + assert!(matches!(get_filter_mode("ruff"), FilterMode::Buffered(_))); + } + + #[test] + fn test_get_filter_mode_golangci_lint_is_buffered() { + assert!(matches!( + get_filter_mode("golangci-lint"), + FilterMode::Buffered(_) + )); + } + + #[test] + fn test_get_filter_mode_npm_is_streaming() { + assert!(matches!(get_filter_mode("npm"), FilterMode::Streaming(_))); + } + + #[test] + fn test_get_filter_mode_pnpm_is_streaming() { + assert!(matches!(get_filter_mode("pnpm"), FilterMode::Streaming(_))); + } + + 
#[test] + fn test_get_filter_mode_git_is_streaming() { + assert!(matches!(get_filter_mode("git"), FilterMode::Streaming(_))); + } + + #[test] + fn test_get_filter_mode_unknown_is_passthrough() { + assert!(matches!( + get_filter_mode("unknowncmd"), + FilterMode::Passthrough + )); + } + + #[test] + fn test_get_filter_mode_grep_strips_ansi_and_emits() { + let mut mode = get_filter_mode("grep"); + if let FilterMode::Streaming(ref mut filter) = mode { + let result = filter.feed_line("\x1b[32msrc/main.rs:42:fn main\x1b[0m"); + assert!(result.is_some(), "streaming filter must emit a line"); + let out = result.unwrap(); + assert!( + out.contains("src/main.rs"), + "ANSI stripped, path preserved: {}", + out + ); + assert!( + !out.contains("\x1b["), + "ANSI codes must be stripped: {}", + out + ); + } else { + panic!("Expected FilterMode::Streaming for 'grep'"); + } + } + + #[test] + fn test_get_filter_mode_find_truncates_long_lines() { + let long_line = "a".repeat(200); + let mut mode = get_filter_mode("find"); + if let FilterMode::Streaming(ref mut filter) = mode { + let result = filter.feed_line(&long_line); + assert!(result.is_some()); + let out = result.unwrap(); + assert!( + out.len() <= 125, + "line must be truncated: len={}", + out.len() + ); + assert!(out.contains("..."), "truncated line must contain '...'"); + } else { + panic!("Expected FilterMode::Streaming for 'find'"); + } + } + + #[test] + fn test_get_filter_mode_rg_short_line_passes_through() { + let short_line = "src/foo.rs:10:hello"; + let mut mode = get_filter_mode("rg"); + if let FilterMode::Streaming(ref mut filter) = mode { + let result = filter.feed_line(short_line); + assert!(result.is_some()); + let out = result.unwrap(); + assert!(out.contains("src/foo.rs"), "out={}", out); + } else { + panic!("Expected FilterMode::Streaming for 'rg'"); + } + } + + #[test] + fn test_get_filter_mode_go_is_passthrough() { + assert!(matches!(get_filter_mode("go"), FilterMode::Passthrough)); + } + + #[test] + fn 
test_get_filter_mode_npx_is_streaming() { + assert!(matches!(get_filter_mode("npx"), FilterMode::Streaming(_))); + } + + #[test] + fn test_get_filter_mode_npm_strips_ansi() { + let mut mode = get_filter_mode("npm"); + if let FilterMode::Streaming(ref mut filter) = mode { + let result = filter.feed_line("\x1b[33mWARN\x1b[0m deprecated package"); + assert!(result.is_some()); + let out = result.unwrap(); + assert!(out.contains("WARN"), "content preserved: {}", out); + assert!(!out.contains("\x1b["), "ANSI codes stripped: {}", out); + } else { + panic!("Expected FilterMode::Streaming for 'npm'"); + } + } + + #[test] + fn test_filter_test_output_no_failures_returns_empty() { + let input = "test foo ... ok\ntest bar ... ok\ntest baz ... ok"; + let output = filter_test_output(input); + assert!( + output.is_empty(), + "all-passing tests should produce empty output" + ); + } + + #[test] + fn test_filter_cargo_output_only_compiling() { + let input = "Compiling dep1\nCompiling dep2\nCompiling dep3"; + let output = filter_cargo_output(input); + assert!( + output.is_empty() || output.trim().is_empty(), + "pure Compiling output should be filtered out" + ); + } + + #[test] + fn test_filter_test_output_keeps_separator_lines() { + let input = "test foo ... 
ok\n---- test_bar stdout ----\nerror: assertion failed\ntest result: 0 passed; 1 failed"; + let output = filter_test_output(input); + assert!(output.contains("----"), "separator lines preserved"); + assert!(output.contains("error:"), "error lines preserved"); + assert!(output.contains("test result:"), "summary preserved"); + assert!(!output.contains("test foo"), "passing test filtered out"); + } +} diff --git a/src/cmd/hook/claude.rs b/src/cmd/hook/claude.rs new file mode 100644 index 000000000..85c992765 --- /dev/null +++ b/src/cmd/hook/claude.rs @@ -0,0 +1,539 @@ +#![deny(clippy::print_stdout, clippy::print_stderr)] + +use super::{ + check_for_hook, is_hook_disabled, should_passthrough, update_command_in_tool_input, + HookResponse, HookResult, +}; +use serde::{Deserialize, Serialize}; +use serde_json::Value; +use std::io::{self, Read, Write}; + +#[derive(Deserialize)] +pub(crate) struct ClaudePayload { + tool_input: Option, +} + +#[derive(Serialize)] +#[serde(rename_all = "camelCase")] +pub(crate) struct ClaudeResponse { + hook_specific_output: HookOutput, +} + +#[derive(Serialize)] +#[serde(rename_all = "camelCase")] +struct HookOutput { + hook_event_name: &'static str, + permission_decision: &'static str, + permission_decision_reason: String, + #[serde(skip_serializing_if = "Option::is_none")] + updated_input: Option, +} + +#[derive(Deserialize)] +struct ManifestFallthroughEntry { + fallthrough_command: String, +} + +#[derive(Deserialize)] +struct ManifestFallthrough { + entries: Vec, +} + +pub(crate) fn extract_command(payload: &ClaudePayload) -> Option<&str> { + payload + .tool_input + .as_ref()? + .get("command")? 
+ .as_str() + .filter(|s| !s.is_empty()) +} + +pub(crate) fn allow_response(reason: String, updated_input: Option) -> ClaudeResponse { + ClaudeResponse { + hook_specific_output: HookOutput { + hook_event_name: "PreToolUse", + permission_decision: "allow", + permission_decision_reason: reason, + updated_input, + }, + } +} + +pub(crate) fn deny_response(reason: String) -> ClaudeResponse { + ClaudeResponse { + hook_specific_output: HookOutput { + hook_event_name: "PreToolUse", + permission_decision: "deny", + permission_decision_reason: reason, + updated_input: None, + }, + } +} + +pub fn run() -> anyhow::Result<()> { + let mut buffer = String::new(); + io::stdin().read_to_string(&mut buffer)?; + + let response = match run_inner(&buffer) { + Ok(r) => r, + Err(_) => HookResponse::NoOpinion, + }; + + match response { + HookResponse::NoOpinion => match run_manifest_handlers(&buffer) { + ManifestResult::Blocked { json, stderr_bytes } => { + writeln!(io::stdout(), "{json}")?; + io::stderr().write_all(&stderr_bytes)?; + if stderr_bytes.is_empty() { + writeln!(io::stderr(), "Command blocked by registered handler")?; + } + std::process::exit(2); + } + ManifestResult::NoBlock => {} + }, + HookResponse::Allow(rtk_json) => match run_manifest_handlers(&buffer) { + ManifestResult::Blocked { + json: handler_json, + stderr_bytes, + } => { + writeln!(io::stdout(), "{handler_json}")?; + io::stderr().write_all(&stderr_bytes)?; + if stderr_bytes.is_empty() { + let reason = extract_deny_reason(&handler_json).unwrap_or_else(|| { + "Command blocked by registered safety handler".to_owned() + }); + writeln!(io::stderr(), "{reason}")?; + } + std::process::exit(2); + } + ManifestResult::NoBlock => { + writeln!(io::stdout(), "{rtk_json}")?; + } + }, + HookResponse::Deny(json, reason) => { + // ISSUE #4669: dual-path deny workaround — stdout JSON + stderr reason + exit 2 + writeln!(io::stdout(), "{json}")?; + writeln!(io::stderr(), "{reason}")?; + std::process::exit(2); + } + } + Ok(()) +} + +fn 
run_inner(buffer: &str) -> anyhow::Result { + let payload: ClaudePayload = match serde_json::from_str(buffer) { + Ok(p) => p, + Err(_) => return Ok(HookResponse::NoOpinion), + }; + + let cmd = match extract_command(&payload) { + Some(c) => c, + None => return Ok(HookResponse::NoOpinion), + }; + + if is_hook_disabled() || should_passthrough(cmd) { + return Ok(HookResponse::NoOpinion); + } + + let result = check_for_hook(cmd, "claude"); + + match result { + HookResult::Rewrite(new_cmd) => { + let updated = update_command_in_tool_input(payload.tool_input, new_cmd); + + let response = allow_response("RTK safety rewrite applied".into(), Some(updated)); + let json = serde_json::to_string(&response)?; + Ok(HookResponse::Allow(json)) + } + HookResult::Blocked(msg) => { + let response = deny_response(msg.clone()); + let json = serde_json::to_string(&response)?; + Ok(HookResponse::Deny(json, msg)) + } + } +} + +fn manifest_path() -> Option { + let home = std::env::var("HOME") + .or_else(|_| std::env::var("USERPROFILE")) + .ok()?; + Some( + std::path::Path::new(&home) + .join(".claude") + .join("hooks") + .join("rtk-bash-manifest.json"), + ) +} + +fn is_json_deny(json_str: &str) -> bool { + let Ok(v) = serde_json::from_str::(json_str.trim()) else { + return false; + }; + let cc_deny = v + .get("hookSpecificOutput") + .and_then(|o| o.get("permissionDecision")) + .and_then(|d| d.as_str()) + == Some("deny"); + let gemini_deny = v.get("decision").and_then(|d| d.as_str()) == Some("deny"); + cc_deny || gemini_deny +} + +fn extract_deny_reason(json_str: &str) -> Option { + let v: Value = serde_json::from_str(json_str.trim()).ok()?; + if let Some(r) = v + .get("hookSpecificOutput") + .and_then(|o| o.get("permissionDecisionReason")) + .and_then(|r| r.as_str()) + { + return Some(r.to_owned()); + } + v.get("reason").and_then(|r| r.as_str()).map(str::to_owned) +} + +enum ManifestResult { + Blocked { json: String, stderr_bytes: Vec }, + NoBlock, +} + +fn load_manifest() -> Option { + let 
path = manifest_path()?; + if !path.exists() { + return None; + } + let content = std::fs::read_to_string(&path).ok()?; + serde_json::from_str(&content).ok() +} + +fn run_manifest_handlers(payload: &str) -> ManifestResult { + let manifest = match load_manifest() { + Some(m) => m, + None => return ManifestResult::NoBlock, + }; + + let mut block_json: Option = None; + let mut block_stderr: Vec = Vec::new(); + + for entry in &manifest.entries { + let mut child = match std::process::Command::new("sh") + .arg("-c") + .arg(&entry.fallthrough_command) + .stdin(std::process::Stdio::piped()) + .stdout(std::process::Stdio::piped()) + .stderr(std::process::Stdio::piped()) + .spawn() + { + Ok(c) => c, + Err(_) => continue, + }; + + // Track write success to avoid false-positive exit 2 on partial stdin + let write_ok = if let Some(mut stdin) = child.stdin.take() { + io::Write::write_all(&mut stdin, payload.as_bytes()).is_ok() + } else { + false + }; + + let output = match child.wait_with_output() { + Ok(o) => o, + Err(_) => continue, + }; + + let exit_code = output.status.code().unwrap_or(0); + let stdout_str = String::from_utf8_lossy(&output.stdout); + let blocked = (exit_code == 2 && write_ok) || is_json_deny(&stdout_str); + + if blocked && block_json.is_none() { + block_json = Some(stdout_str.into_owned()); + block_stderr.extend_from_slice(&output.stderr); + } + } + + match block_json { + Some(json) => ManifestResult::Blocked { + json, + stderr_bytes: block_stderr, + }, + None => ManifestResult::NoBlock, + } +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::cmd::test_helpers::EnvGuard; + + #[test] + fn test_output_uses_hook_specific_output() { + let response = allow_response("test".into(), None); + let json = serde_json::to_string(&response).unwrap(); + let parsed: Value = serde_json::from_str(&json).unwrap(); + + assert!( + parsed.get("hookSpecificOutput").is_some(), + "must have 'hookSpecificOutput' field" + ); + assert!( + 
parsed.get("hook_specific_output").is_none(), + "must NOT have snake_case field" + ); + } + + #[test] + fn test_output_uses_permission_decision() { + let response = allow_response("test".into(), None); + let json = serde_json::to_string(&response).unwrap(); + let parsed: Value = serde_json::from_str(&json).unwrap(); + let output = &parsed["hookSpecificOutput"]; + + assert!( + output.get("permissionDecision").is_some(), + "must have 'permissionDecision' field" + ); + assert!( + output.get("decision").is_none(), + "must NOT have Gemini-style 'decision' field" + ); + } + + #[test] + fn test_output_uses_permission_decision_reason() { + let response = deny_response("blocked".into()); + let json = serde_json::to_string(&response).unwrap(); + let parsed: Value = serde_json::from_str(&json).unwrap(); + let output = &parsed["hookSpecificOutput"]; + + assert!( + output.get("permissionDecisionReason").is_some(), + "must have 'permissionDecisionReason'" + ); + } + + #[test] + fn test_output_uses_hook_event_name() { + let response = allow_response("test".into(), None); + let json = serde_json::to_string(&response).unwrap(); + let parsed: Value = serde_json::from_str(&json).unwrap(); + + assert_eq!(parsed["hookSpecificOutput"]["hookEventName"], "PreToolUse"); + } + + #[test] + fn test_output_uses_updated_input_for_rewrite() { + let input = serde_json::json!({"command": "rtk run -c 'git status'"}); + let response = allow_response("rewrite".into(), Some(input)); + let json = serde_json::to_string(&response).unwrap(); + let parsed: Value = serde_json::from_str(&json).unwrap(); + + assert!( + parsed["hookSpecificOutput"].get("updatedInput").is_some(), + "must have 'updatedInput' for rewrites" + ); + } + + #[test] + fn test_allow_omits_updated_input_when_none() { + let response = allow_response("passthrough".into(), None); + let json = serde_json::to_string(&response).unwrap(); + + assert!( + !json.contains("updatedInput"), + "updatedInput must be omitted when None" + ); + } + + 
#[test] + fn test_rewrite_preserves_other_tool_input_fields() { + let original = serde_json::json!({ + "command": "git status", + "timeout": 30, + "description": "check repo" + }); + + let mut updated = original.clone(); + if let Some(obj) = updated.as_object_mut() { + obj.insert( + "command".into(), + Value::String("rtk run -c 'git status'".into()), + ); + } + + assert_eq!(updated["timeout"], 30); + assert_eq!(updated["description"], "check repo"); + assert_eq!(updated["command"], "rtk run -c 'git status'"); + } + + #[test] + fn test_output_decision_values() { + let allow = allow_response("test".into(), None); + let deny = deny_response("blocked".into()); + + let allow_json: Value = + serde_json::from_str(&serde_json::to_string(&allow).unwrap()).unwrap(); + let deny_json: Value = + serde_json::from_str(&serde_json::to_string(&deny).unwrap()).unwrap(); + + assert_eq!( + allow_json["hookSpecificOutput"]["permissionDecision"], + "allow" + ); + assert_eq!( + deny_json["hookSpecificOutput"]["permissionDecision"], + "deny" + ); + } + + #[test] + fn test_input_extra_fields_ignored() { + let json = r#"{"tool_input": {"command": "ls"}, "tool_name": "Bash", "session_id": "abc-123", "session_cwd": "/tmp", "transcript_path": "/path/to/transcript.jsonl"}"#; + let payload: ClaudePayload = serde_json::from_str(json).unwrap(); + assert_eq!(extract_command(&payload), Some("ls")); + } + + #[test] + fn test_input_tool_input_is_object() { + let json = r#"{"tool_input": {"command": "git status", "timeout": 30}}"#; + let payload: ClaudePayload = serde_json::from_str(json).unwrap(); + let input = payload.tool_input.unwrap(); + assert_eq!(input["command"].as_str().unwrap(), "git status"); + assert_eq!(input["timeout"].as_i64().unwrap(), 30); + } + + #[test] + fn test_extract_command_basic() { + let payload: ClaudePayload = + serde_json::from_str(r#"{"tool_input": {"command": "git status"}}"#).unwrap(); + assert_eq!(extract_command(&payload), Some("git status")); + } + + #[test] + fn 
test_extract_command_missing_tool_input() { + let payload: ClaudePayload = serde_json::from_str(r#"{}"#).unwrap(); + assert_eq!(extract_command(&payload), None); + } + + #[test] + fn test_extract_command_missing_command_field() { + let payload: ClaudePayload = + serde_json::from_str(r#"{"tool_input": {"cwd": "/tmp"}}"#).unwrap(); + assert_eq!(extract_command(&payload), None); + } + + #[test] + fn test_extract_command_empty_string() { + let payload: ClaudePayload = + serde_json::from_str(r#"{"tool_input": {"command": ""}}"#).unwrap(); + assert_eq!(extract_command(&payload), None); + } + + #[test] + fn test_shared_should_passthrough_rtk_prefix() { + assert!(should_passthrough("rtk run -c 'ls'")); + assert!(should_passthrough("rtk cargo test")); + assert!(should_passthrough("/usr/local/bin/rtk run -c 'ls'")); + } + + #[test] + fn test_shared_should_passthrough_heredoc() { + assert!(should_passthrough("cat <(input); + } + } + + #[test] + fn test_run_inner_returns_no_opinion_for_empty_payload() { + let payload: ClaudePayload = serde_json::from_str("{}").unwrap(); + assert_eq!(extract_command(&payload), None); + } + + #[test] + fn test_shared_is_hook_disabled_hook_enabled_zero() { + let _env = EnvGuard::new(); + std::env::set_var("RTK_HOOK_ENABLED", "0"); + assert!(is_hook_disabled()); + } + + #[test] + fn test_shared_is_hook_disabled_rtk_active() { + let _env = EnvGuard::new(); + std::env::set_var("RTK_ACTIVE", "1"); + assert!(is_hook_disabled()); + } + + #[test] + fn test_deny_response_includes_reason_for_stderr() { + // ISSUE #4669: deny must provide plain text reason for stderr dual-path workaround + let msg = "RTK: cat is blocked (use rtk read instead)"; + let response = deny_response(msg.to_string()); + let json = serde_json::to_string(&response).unwrap(); + let parsed: Value = serde_json::from_str(&json).unwrap(); + + assert_eq!(parsed["hookSpecificOutput"]["permissionDecision"], "deny"); + assert_eq!( + parsed["hookSpecificOutput"]["permissionDecisionReason"], + 
msg + ); + } + + #[test] + fn test_is_json_deny_claude_code_format() { + let json = r#"{"hookSpecificOutput":{"permissionDecision":"deny","permissionDecisionReason":"blocked"}}"#; + assert!(is_json_deny(json)); + } + + #[test] + fn test_is_json_deny_gemini_format() { + let json = r#"{"decision":"deny","reason":"blocked"}"#; + assert!(is_json_deny(json)); + } + + #[test] + fn test_is_json_deny_allow_not_matched() { + assert!(!is_json_deny( + r#"{"hookSpecificOutput":{"permissionDecision":"allow"}}"# + )); + assert!(!is_json_deny(r#"{"decision":"allow"}"#)); + assert!(!is_json_deny("")); + assert!(!is_json_deny("not json")); + } + + #[test] + fn test_extract_deny_reason_cc_format() { + let json = r#"{"hookSpecificOutput":{"permissionDecision":"deny","permissionDecisionReason":"Use Grep tool"}}"#; + assert_eq!(extract_deny_reason(json), Some("Use Grep tool".to_owned())); + } + + #[test] + fn test_extract_deny_reason_gemini_format() { + let json = r#"{"decision":"deny","reason":"command blocked"}"#; + assert_eq!( + extract_deny_reason(json), + Some("command blocked".to_owned()) + ); + } + + #[test] + fn test_extract_deny_reason_missing() { + assert_eq!(extract_deny_reason("{}"), None); + assert_eq!(extract_deny_reason("not json"), None); + } + + #[test] + fn test_load_manifest_returns_none_when_missing() { + let result = load_manifest(); + drop(result); + } +} diff --git a/src/cmd/hook/mod.rs b/src/cmd/hook/mod.rs new file mode 100644 index 000000000..ba1e1ea37 --- /dev/null +++ b/src/cmd/hook/mod.rs @@ -0,0 +1,1536 @@ +pub(crate) mod claude; + +use super::{analysis, lexer}; + +#[derive(Debug, Clone)] +pub enum HookResult { + Rewrite(String), + Blocked(String), +} + +const MAX_REWRITE_DEPTH: usize = 3; + +#[derive(Debug, Clone, PartialEq)] +pub enum HookResponse { + NoOpinion, + Allow(String), + Deny(String, String), +} + +pub fn check_for_hook(raw: &str, _agent: &str) -> HookResult { + check_for_hook_inner(raw, 0) +} + +fn check_for_hook_inner(raw: &str, depth: usize) 
-> HookResult { + if depth >= MAX_REWRITE_DEPTH { + return HookResult::Blocked("Rewrite loop detected (max depth exceeded)".to_string()); + } + if raw.trim().is_empty() { + return HookResult::Rewrite(raw.to_string()); + } + + let tokens = lexer::tokenize(raw); + + let (core_tokens, suffix) = analysis::split_safe_suffix(tokens); + + if analysis::needs_shell(&core_tokens) { + return HookResult::Rewrite(format!("rtk run -c '{}'", escape_quotes(raw))); + } + + match analysis::parse_chain(core_tokens) { + Ok(commands) => { + if commands.len() == 1 { + let routed = if suffix.is_empty() { + try_route_native_command(&commands[0], raw) + } else { + let core_raw = if commands[0].args.is_empty() { + commands[0].binary.clone() + } else { + format!("{} {}", commands[0].binary, commands[0].args.join(" ")) + }; + try_route_native_command(&commands[0], &core_raw) + }; + + match routed { + Some(rtk_cmd) => { + if suffix.is_empty() { + HookResult::Rewrite(rtk_cmd) + } else { + HookResult::Rewrite(format!("{} {}", rtk_cmd, suffix)) + } + } + None => HookResult::Rewrite(raw.to_string()), + } + } else { + let substituted = reconstruct_with_rtk(&commands); + let inner = if suffix.is_empty() { + substituted + } else { + format!("{} {}", substituted, suffix) + }; + HookResult::Rewrite(format!("rtk run -c '{}'", escape_quotes(&inner))) + } + } + Err(_) => HookResult::Rewrite(raw.to_string()), + } +} + +pub fn is_hook_disabled() -> bool { + std::env::var("RTK_HOOK_ENABLED").as_deref() == Ok("0") || std::env::var("RTK_ACTIVE").is_ok() +} + +pub fn should_passthrough(cmd: &str) -> bool { + if cmd.starts_with("rtk ") || cmd.contains("/rtk ") || cmd.contains("<<") { + return true; + } + // ISSUE #196: gh --json/--jq/--template produces structured output that rtk gh + // would corrupt. Pass through unchanged so callers get raw JSON. 
+ if (cmd.starts_with("gh ") || cmd.contains(" gh ")) + && (cmd.contains("--json") || cmd.contains("--jq") || cmd.contains("--template")) + { + return true; + } + false +} + +pub fn update_command_in_tool_input( + tool_input: Option, + new_cmd: String, +) -> serde_json::Value { + use serde_json::Value; + let mut updated = tool_input.unwrap_or_else(|| Value::Object(Default::default())); + if let Some(obj) = updated.as_object_mut() { + obj.insert("command".into(), Value::String(new_cmd)); + } + updated +} + +#[cfg(test)] +const FORMAT_PRESERVING: &[&str] = &["tail", "echo", "cat", "find", "fd"]; + +#[cfg(test)] +const TRANSPARENT_SINKS: &[&str] = &["tee", "head", "tail", "cat"]; + +fn escape_quotes(s: &str) -> String { + s.replace("'", "'\\''") +} + +fn is_env_assign(s: &str) -> bool { + if let Some(eq_pos) = s.find('=') { + let key = &s[..eq_pos]; + !key.is_empty() + && key + .chars() + .next() + .is_some_and(|c| c.is_ascii_alphabetic() || c == '_') + && key.chars().all(|c| c.is_ascii_alphanumeric() || c == '_') + } else { + false + } +} + +fn replace_first_word(raw: &str, old_prefix: &str, new_prefix: &str) -> String { + raw.strip_prefix(old_prefix) + .map(|rest| format!("{new_prefix}{rest}")) + .unwrap_or_else(|| format!("rtk run -c '{}'", escape_quotes(raw))) +} + +fn route_pnpm(cmd: &analysis::NativeCommand, raw: &str) -> String { + let sub = cmd.args.first().map(String::as_str).unwrap_or(""); + match sub { + "list" | "ls" | "outdated" | "install" => format!("rtk {raw}"), + + // ISSUE #112: shell script sed bug produces "rtk vitest run run --coverage" + "vitest" => { + let after_vitest: Vec<&str> = cmd.args[1..] 
+                .iter()
+                .map(String::as_str)
+                .skip_while(|&a| a == "run") // drop a leading "run" so we never emit "vitest run run"
+                .collect();
+            if after_vitest.is_empty() {
+                "rtk vitest run".to_string()
+            } else {
+                format!("rtk vitest run {}", after_vitest.join(" "))
+            }
+        }
+
+        "test" => {
+            let after_test: Vec<&str> = cmd.args[1..].iter().map(String::as_str).collect();
+            if after_test.is_empty() {
+                "rtk vitest run".to_string()
+            } else {
+                format!("rtk vitest run {}", after_test.join(" ")) // "pnpm test ..." is treated as vitest
+            }
+        }
+
+        "tsc" => replace_first_word(raw, "pnpm tsc", "rtk tsc"),
+        "lint" => replace_first_word(raw, "pnpm lint", "rtk lint"),
+        "eslint" => replace_first_word(raw, "pnpm eslint", "rtk lint"),
+        "playwright" => replace_first_word(raw, "pnpm playwright", "rtk playwright"),
+
+        _ => format!("rtk run -c '{}'", escape_quotes(raw)), // unknown pnpm script: shell passthrough
+    }
+}
+
+fn route_npx(cmd: &analysis::NativeCommand, raw: &str) -> String {
+    let sub = cmd.args.first().map(String::as_str).unwrap_or("");
+    match sub {
+        "tsc" | "typescript" => replace_first_word(raw, &format!("npx {sub}"), "rtk tsc"),
+        "eslint" => replace_first_word(raw, "npx eslint", "rtk lint"),
+        "prettier" => replace_first_word(raw, "npx prettier", "rtk prettier"),
+        "playwright" => replace_first_word(raw, "npx playwright", "rtk playwright"),
+        "prisma" => replace_first_word(raw, "npx prisma", "rtk prisma"),
+
+        "vitest" => {
+            let after_vitest: Vec<&str> = cmd.args[1..]
+                .iter()
+                .map(String::as_str)
+                .skip_while(|&a| a == "run") // same de-dup of "run" as the pnpm path (ISSUE #112)
+                .collect();
+            if after_vitest.is_empty() {
+                "rtk vitest run".to_string()
+            } else {
+                format!("rtk vitest run {}", after_vitest.join(" "))
+            }
+        }
+
+        _ => format!("rtk run -c '{}'", escape_quotes(raw)),
+    }
+}
+
+fn hook_lookup<'a>(binary: &'a str, sub: &str) -> Option<(&'static str, &'a str)> {
+    let base = binary.rsplit('/').next().unwrap_or(binary); // tolerate full paths like /opt/homebrew/bin/git
+    match base {
+        "git" => match sub {
+            "status" | "log" | "diff" | "show" | "add" | "commit" | "push" | "pull" | "fetch"
+            | "stash" | "branch" | "worktree" => Some(("rtk git", binary)),
+            _ => None,
+        },
+        "gh" => match sub {
+            "pr" | "issue" | "run" => Some(("rtk gh", binary)),
+            _ => None,
+        },
+        "cargo" => match sub {
+            "test" | "build" | "clippy" | "check" | "install" | "fmt" => {
+                Some(("rtk cargo", binary))
+            }
+            _ => None,
+        },
+        "docker" => match sub {
+            "ps" | "images" | "logs" => Some(("rtk docker", binary)),
+            _ => None,
+        },
+        "kubectl" => match sub {
+            "get" | "logs" => Some(("rtk kubectl", binary)),
+            _ => None,
+        },
+        "go" => match sub {
+            "test" | "build" | "vet" => Some(("rtk go", binary)),
+            _ => None,
+        },
+        "ruff" => match sub {
+            "check" | "format" => Some(("rtk ruff", binary)),
+            _ => None,
+        },
+        "pip" | "pip3" => match sub {
+            "list" | "outdated" | "install" | "show" => Some(("rtk pip", binary)),
+            _ => None,
+        },
+        "grep" => Some(("rtk grep", binary)),
+        "rg" => Some(("rtk grep", binary)),
+        "ls" => Some(("rtk ls", binary)),
+        "eslint" => Some(("rtk lint", binary)),
+        "biome" => Some(("rtk lint", binary)),
+        "tsc" => Some(("rtk tsc", binary)),
+        "prettier" => Some(("rtk prettier", binary)),
+        "golangci-lint" | "golangci" => Some(("rtk golangci-lint", binary)),
+        "mypy" => Some(("rtk mypy", binary)),
+        "playwright" => Some(("rtk playwright", binary)),
+        "prisma" => Some(("rtk prisma", binary)),
+        "curl" => Some(("rtk curl", binary)),
+        "pytest" => Some(("rtk pytest", binary)),
+        "wc" => Some(("rtk wc", binary)),
+        "gt" => 
Some(("rtk gt", binary)),
+        "wget" | "diff" | "tree" | "find" => None, // deliberate passthrough: no rtk filter for these
+        _ => None,
+    }
+}
+
+fn is_shell_prefix_builtin(token: &str) -> bool {
+    matches!(
+        token,
+        "noglob" | "command" | "builtin" | "exec" | "nocorrect"
+    )
+}
+
+pub(crate) fn route_native_command(cmd: &analysis::NativeCommand, raw: &str) -> String {
+    if is_shell_prefix_builtin(&cmd.binary) {
+        if let Some(real_binary) = cmd.args.first() {
+            let prefix = &cmd.binary;
+            let real_args = cmd.args[1..].to_vec();
+            let real_cmd = analysis::NativeCommand {
+                binary: real_binary.clone(),
+                args: real_args,
+                operator: cmd.operator.clone(),
+            };
+            let core_raw = raw
+                .strip_prefix(prefix)
+                .map(|s| s.trim_start())
+                .unwrap_or(raw);
+            return match try_route_native_command(&real_cmd, core_raw) {
+                Some(routed) => format!("{} {}", prefix, routed), // keep the builtin prefix outermost
+                None => raw.to_string(),
+            };
+        }
+        return raw.to_string(); // bare prefix ("noglob" alone): nothing to route
+    }
+
+    if is_env_assign(&cmd.binary) {
+        let mut env_parts: Vec<&str> = vec![cmd.binary.as_str()];
+        let mut arg_idx = 0;
+        while arg_idx < cmd.args.len() && is_env_assign(&cmd.args[arg_idx]) {
+            env_parts.push(&cmd.args[arg_idx]); // collect the full run of leading VAR=val assignments
+            arg_idx += 1;
+        }
+        if arg_idx < cmd.args.len() {
+            let env_prefix_str = env_parts.join(" ");
+            let core_raw = raw
+                .strip_prefix(&env_prefix_str)
+                .map(|s| s.trim_start())
+                .unwrap_or_else(|| {
+                    let skip = env_prefix_str.len(); // prefix mismatch (tokens may be de-quoted vs raw): best-effort byte offset
+                    if skip < raw.len() {
+                        raw.get(skip..).map_or(raw, str::trim_start) // get(): never panic on a non-char-boundary offset
+                    } else {
+                        raw
+                    }
+                });
+            let real_binary = cmd.args[arg_idx].clone();
+            let real_args = cmd.args[arg_idx + 1..].to_vec();
+            let real_cmd = analysis::NativeCommand {
+                binary: real_binary,
+                args: real_args,
+                operator: cmd.operator.clone(),
+            };
+            return match try_route_native_command(&real_cmd, core_raw) {
+                Some(routed) => format!("{} {}", env_prefix_str, routed), // re-attach the VAR=val prefix
+                None => raw.to_string(),
+            };
+        }
+    }
+
+    let sub = cmd.args.first().map(String::as_str).unwrap_or("");
+    let sub2 = cmd.args.get(1).map(String::as_str).unwrap_or("");
+
+    if let Some((rtk_full, prefix)) = 
hook_lookup(&cmd.binary, sub) {
+        return replace_first_word(raw, prefix, rtk_full);
+    }
+
+    if cmd.binary == "cat" {
+        return replace_first_word(raw, "cat", "rtk read"); // cat <files> always becomes rtk read
+    }
+
+    match cmd.binary.as_str() {
+        "vitest" if sub.is_empty() => "rtk vitest run".to_string(),
+        "vitest" => format!("rtk {raw}"),
+
+        "uv" if sub == "pip" && matches!(sub2, "list" | "outdated" | "install" | "show") => {
+            replace_first_word(raw, "uv pip", "rtk pip")
+        }
+
+        "python" | "python3" if sub == "-m" && sub2 == "pytest" => {
+            let prefix = format!("{} -m pytest", cmd.binary);
+            replace_first_word(raw, &prefix, "rtk pytest")
+        }
+
+        "python" | "python3" if sub == "-m" && sub2 == "mypy" => {
+            let prefix = format!("{} -m mypy", cmd.binary);
+            replace_first_word(raw, &prefix, "rtk mypy")
+        }
+
+        "pnpm" => route_pnpm(cmd, raw),
+        "npx" => route_npx(cmd, raw),
+
+        _ => format!("rtk run -c '{}'", escape_quotes(raw)), // unknown binary: generic shell passthrough
+    }
+}
+
+pub(crate) fn try_route_native_command(cmd: &analysis::NativeCommand, raw: &str) -> Option<String> {
+    let routed = route_native_command(cmd, raw);
+    if routed.starts_with("rtk run -c") {
+        None // generic fallback means "no dedicated route"; caller keeps the original text
+    } else {
+        Some(routed)
+    }
+}
+
+fn reconstruct_with_rtk(commands: &[analysis::NativeCommand]) -> String {
+    commands
+        .iter()
+        .map(|cmd| {
+            let core_raw = if cmd.args.is_empty() {
+                cmd.binary.clone()
+            } else {
+                format!("{} {}", cmd.binary, cmd.args.join(" "))
+            };
+
+            let part = match try_route_native_command(cmd, &core_raw) {
+                Some(routed) => routed,
+                None => core_raw, // unknown command stays verbatim inside the chain
+            };
+
+            match &cmd.operator {
+                Some(op) => format!("{} {}", part, op), // re-attach trailing &&, ||, ;
+                None => part,
+            }
+        })
+        .collect::<Vec<_>>()
+        .join(" ")
+}
+
+pub fn format_for_claude(result: HookResult) -> (String, bool, i32) {
+    match result {
+        HookResult::Rewrite(cmd) => (cmd, true, 0), // exit 0: rewrite accepted
+        HookResult::Blocked(msg) => (msg, false, 2), // exit 2: blocking error (tests below cite Claude Code hook spec)
+    }
+}
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+
+    fn assert_rewrite(input: &str, contains: &str) {
+        match check_for_hook(input, "claude") {
+            HookResult::Rewrite(cmd) => assert!(
+                
cmd.contains(contains), + "'{}' rewrite should contain '{}', got '{}'", + input, + contains, + cmd + ), + other => panic!("Expected Rewrite for '{}', got {:?}", input, other), + } + } + + fn assert_blocked(input: &str, depth: usize, contains: &str) { + match check_for_hook_inner(input, depth) { + HookResult::Blocked(msg) => assert!( + msg.contains(contains), + "'{}' block msg should contain '{}', got '{}'", + input, + contains, + msg + ), + other => panic!("Expected Blocked for '{}', got {:?}", input, other), + } + } + + fn assert_passthrough(input: &str) { + match check_for_hook(input, "claude") { + HookResult::Rewrite(cmd) => { + assert!( + !cmd.contains("rtk run -c"), + "command should NOT be wrapped in rtk run -c, got '{}'", + cmd + ); + assert_eq!(cmd, input, "unknown command should pass through unchanged"); + } + HookResult::Blocked(_) => panic!("Expected passthrough for '{}', got Blocked", input), + } + } + + #[test] + fn test_escape_quotes() { + assert_eq!(escape_quotes("hello"), "hello"); + assert_eq!(escape_quotes("it's"), "it'\\''s"); + assert_eq!(escape_quotes("it's a test's"), "it'\\''s a test'\\''s"); + } + + #[test] + fn test_check_empty_and_whitespace() { + match check_for_hook("", "claude") { + HookResult::Rewrite(cmd) => assert!(cmd.is_empty()), + _ => panic!("Expected Rewrite for empty"), + } + match check_for_hook(" ", "claude") { + HookResult::Rewrite(cmd) => assert!(cmd.trim().is_empty()), + _ => panic!("Expected Rewrite for whitespace"), + } + } + + #[test] + fn test_safe_commands_rewrite() { + assert_rewrite("git status", "rtk git status"); + assert_rewrite(r#"git commit -m "Fix && Bug""#, "rtk git commit"); + + let shell_cases = [ + ("ls *.rs", "rtk run"), + ("echo `date`", "rtk run"), + ("echo $(date)", "rtk run"), + ("echo {a,b}.txt", "rtk run"), + ("cd /tmp && git status", "rtk run"), + ]; + for (input, expected) in shell_cases { + assert_rewrite(input, expected); + } + + assert_passthrough("FOO=bar echo hello"); + 
assert_passthrough("echo 'hello!@#$%^&*()'"); + assert_passthrough(&format!("echo {}", "a".repeat(1000))); + + match check_for_hook("cd /tmp && git status", "claude") { + HookResult::Rewrite(cmd) => assert!( + cmd.contains("&&"), + "Chain rewrite must preserve '&&', got '{}'", + cmd + ), + other => panic!("Expected Rewrite for chain, got {:?}", other), + } + } + + #[test] + fn test_env_prefix_routes_to_rtk_subcommand() { + let cases = [ + ("GIT_PAGER=cat git status", "rtk git", "GIT_PAGER=cat"), + ( + "GIT_PAGER=cat git log --oneline -10", + "rtk git", + "GIT_PAGER=cat", + ), + ("RUST_LOG=debug cargo test", "rtk cargo", "RUST_LOG=debug"), + ("LANG=C ls -la", "rtk ls", "LANG=C"), + ( + "TEST_SESSION_ID=2 npx playwright test --config=foo", + "rtk playwright", + "TEST_SESSION_ID=2", + ), + ]; + for (input, rtk_sub, env_prefix) in cases { + match check_for_hook(input, "claude") { + HookResult::Rewrite(cmd) => { + assert!( + cmd.contains(rtk_sub), + "'{input}' must route to '{rtk_sub}', got '{cmd}'" + ); + assert!( + cmd.contains(env_prefix), + "'{input}' must preserve env prefix '{env_prefix}', got '{cmd}'" + ); + } + other => panic!("Expected Rewrite for '{input}', got {other:?}"), + } + } + } + + #[test] + fn test_env_prefix_multi_var_routes() { + let input = "NODE_ENV=test CI=1 npx vitest run"; + match check_for_hook(input, "claude") { + HookResult::Rewrite(cmd) => { + assert!( + cmd.contains("rtk vitest"), + "must route to rtk vitest, got '{cmd}'" + ); + assert!( + cmd.contains("NODE_ENV=test"), + "must preserve NODE_ENV, got '{cmd}'" + ); + assert!(cmd.contains("CI=1"), "must preserve CI, got '{cmd}'"); + } + other => panic!("Expected Rewrite, got {other:?}"), + } + } + + #[test] + fn test_env_prefix_unknown_cmd_fallback() { + assert_passthrough("VAR=1 unknown_xyz_abc_cmd"); + } + + #[test] + fn test_env_prefix_npm_still_passthrough() { + assert_passthrough("NODE_ENV=test npm run test:e2e"); + } + + #[test] + fn test_env_prefix_docker_compose_passthrough() { + 
assert_passthrough("COMPOSE_PROJECT_NAME=test docker compose up -d"); + } + + #[test] + fn test_global_options_not_blocked() { + let cases = [ + "git --no-pager status", + "git -C /path/to/project status", + "git -C /path --no-pager log --oneline", + "git --no-optional-locks diff HEAD", + "git --bare log", + "cargo +nightly test", + "cargo +stable build --release", + "docker --context prod ps", + "docker -H tcp://host:2375 images", + "kubectl -n kube-system get pods", + "kubectl --context prod describe pod foo", + ]; + for input in cases { + assert_passthrough(input); + } + } + + #[test] + fn test_specific_commands_not_blocked() { + let cases = [ + "git log --oneline -10", + "git diff HEAD", + "git show abc123", + "git add .", + "gh pr list", + "gh api repos/owner/repo", + "gh release list", + "npm run test:e2e", + "npm run build", + "npm test", + "docker compose up -d", + "docker compose logs postgrest", + "docker compose down", + "docker run --rm postgres", + "docker exec -it db psql", + "kubectl describe pod foo", + "kubectl apply -f deploy.yaml", + "npx playwright test", + "npx prisma migrate", + "cargo test", + "vitest", + "vitest run", + "vitest run --reporter=verbose", + "npx vitest run", + "pnpm vitest run --coverage", + "vue-tsc -b", + "npx vue-tsc --noEmit", + "curl -s https://example.com", + "ls -la", + "grep -rn pattern src/", + "rg pattern src/", + ]; + for input in cases { + assert!( + matches!(check_for_hook(input, "claude"), HookResult::Rewrite(_)), + "'{}' should Rewrite (not Blocked)", + input + ); + } + } + + #[test] + fn test_builtins_not_blocked() { + let cases = [ + "echo hello world", + "cd /tmp", + "mkdir -p foo/bar", + "python3 script.py", + "find . 
-name '*.ts'", + "tree src/", + "wget https://example.com/file", + ]; + for input in cases { + assert_passthrough(input); + } + assert_passthrough("node -e 'console.log(1)'"); + } + + #[test] + fn test_noglob_prefix_routes_inner_command() { + assert_rewrite("noglob gh pr view 123", "noglob rtk gh pr view 123"); + } + + #[test] + fn test_noglob_prefix_with_unknown_command() { + match check_for_hook("noglob some-unknown-tool --arg", "claude") { + HookResult::Rewrite(cmd) => { + assert!( + !cmd.contains("rtk run -c 'noglob"), + "noglob should not be inside rtk run -c, got '{}'", + cmd + ); + } + HookResult::Blocked(_) => panic!("should not be blocked"), + } + } + + #[test] + fn test_command_prefix_routes_inner_command() { + assert_rewrite("command git status", "command rtk git status"); + } + + #[test] + fn test_builtin_prefix_passthrough() { + match check_for_hook("builtin cd /tmp", "claude") { + HookResult::Rewrite(cmd) => { + assert!( + !cmd.contains("rtk run -c 'builtin"), + "builtin should not be inside rtk run -c, got '{}'", + cmd + ); + } + HookResult::Blocked(_) => panic!("should not be blocked"), + } + } + + #[test] + fn test_nocorrect_prefix_routes_inner_command() { + assert_rewrite("nocorrect git log -10", "nocorrect rtk git log"); + } + + #[test] + fn test_noglob_gh_release_create_exact_bug_report() { + let input = "noglob gh release create v0.3.0-rc1 --title v0.3.0-rc1 --notes test --prerelease --draft"; + match check_for_hook(input, "claude") { + HookResult::Rewrite(cmd) => { + assert!( + !cmd.contains("rtk run -c 'noglob"), + "noglob must not be inside rtk run -c, got '{}'", + cmd + ); + assert!( + cmd.starts_with("noglob "), + "noglob must be the outermost prefix, got '{}'", + cmd + ); + } + HookResult::Blocked(_) => panic!("should not be blocked"), + } + } + + #[test] + fn test_nested_shell_prefixes() { + assert_rewrite("noglob command git status", "noglob command rtk git status"); + } + + #[test] + fn test_shell_prefix_plus_env_prefix() { + 
assert_rewrite( + "noglob GIT_PAGER=cat git log -10", + "noglob GIT_PAGER=cat rtk git log", + ); + } + + #[test] + fn test_exec_prefix_routes_inner_command() { + assert_rewrite("exec git status", "exec rtk git status"); + } + + #[test] + fn test_bare_shell_prefix_passthrough() { + match check_for_hook("noglob", "claude") { + HookResult::Rewrite(cmd) => { + assert_eq!(cmd, "noglob", "bare prefix should pass through unchanged"); + } + HookResult::Blocked(_) => panic!("should not be blocked"), + } + } + + #[test] + fn test_unknown_command_passthrough() { + assert_passthrough("gh release create v0.3.0 --title test"); + } + + #[test] + fn test_full_path_binary_routes_correctly() { + assert_rewrite("/opt/homebrew/bin/git status", "rtk git status"); + } + + #[test] + fn test_full_path_unknown_command_passthrough() { + assert_passthrough("/opt/homebrew/bin/gh release create v0.3.0"); + } + + #[test] + fn test_env_prefix_unknown_command_passthrough() { + assert_passthrough("GH_DEBUG= gh release create v0.3.0"); + } + + #[test] + fn test_noglob_unknown_command_passthrough() { + assert_passthrough("noglob gh release create v0.3.0"); + } + + #[test] + fn test_chain_mixed_known_unknown() { + match check_for_hook("gh release create v1 && git status", "claude") { + HookResult::Rewrite(cmd) => { + assert!(cmd.contains("rtk run -c"), "chains still need rtk run -c"); + assert!(cmd.contains("rtk git status"), "known cmd routed"); + assert!( + cmd.contains("gh release create v1"), + "unknown cmd preserved" + ); + } + HookResult::Blocked(_) => panic!("should not be blocked"), + } + } + + #[test] + fn test_gh_release_create_exact_bug_report() { + let input = r#"gh release create v0.3.0 --title "ai_session_tools v0.3.0" --notes-file notes/v0.3.0-release.md"#; + assert_passthrough(input); + } + + #[test] + fn test_completely_unknown_binary_passthrough() { + assert_passthrough("some-custom-tool --flag value"); + } + + #[test] + fn test_compound_commands_rewrite() { + let cases = [ + ("cd 
/tmp && git status", "&&"), + ("cd dir && git status && git diff", "&&"), + ("git add . && git commit -m msg", "&&"), + ("echo start ; git status ; echo done", ";"), + ("git pull || echo failed", "||"), + ]; + for (input, operator) in cases { + match check_for_hook(input, "claude") { + HookResult::Rewrite(cmd) => { + assert!(cmd.contains("rtk run"), "'{input}' should rewrite"); + assert!( + cmd.contains(operator), + "'{input}' must preserve '{operator}', got '{cmd}'" + ); + } + other => panic!("Expected Rewrite for '{input}', got {other:?}"), + } + } + } + + #[test] + fn test_compound_quoted_operators_not_split() { + let input = r#"git commit -m "Fix && Bug""#; + match check_for_hook(input, "claude") { + HookResult::Rewrite(cmd) => { + assert!( + cmd.contains("rtk git commit"), + "Quoted && must not split; should route to rtk git commit, got '{cmd}'" + ); + } + other => panic!("Expected Rewrite for quoted &&, got {other:?}"), + } + } + + #[test] + fn test_suffix_2_redirect_routes_to_rtk() { + let input = "cargo test 2>&1"; + match check_for_hook(input, "claude") { + HookResult::Rewrite(cmd) => { + assert!( + cmd.contains("rtk cargo"), + "must use rtk cargo filter, got '{cmd}'" + ); + assert!( + cmd.contains("2>&1"), + "must preserve 2>&1 suffix, got '{cmd}'" + ); + assert!( + !cmd.contains("rtk run -c"), + "must NOT fall back to passthrough, got '{cmd}'" + ); + } + other => panic!("Expected Rewrite, got {other:?}"), + } + } + + #[test] + fn test_suffix_dev_null_routes_to_rtk() { + let input = "cargo test 2>/dev/null"; + match check_for_hook(input, "claude") { + HookResult::Rewrite(cmd) => { + assert!(cmd.contains("rtk cargo"), "must use rtk cargo, got '{cmd}'"); + assert!( + cmd.contains("/dev/null"), + "must preserve /dev/null suffix, got '{cmd}'" + ); + assert!( + !cmd.contains("rtk run -c"), + "must NOT fall back to passthrough, got '{cmd}'" + ); + } + other => panic!("Expected Rewrite, got {other:?}"), + } + } + + #[test] + fn 
test_suffix_pipe_tee_routes_to_rtk() { + let input = "cargo test | tee /tmp/log.txt"; + match check_for_hook(input, "claude") { + HookResult::Rewrite(cmd) => { + assert!( + cmd.contains("rtk cargo"), + "must use rtk cargo filter, got '{cmd}'" + ); + assert!(cmd.contains("tee"), "must preserve tee suffix, got '{cmd}'"); + } + other => panic!("Expected Rewrite, got {other:?}"), + } + } + + #[test] + fn test_suffix_pipe_head_routes_to_rtk() { + let input = "git log | head -20"; + match check_for_hook(input, "claude") { + HookResult::Rewrite(cmd) => { + assert!(cmd.contains("rtk git"), "must use rtk git, got '{cmd}'"); + assert!( + cmd.contains("head"), + "must preserve head suffix, got '{cmd}'" + ); + assert!( + !cmd.contains("rtk run -c"), + "must NOT fall back to passthrough, got '{cmd}'" + ); + } + other => panic!("Expected Rewrite, got {other:?}"), + } + } + + #[test] + fn test_suffix_unknown_cmd_still_passthrough() { + assert_passthrough("unknown_xyz_cmd 2>&1"); + } + + #[test] + fn test_suffix_unsafe_pipe_still_passthrough() { + let input = "cargo test | grep FAILED"; + match check_for_hook(input, "claude") { + HookResult::Rewrite(cmd) => { + let _ = cmd; + } + other => panic!("Expected Rewrite, got {other:?}"), + } + } + + #[test] + fn test_token_waste_allowed_in_pipelines() { + let cases = [ + "cat file.txt | grep pattern", + "cat file.txt > output.txt", + "sed 's/old/new/' file.txt > output.txt", + "head -n 10 file.txt | grep pattern", + "for f in *.txt; do cat \"$f\" | grep x; done", + ]; + for input in cases { + assert_rewrite(input, "rtk run"); + } + } + + #[test] + fn test_different_agents_same_result() { + for agent in ["claude", "gemini"] { + match check_for_hook("git status", agent) { + HookResult::Rewrite(_) => {} + other => panic!("Expected Rewrite for agent '{}', got {:?}", agent, other), + } + } + } + + #[test] + fn test_format_for_claude() { + let (output, success, code) = + format_for_claude(HookResult::Rewrite("rtk run -c 'git 
status'".to_string())); + assert_eq!(output, "rtk run -c 'git status'"); + assert!(success); + assert_eq!(code, 0); + + let (output, success, code) = + format_for_claude(HookResult::Blocked("Error message".to_string())); + assert_eq!(output, "Error message"); + assert!(!success); + assert_eq!(code, 2); + } + + #[test] + fn test_dollar_var_routes_natively() { + let result = match check_for_hook("git log $BRANCH", "claude") { + HookResult::Rewrite(cmd) => cmd, + other => panic!("Expected Rewrite, got {:?}", other), + }; + assert!( + result.contains("rtk git"), + "Expected rtk git routing for 'git log $BRANCH', got: {}", + result + ); + assert!( + !result.contains("rtk run"), + "Should not fall to passthrough for simple $VAR, got: {}", + result + ); + } + + #[test] + fn test_dollar_subshell_still_passthrough() { + let result = match check_for_hook("git log $(git rev-parse HEAD)", "claude") { + HookResult::Rewrite(cmd) => cmd, + other => panic!("Expected Rewrite, got {:?}", other), + }; + assert!( + result.contains("rtk run"), + "Subshell $(...) 
must route to passthrough, got: {}", + result + ); + } + + #[test] + fn test_rewrite_depth_limit_blocked() { + assert_blocked("echo hello", MAX_REWRITE_DEPTH, "loop"); + } + + #[test] + fn test_rewrite_depth_limit_allowed() { + match check_for_hook_inner("echo hello", 0) { + HookResult::Rewrite(cmd) => assert_eq!(cmd, "echo hello"), + _ => panic!("Expected Rewrite at depth 0"), + } + } + + #[test] + fn test_claude_rewrite_exit_code_is_zero() { + let (_, _, code) = format_for_claude(HookResult::Rewrite("rtk run -c 'ls'".into())); + assert_eq!(code, 0, "Rewrite must exit 0 (success)"); + } + + #[test] + fn test_claude_block_exit_code_is_two() { + let (_, _, code) = format_for_claude(HookResult::Blocked("denied".into())); + assert_eq!( + code, 2, + "Block must exit 2 (blocking error per Claude Code spec)" + ); + } + + #[test] + fn test_claude_rewrite_output_is_command_text() { + let (output, success, _) = + format_for_claude(HookResult::Rewrite("rtk run -c 'git status'".into())); + assert_eq!(output, "rtk run -c 'git status'"); + assert!(success); + assert!( + !output.starts_with('{'), + "Rewrite output must be plain text, not JSON" + ); + } + + #[test] + fn test_claude_block_output_is_human_message() { + let (output, success, _) = + format_for_claude(HookResult::Blocked("Use Read tool instead".into())); + assert_eq!(output, "Use Read tool instead"); + assert!(!success); + assert!( + !output.starts_with('{'), + "Block output must be plain text, not JSON" + ); + } + + #[test] + fn test_claude_rewrite_success_flag_true() { + let (_, success, _) = format_for_claude(HookResult::Rewrite("cmd".into())); + assert!(success, "Rewrite must set success=true"); + } + + #[test] + fn test_claude_block_success_flag_false() { + let (_, success, _) = format_for_claude(HookResult::Blocked("msg".into())); + assert!(!success, "Block must set success=false"); + } + + #[test] + fn test_claude_exit_codes_not_one() { + let (_, _, rewrite_code) = 
format_for_claude(HookResult::Rewrite("cmd".into())); + let (_, _, block_code) = format_for_claude(HookResult::Blocked("msg".into())); + assert_ne!( + rewrite_code, 1, + "Exit code 1 is non-blocking error, not valid for rewrite" + ); + assert_ne!( + block_code, 1, + "Exit code 1 is non-blocking error, not valid for block" + ); + } + + #[test] + fn test_cross_protocol_safe_command_allowed_by_both() { + for cmd in ["git status", "cargo test", "ls -la", "echo hello"] { + let claude = check_for_hook(cmd, "claude"); + let gemini = check_for_hook(cmd, "gemini"); + match (&claude, &gemini) { + (HookResult::Rewrite(_), HookResult::Rewrite(_)) => {} + _ => panic!( + "'{}': Claude={:?}, Gemini={:?} -- both should Rewrite", + cmd, claude, gemini + ), + } + } + } + + #[test] + fn test_routing_native_commands() { + let cases = [ + ("git status", "rtk git status"), + ("git log --oneline -10", "rtk git log --oneline -10"), + ("git diff HEAD", "rtk git diff HEAD"), + ("git add .", "rtk git add ."), + ("git commit -m msg", "rtk git commit"), + ("gh pr view 156", "rtk gh pr view 156"), + ("cargo test", "rtk cargo test"), + ( + "cargo clippy --all-targets", + "rtk cargo clippy --all-targets", + ), + ("grep -r pattern src/", "rtk grep -r pattern src/"), + ("rg pattern src/", "rtk grep pattern src/"), + ("ls -la", "rtk ls -la"), + ("vitest", "rtk vitest run"), + ("vitest run", "rtk vitest run"), + ("vitest run --coverage", "rtk vitest run --coverage"), + ("pnpm test", "rtk vitest run"), + ("pnpm vitest", "rtk vitest run"), + ("pnpm lint", "rtk lint"), + ("pnpm eslint src/", "rtk lint"), + ("pnpm eslint .", "rtk lint ."), + ("pnpm eslint --fix src/", "rtk lint"), + ("npx tsc --noEmit", "rtk tsc --noEmit"), + ("python -m pytest tests/", "rtk pytest tests/"), + ("uv pip list", "rtk pip list"), + ("go test ./...", "rtk go test ./..."), + ("go build ./...", "rtk go build ./..."), + ("go vet ./...", "rtk go vet ./..."), + ("eslint src/", "rtk lint src/"), + ("tsc --noEmit", "rtk tsc 
--noEmit"), + ("prettier src/", "rtk prettier src/"), + ("playwright test", "rtk playwright test"), + ("prisma migrate dev", "rtk prisma migrate dev"), + ( + "curl https://api.example.com", + "rtk curl https://api.example.com", + ), + ("pytest tests/", "rtk pytest tests/"), + ("pytest -x tests/unit", "rtk pytest -x tests/unit"), + ("golangci-lint run ./...", "rtk golangci-lint run ./..."), + ("docker ps", "rtk docker ps"), + ("docker images", "rtk docker images"), + ("docker logs mycontainer", "rtk docker logs mycontainer"), + ("kubectl get pods", "rtk kubectl get pods"), + ("kubectl logs mypod", "rtk kubectl logs mypod"), + ("ruff check src/", "rtk ruff check src/"), + ("ruff format src/", "rtk ruff format src/"), + ("pip list", "rtk pip list"), + ("pip install requests", "rtk pip install requests"), + ("pip outdated", "rtk pip outdated"), + ("pip show requests", "rtk pip show requests"), + ("gh issue list", "rtk gh issue list"), + ("gh run view 123", "rtk gh run view 123"), + ("git stash pop", "rtk git stash pop"), + ("git fetch origin", "rtk git fetch origin"), + ("gt log", "rtk gt log"), + ("gt submit", "rtk gt submit"), + ("gt sync", "rtk gt sync"), + ("gt create feat/new-branch", "rtk gt create feat/new-branch"), + ]; + for (input, expected) in cases { + assert_rewrite(input, expected); + } + } + + #[test] + fn test_routing_subcommand_filter_fallback() { + let cases = [ + "docker build .", + "docker run -it nginx", + "kubectl apply -f dep.yaml", + "kubectl delete pod mypod", + "go mod tidy", + "go generate ./...", + "ruff lint src/", + "pip freeze", + "pip uninstall requests", + "cargo publish", + "cargo run", + "git rebase -i HEAD~3", + "git cherry-pick abc123", + "gh repo clone foo/bar", + ]; + for input in cases { + assert_passthrough(input); + } + } + + #[test] + fn test_routing_vitest_no_double_run() { + // ISSUE #112: shell script sed bug produces "rtk vitest run run --coverage" + let result = match check_for_hook("pnpm vitest run --coverage", "claude") 
{ + HookResult::Rewrite(cmd) => cmd, + other => panic!("Expected Rewrite, got {:?}", other), + }; + assert_rewrite("pnpm vitest run --coverage", "rtk vitest run --coverage"); + assert!( + !result.contains("run run"), + "Must not double 'run' in output: '{}'", + result + ); + } + + #[test] + fn test_routing_fallbacks_to_rtk_run() { + let chain_cases = ["git add . && git commit -m msg", "git log | grep fix"]; + for input in chain_cases { + assert_rewrite(input, "rtk run -c"); + } + let passthrough_cases = [ + "git checkout main", + "tail -n 20 file.txt", + "tail -f server.log", + ]; + for input in passthrough_cases { + assert_passthrough(input); + } + } + + #[test] + fn test_cross_agent_routing_identical() { + for cmd in ["git status", "cargo test", "ls -la"] { + let claude_result = check_for_hook(cmd, "claude"); + let gemini_result = check_for_hook(cmd, "gemini"); + match (&claude_result, &gemini_result) { + (HookResult::Rewrite(c), HookResult::Rewrite(g)) => { + assert_eq!(c, g, "claude and gemini must route '{}' identically", cmd); + assert!( + !c.contains("rtk run -c"), + "'{}' should not fall back to rtk run -c", + cmd + ); + } + _ => panic!( + "'{}' should Rewrite for both agents: claude={:?} gemini={:?}", + cmd, claude_result, gemini_result + ), + } + } + } + + #[test] + fn test_chain_both_commands_substituted() { + let result = match check_for_hook("cargo test && git log", "claude") { + HookResult::Rewrite(cmd) => cmd, + other => panic!("Expected Rewrite, got {:?}", other), + }; + assert!( + result.contains("rtk cargo"), + "cargo test must be substituted to rtk cargo inside chain: {}", + result + ); + assert!( + result.contains("rtk git"), + "git log must be substituted to rtk git inside chain: {}", + result + ); + assert!( + result.contains("rtk run"), + "chain still needs shell wrapper (rtk run -c): {}", + result + ); + } + + #[test] + fn test_chain_with_dollar_var_substituted() { + let result = match check_for_hook("cargo test && git log $BRANCH", 
"claude") { + HookResult::Rewrite(cmd) => cmd, + other => panic!("Expected Rewrite, got {:?}", other), + }; + assert!( + result.contains("rtk cargo"), + "cargo test must be rtk in chain: {}", + result + ); + assert!( + result.contains("rtk git log"), + "git log $BRANCH must be rtk with var preserved: {}", + result + ); + assert!( + result.contains("$BRANCH"), + "$BRANCH must be preserved in rewritten chain: {}", + result + ); + } + + #[test] + fn test_chain_unknown_command_not_substituted() { + let result = match check_for_hook("cargo test && unknown_xyz_cmd", "claude") { + HookResult::Rewrite(cmd) => cmd, + other => panic!("Expected Rewrite, got {:?}", other), + }; + assert!( + result.contains("rtk cargo"), + "cargo test must be substituted to rtk: {}", + result + ); + assert!( + result.contains("unknown_xyz_cmd"), + "unknown command must pass through unchanged: {}", + result + ); + assert!( + !result.contains("rtk unknown"), + "must not invent rtk subcommands for unknown binary: {}", + result + ); + } + + #[test] + fn test_semicolon_chain_substituted() { + let result = match check_for_hook("cargo test ; git status", "claude") { + HookResult::Rewrite(cmd) => cmd, + other => panic!("Expected Rewrite, got {:?}", other), + }; + assert!( + result.contains("rtk cargo"), + "cargo must be rtk in semicolon chain: {}", + result + ); + assert!( + result.contains("rtk git"), + "git must be rtk in semicolon chain: {}", + result + ); + } + + #[test] + fn test_or_chain_substituted() { + let result = match check_for_hook("cargo test || go test ./...", "claude") { + HookResult::Rewrite(cmd) => cmd, + other => panic!("Expected Rewrite, got {:?}", other), + }; + assert!( + result.contains("rtk cargo"), + "cargo must be rtk in || chain: {}", + result + ); + assert!( + result.contains("rtk go"), + "go must be rtk in || chain: {}", + result + ); + } + + #[test] + fn test_format_preserving_contains_expected() { + assert!( + FORMAT_PRESERVING.contains(&"tail"), + "tail is 
format-preserving (line-per-line passthrough)" + ); + assert!( + FORMAT_PRESERVING.contains(&"echo"), + "echo is format-preserving (output equals input)" + ); + assert!( + FORMAT_PRESERVING.contains(&"find"), + "find is format-preserving (path-per-line)" + ); + assert!( + FORMAT_PRESERVING.contains(&"cat"), + "cat is format-preserving (byte passthrough)" + ); + } + + #[test] + fn test_format_changing_not_in_format_preserving() { + assert!( + !FORMAT_PRESERVING.contains(&"cargo"), + "cargo test compresses output -- not format-preserving" + ); + assert!( + !FORMAT_PRESERVING.contains(&"git"), + "git log/diff compresses output -- not format-preserving" + ); + assert!( + !FORMAT_PRESERVING.contains(&"pytest"), + "pytest compresses output -- not format-preserving" + ); + assert!( + !FORMAT_PRESERVING.contains(&"go"), + "go test compresses output -- not format-preserving" + ); + } + + #[test] + fn test_transparent_sinks_contains_expected() { + assert!( + TRANSPARENT_SINKS.contains(&"tee"), + "tee is a transparent sink (copies stdin to file + stdout)" + ); + assert!( + TRANSPARENT_SINKS.contains(&"head"), + "head is a transparent sink (truncates lines)" + ); + assert!( + TRANSPARENT_SINKS.contains(&"cat"), + "cat is a transparent sink (passes through)" + ); + assert!( + TRANSPARENT_SINKS.contains(&"tail"), + "tail is a transparent sink (last N lines)" + ); + } + + fn count_tokens(text: &str) -> usize { + text.split_whitespace().count() + } + + fn exec(cmd: &str) -> String { + let parts: Vec<&str> = cmd.split_whitespace().collect(); + let out = std::process::Command::new(parts[0]) + .args(&parts[1..]) + .output() + .unwrap_or_else(|e| panic!("failed to exec '{cmd}': {e}")); + String::from_utf8_lossy(&out.stdout).to_string() + } + + #[test] + #[ignore = "requires installed rtk binary (cargo install --path .) 
and git repo"] + fn test_e2e_git_status_saves_tokens() { + let raw_cmd = "git status"; + let rtk_cmd = match check_for_hook(raw_cmd, "claude") { + HookResult::Rewrite(cmd) => cmd, + other => panic!("Expected Rewrite for '{raw_cmd}', got {other:?}"), + }; + assert!( + rtk_cmd.starts_with("rtk git"), + "lexer+router should produce rtk git status, got: {rtk_cmd}" + ); + + let raw_out = exec(raw_cmd); + let rtk_out = exec(&rtk_cmd); + let raw_tok = count_tokens(&raw_out); + let rtk_tok = count_tokens(&rtk_out); + assert!(raw_tok > 0, "raw git status produced no output"); + + let savings = 100.0 * (1.0 - rtk_tok as f64 / raw_tok as f64); + assert!( + savings >= 40.0, + "rtk git status should save >=40% tokens vs raw git status, \ + got {savings:.1}% ({raw_tok} raw -> {rtk_tok} rtk tokens)" + ); + } + + #[test] + #[ignore = "requires installed rtk binary (cargo install --path .) and directory with files"] + fn test_e2e_ls_saves_tokens() { + let raw_cmd = "ls -la ."; + let rtk_cmd = match check_for_hook(raw_cmd, "claude") { + HookResult::Rewrite(cmd) => cmd, + other => panic!("Expected Rewrite for '{raw_cmd}', got {other:?}"), + }; + assert!( + rtk_cmd.starts_with("rtk ls"), + "lexer+router should produce rtk ls, got: {rtk_cmd}" + ); + + let raw_out = exec(raw_cmd); + let rtk_out = exec(&rtk_cmd); + let raw_tok = count_tokens(&raw_out); + let rtk_tok = count_tokens(&rtk_out); + assert!(raw_tok > 0, "raw ls -la produced no output"); + + let savings = 100.0 * (1.0 - rtk_tok as f64 / raw_tok as f64); + assert!( + savings >= 40.0, + "rtk ls should save >=40% tokens vs raw ls -la, \ + got {savings:.1}% ({raw_tok} raw -> {rtk_tok} rtk tokens)" + ); + } + + #[test] + #[ignore = "requires installed rtk binary (cargo install --path .) 
and git repo with history"] + fn test_e2e_git_log_saves_tokens() { + let raw_cmd = "git log --oneline -20"; + let rtk_cmd = match check_for_hook(raw_cmd, "claude") { + HookResult::Rewrite(cmd) => cmd, + other => panic!("Expected Rewrite for '{raw_cmd}', got {other:?}"), + }; + assert!( + rtk_cmd.starts_with("rtk git"), + "lexer+router should produce rtk git log, got: {rtk_cmd}" + ); + + let raw_out = exec(raw_cmd); + let rtk_out = exec(&rtk_cmd); + let raw_tok = count_tokens(&raw_out); + let rtk_tok = count_tokens(&rtk_out); + assert!( + raw_tok > 0, + "raw git log produced no output -- need a repo with commits" + ); + + let ratio = rtk_tok as f64 / raw_tok.max(1) as f64; + assert!( + ratio <= 1.05, + "rtk git log must not significantly bloat output vs raw git log \ + ({raw_tok} raw -> {rtk_tok} rtk, ratio {ratio:.2})" + ); + } + + #[test] + fn test_cat_multi_file_rewrites_to_rtk_read() { + let result = check_for_hook("cat file1.txt file2.txt", "claude"); + assert!( + matches!(&result, HookResult::Rewrite(s) if s == "rtk read file1.txt file2.txt"), + "cat (multi-file) must rewrite to rtk read on this branch; got: {:?}", + result + ); + } + + #[test] + fn test_cat_single_file_rewrites_to_rtk_read() { + let result = check_for_hook("cat CLAUDE.md", "claude"); + assert!( + matches!(&result, HookResult::Rewrite(s) if s == "rtk read CLAUDE.md"), + "cat (single-file) must rewrite to rtk read on this branch; got: {:?}", + result + ); + } + + // ISSUE #196: gh --json/--jq/--template passthrough + #[test] + fn test_gh_json_flag_passes_through() { + assert!(should_passthrough("gh pr list --json number,title")); + assert!(should_passthrough( + "gh pr list --json number --jq '.[].number'" + )); + assert!(should_passthrough("gh pr view 42 --template '{{.title}}'")); + assert!(should_passthrough("gh api repos/owner/repo --jq '.name'")); + } + + #[test] + fn test_gh_without_json_not_passthrough() { + assert!(!should_passthrough("gh pr list")); + assert!(!should_passthrough("gh 
issue list")); + } + + #[test] + fn test_hook_lookup_git_branch() { + assert_eq!(hook_lookup("git", "branch"), Some(("rtk git", "git"))); + } + + #[test] + fn test_hook_lookup_git_worktree() { + assert_eq!(hook_lookup("git", "worktree"), Some(("rtk git", "git"))); + } + + #[test] + fn test_git_branch_routes_via_hook() { + assert_rewrite("git branch", "rtk git branch"); + } + + #[test] + fn test_git_worktree_list_routes_via_hook() { + assert_rewrite("git worktree list", "rtk git worktree"); + } +} diff --git a/src/cmd/mod.rs b/src/cmd/mod.rs new file mode 100644 index 000000000..fff0e85ce --- /dev/null +++ b/src/cmd/mod.rs @@ -0,0 +1,19 @@ +pub(crate) mod analysis; +pub(crate) mod predicates; + +pub(crate) mod builtins; +pub(crate) mod filters; + +pub mod exec; +pub mod hook; + +// Re-export existing lexer from discover module +pub(crate) mod lexer { + pub use crate::discover::lexer::*; +} + +#[cfg(test)] +pub(crate) mod test_helpers; + +pub use exec::execute; +pub use hook::check_for_hook; diff --git a/src/cmd/predicates.rs b/src/cmd/predicates.rs new file mode 100644 index 000000000..034d7fc7d --- /dev/null +++ b/src/cmd/predicates.rs @@ -0,0 +1,75 @@ +use std::process::Command; + +pub(crate) fn has_unstaged_changes() -> bool { + Command::new("git") + .args(["diff", "--quiet"]) + .status() + .map(|s| !s.success()) + .unwrap_or(false) +} + +pub(crate) fn is_interactive() -> bool { + use std::io::IsTerminal; + std::io::stderr().is_terminal() +} + +pub(crate) fn expand_tilde(path: &str) -> String { + if path.starts_with('~') { + let home = std::env::var("HOME") + .or_else(|_| std::env::var("USERPROFILE")) + .unwrap_or_else(|_| "/".to_string()); + path.replacen('~', &home, 1) + } else { + path.to_string() + } +} + +pub(crate) fn get_home() -> String { + std::env::var("HOME") + .or_else(|_| std::env::var("USERPROFILE")) + .unwrap_or_else(|_| "/".to_string()) +} + +#[cfg(test)] +mod tests { + use super::*; + use std::env; + + #[test] + fn test_expand_tilde_simple() { + 
let home = env::var("HOME").unwrap_or("/".to_string()); + assert_eq!(expand_tilde("~/src"), format!("{}/src", home)); + } + + #[test] + fn test_expand_tilde_no_tilde() { + assert_eq!(expand_tilde("/absolute/path"), "/absolute/path"); + } + + #[test] + fn test_expand_tilde_only_tilde() { + let home = env::var("HOME").unwrap_or("/".to_string()); + assert_eq!(expand_tilde("~"), home); + } + + #[test] + fn test_expand_tilde_relative() { + assert_eq!(expand_tilde("relative/path"), "relative/path"); + } + + #[test] + fn test_get_home_returns_something() { + let home = get_home(); + assert!(!home.is_empty()); + } + + #[test] + fn test_is_interactive_returns_false_in_tests() { + assert!(!is_interactive()); + } + + #[test] + fn test_has_unstaged_changes_does_not_panic() { + let _result: bool = has_unstaged_changes(); + } +} diff --git a/src/cmd/test_helpers.rs b/src/cmd/test_helpers.rs new file mode 100644 index 000000000..14b1f051a --- /dev/null +++ b/src/cmd/test_helpers.rs @@ -0,0 +1,32 @@ +use std::sync::{Mutex, MutexGuard, OnceLock}; + +static ENV_LOCK: OnceLock> = OnceLock::new(); + +pub struct EnvGuard { + _lock: MutexGuard<'static, ()>, +} + +impl EnvGuard { + pub fn new() -> Self { + let lock = ENV_LOCK + .get_or_init(|| Mutex::new(())) + .lock() + .unwrap_or_else(|e| e.into_inner()); + Self::cleanup(); + Self { _lock: lock } + } + + fn cleanup() { + std::env::remove_var("RTK_SAFE_COMMANDS"); + std::env::remove_var("RTK_BLOCK_TOKEN_WASTE"); + std::env::remove_var("RTK_ACTIVE"); + std::env::remove_var("RTK_DB_PATH"); + std::env::remove_var("RTK_HOOK_ENABLED"); + } +} + +impl Drop for EnvGuard { + fn drop(&mut self) { + Self::cleanup(); + } +} diff --git a/src/cmds/git/git.rs b/src/cmds/git/git.rs index 8d3d0eb4b..cc2cedf33 100644 --- a/src/cmds/git/git.rs +++ b/src/cmds/git/git.rs @@ -512,7 +512,7 @@ fn parse_user_limit(args: &[String]) -> Option { /// so we skip line capping (git already returns exactly N commits) and use a /// wider truncation threshold (120 
chars) to preserve commit context that LLMs /// need for rebase/squash operations. -fn filter_log_output( +pub(crate) fn filter_log_output( output: &str, limit: usize, user_set_limit: bool, @@ -588,8 +588,7 @@ fn truncate_line(line: &str, width: usize) -> String { } } -/// Format porcelain output into compact RTK status display -fn format_status_output(porcelain: &str) -> String { +pub(crate) fn format_status_output(porcelain: &str) -> String { let lines: Vec<&str> = porcelain.lines().collect(); if lines.is_empty() { @@ -1272,6 +1271,7 @@ fn filter_branch_output(output: &str) -> String { let mut current = String::new(); let mut local: Vec = Vec::new(); let mut remote: Vec = Vec::new(); + let mut seen_remote: std::collections::HashSet = std::collections::HashSet::new(); for line in output.lines() { let line = line.trim(); @@ -1281,13 +1281,16 @@ fn filter_branch_output(output: &str) -> String { if let Some(branch) = line.strip_prefix("* ") { current = branch.to_string(); - } else if line.starts_with("remotes/origin/") { - let branch = line.strip_prefix("remotes/origin/").unwrap_or(line); - // Skip HEAD pointer - if branch.starts_with("HEAD ") { - continue; + } else if let Some(rest) = line.strip_prefix("remotes/") { + if let Some(slash_pos) = rest.find('/') { + let branch = &rest[slash_pos + 1..]; + if branch.starts_with("HEAD ") { + continue; + } + if seen_remote.insert(branch.to_string()) { + remote.push(branch.to_string()); + } } - remote.push(branch.to_string()); } else { local.push(line.to_string()); } @@ -1303,7 +1306,6 @@ fn filter_branch_output(output: &str) -> String { } if !remote.is_empty() { - // Filter out remotes that already exist locally let remote_only: Vec<&String> = remote .iter() .filter(|r| *r != ¤t && !local.contains(r)) @@ -1806,6 +1808,37 @@ mod tests { assert!(!result.contains("remote-only")); } + #[test] + fn test_filter_branch_multi_remote() { + let output = "* main\n develop\n remotes/origin/HEAD -> origin/main\n remotes/origin/main\n 
remotes/origin/feature-x\n remotes/upstream/main\n remotes/upstream/release-v3\n remotes/fork/main\n remotes/fork/experiment\n"; + let result = filter_branch_output(output); + assert!(result.contains("* main")); + assert!(result.contains("develop")); + assert!(result.contains("feature-x"), "origin branch shown: {}", result); + assert!( + result.contains("release-v3"), + "upstream branch shown: {}", + result + ); + assert!( + result.contains("experiment"), + "fork branch shown: {}", + result + ); + assert!( + !result.contains("remotes/"), + "remote prefix stripped: {}", + result + ); + let main_count = result.matches("main").count(); + assert!( + main_count <= 2, + "main deduplicated across remotes (found {} occurrences): {}", + main_count, + result + ); + } + #[test] fn test_filter_stash_list() { let output = diff --git a/src/cmds/go/go_cmd.rs b/src/cmds/go/go_cmd.rs index 5935f9b0d..b4276b332 100644 --- a/src/cmds/go/go_cmd.rs +++ b/src/cmds/go/go_cmd.rs @@ -284,8 +284,7 @@ fn run_go_tool_golangci_lint(args: &[OsString], verbose: u8) -> Result { Ok(if exit_code == 1 { 0 } else { exit_code }) } -/// Parse go test -json output (NDJSON format) -fn filter_go_test_json(output: &str) -> String { +pub(crate) fn filter_go_test_json(output: &str) -> String { let mut packages: HashMap = HashMap::new(); let mut current_test_output: HashMap<(String, String), Vec> = HashMap::new(); // (package, test) -> outputs let mut build_output: HashMap> = HashMap::new(); // import_path -> error lines @@ -465,8 +464,7 @@ fn filter_go_test_json(output: &str) -> String { result.trim().to_string() } -/// Filter go build output - show only errors -fn filter_go_build(output: &str) -> String { +pub(crate) fn filter_go_build(output: &str) -> String { let mut errors: Vec = Vec::new(); for line in output.lines() { diff --git a/src/cmds/js/tsc_cmd.rs b/src/cmds/js/tsc_cmd.rs index ed64fd09e..20d1e7aba 100644 --- a/src/cmds/js/tsc_cmd.rs +++ b/src/cmds/js/tsc_cmd.rs @@ -36,8 +36,7 @@ pub fn run(args: 
&[String], verbose: u8) -> Result { ) } -/// Filter TypeScript compiler output - group errors by file, show every error -fn filter_tsc_output(output: &str) -> String { +pub(crate) fn filter_tsc_output(output: &str) -> String { lazy_static::lazy_static! { // Pattern: src/file.ts(12,5): error TS2322: Type 'string' is not assignable to type 'number'. static ref TSC_ERROR: Regex = Regex::new( diff --git a/src/cmds/python/pytest_cmd.rs b/src/cmds/python/pytest_cmd.rs index 3e8cde4ed..83d18dae9 100644 --- a/src/cmds/python/pytest_cmd.rs +++ b/src/cmds/python/pytest_cmd.rs @@ -48,8 +48,7 @@ pub fn run(args: &[String], verbose: u8) -> Result { ) } -/// Parse pytest output using state machine -fn filter_pytest_output(output: &str) -> String { +pub(crate) fn filter_pytest_output(output: &str) -> String { let mut state = ParseState::Header; let mut test_files: Vec = Vec::new(); let mut failures: Vec = Vec::new(); diff --git a/src/cmds/rust/cargo_cmd.rs b/src/cmds/rust/cargo_cmd.rs index f0a37e71c..427fed76b 100644 --- a/src/cmds/rust/cargo_cmd.rs +++ b/src/cmds/rust/cargo_cmd.rs @@ -732,8 +732,7 @@ impl AggregatedTestResult { } } -/// Filter cargo test output - show failures + summary only -fn filter_cargo_test(output: &str) -> String { +pub(crate) fn filter_cargo_test(output: &str) -> String { let mut failures: Vec = Vec::new(); let mut summary_lines: Vec = Vec::new(); let mut in_failure_section = false; diff --git a/src/cmds/system/pipe_cmd.rs b/src/cmds/system/pipe_cmd.rs new file mode 100644 index 000000000..5736d173d --- /dev/null +++ b/src/cmds/system/pipe_cmd.rs @@ -0,0 +1,450 @@ +use anyhow::Result; +use std::io::Read; + +pub fn resolve_filter(name: &str) -> Option String> { + match name { + "cargo-test" | "cargo" => Some(crate::cmds::rust::cargo_cmd::filter_cargo_test), + "pytest" => Some(crate::cmds::python::pytest_cmd::filter_pytest_output), + "go-test" => Some(go_test_wrapper), + "go-build" => Some(crate::cmds::go::go_cmd::filter_go_build), + "tsc" => 
Some(crate::cmds::js::tsc_cmd::filter_tsc_output), + "vitest" => Some(vitest_wrapper), + "grep" | "rg" => Some(grep_wrapper), + "find" | "fd" => Some(find_wrapper), + "git-log" => Some(git_log_wrapper), + "git-diff" => Some(git_diff_wrapper), + "git-status" => Some(crate::cmds::git::git::format_status_output), + "mypy" => Some(crate::cmds::python::mypy_cmd::filter_mypy_output), + "ruff-check" => Some(crate::cmds::python::ruff_cmd::filter_ruff_check_json), + "ruff-format" => Some(crate::cmds::python::ruff_cmd::filter_ruff_format), + "prettier" => Some(crate::cmds::js::prettier_cmd::filter_prettier_output), + _ => None, + } +} + +fn go_test_wrapper(input: &str) -> String { + crate::cmds::go::go_cmd::filter_go_test_json(input) +} + +fn git_log_wrapper(input: &str) -> String { + crate::cmds::git::git::filter_log_output(input, 50, false, false) +} + +fn git_diff_wrapper(input: &str) -> String { + crate::cmds::git::git::compact_diff(input, 200) +} + +fn vitest_wrapper(input: &str) -> String { + use crate::cmds::js::vitest_cmd::VitestParser; + use crate::parser::{FormatMode, OutputParser, TokenFormatter}; + let result = VitestParser::parse(input); + match result { + crate::parser::ParseResult::Full(data) => data.format(FormatMode::Compact), + crate::parser::ParseResult::Degraded(data, _) => data.format(FormatMode::Compact), + crate::parser::ParseResult::Passthrough(raw) => raw, + } +} + +fn grep_wrapper(input: &str) -> String { + use std::collections::HashMap; + + let mut by_file: HashMap<&str, Vec<(&str, &str)>> = HashMap::new(); + let mut total = 0; + + for line in input.lines() { + let parts: Vec<&str> = line.splitn(3, ':').collect(); + if parts.len() == 3 { + if let Ok(_line_num) = parts[1].parse::() { + total += 1; + by_file.entry(parts[0]).or_default().push((parts[1], parts[2])); + } + } + } + + if total == 0 { + return input.to_string(); + } + + let mut out = format!("{} matches in {}F:\n\n", total, by_file.len()); + let mut files: Vec<_> = 
by_file.iter().collect(); + files.sort_by_key(|(f, _)| *f); + + for (file, matches) in files { + out.push_str(&format!("[file] {} ({}):\n", file, matches.len())); + for (line_num, content) in matches.iter().take(10) { + out.push_str(&format!(" {:>4}: {}\n", line_num, content.trim())); + } + if matches.len() > 10 { + out.push_str(&format!(" +{}\n", matches.len() - 10)); + } + out.push('\n'); + } + + out +} + +fn find_wrapper(input: &str) -> String { + use std::collections::HashMap; + + let paths: Vec<&str> = input.lines().filter(|l| !l.trim().is_empty()).collect(); + + if paths.is_empty() { + return input.to_string(); + } + + let mut by_dir: HashMap<&str, Vec<&str>> = HashMap::new(); + + for path in &paths { + let dir = match path.rfind('/') { + Some(pos) => &path[..pos], + None => ".", + }; + let name = match path.rfind('/') { + Some(pos) => &path[pos + 1..], + None => path, + }; + by_dir.entry(dir).or_default().push(name); + } + + let mut out = format!("{} files in {} dirs:\n\n", paths.len(), by_dir.len()); + let mut dirs: Vec<_> = by_dir.iter().collect(); + dirs.sort_by_key(|(d, _)| *d); + + for (dir, files) in dirs.iter().take(20) { + out.push_str(&format!("{}/ ({})\n", dir, files.len())); + for f in files.iter().take(10) { + out.push_str(&format!(" {}\n", f)); + } + if files.len() > 10 { + out.push_str(&format!(" +{}\n", files.len() - 10)); + } + } + + if dirs.len() > 20 { + out.push_str(&format!("\n+{} more dirs\n", dirs.len() - 20)); + } + + out +} + +pub fn auto_detect_filter(input: &str) -> fn(&str) -> String { + let first_1k = &input[..input.len().min(1024)]; + + if first_1k.contains("test result:") && first_1k.contains("passed;") { + return crate::cmds::rust::cargo_cmd::filter_cargo_test; + } + + if first_1k.contains("=== test session starts") { + return crate::cmds::python::pytest_cmd::filter_pytest_output; + } + + let first_trimmed = first_1k.trim_start(); + if first_trimmed.starts_with('{') && first_1k.contains("\"Action\"") { + return go_test_wrapper; 
+ } + + if first_1k.contains(": error:") && first_1k.contains(".py:") { + return crate::cmds::python::mypy_cmd::filter_mypy_output; + } + + // grep/rg: lines matching file:number:content + if first_1k + .lines() + .take(5) + .filter(|l| !l.trim().is_empty()) + .any(|l| { + let parts: Vec<_> = l.splitn(3, ':').collect(); + parts.len() == 3 && parts[1].parse::().is_ok() + }) + { + return grep_wrapper; + } + + if first_1k.contains("\"testResults\"") || first_1k.contains("\"numTotalTests\"") { + return vitest_wrapper; + } + + // find/fd: all non-empty lines look like file paths, minimum 3 lines + let path_like_lines: usize = first_1k + .lines() + .filter(|l| { + let t = l.trim(); + !t.is_empty() + && !t.contains(':') + && (t.starts_with('.') || t.starts_with('/') || t.contains('/')) + }) + .count(); + let nonempty_lines: usize = first_1k.lines().filter(|l| !l.trim().is_empty()).count(); + if nonempty_lines >= 3 && path_like_lines == nonempty_lines { + return find_wrapper; + } + + identity_filter +} + +fn identity_filter(input: &str) -> String { + input.to_string() +} + +pub fn run(filter_name: Option<&str>, passthrough: bool) -> Result<()> { + let mut buf = String::new(); + std::io::stdin() + .read_to_string(&mut buf) + .map_err(|e| anyhow::anyhow!("Failed to read stdin: {}", e))?; + + if passthrough { + print!("{}", buf); + return Ok(()); + } + + let filter_fn = match filter_name { + Some(name) => resolve_filter(name).ok_or_else(|| { + anyhow::anyhow!( + "Unknown filter '{}'. Available: cargo-test, pytest, go-test, go-build, \ + tsc, vitest, grep, rg, find, fd, git-log, git-diff, git-status, \ + mypy, ruff-check, ruff-format, prettier", + name + ) + })?, + None => auto_detect_filter(&buf), + }; + + let output = filter_fn(&buf); + print!("{}", output); + Ok(()) +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn test_resolve_filter_cargo_test() { + let f = resolve_filter("cargo-test").expect("cargo-test filter must exist"); + let out = f("test result: ok. 
5 passed; 0 failed"); + assert!(out.contains("passed") || out.contains("PASS"), "out={}", out); + } + + #[test] + fn test_resolve_filter_cargo_alias() { + assert!(resolve_filter("cargo").is_some()); + } + + #[test] + fn test_resolve_filter_grep() { + let f = resolve_filter("grep").expect("grep filter must exist"); + let input = "src/main.rs:42:fn main() {\nsrc/lib.rs:10:pub fn helper() {}\n"; + let out = f(input); + assert!( + out.contains("main.rs") || out.contains("matches"), + "out={}", + out + ); + } + + #[test] + fn test_resolve_filter_rg_alias() { + assert!(resolve_filter("rg").is_some()); + } + + #[test] + fn test_resolve_filter_pytest() { + assert!(resolve_filter("pytest").is_some()); + } + + #[test] + fn test_resolve_filter_go_test() { + assert!(resolve_filter("go-test").is_some()); + } + + #[test] + fn test_resolve_filter_tsc() { + assert!(resolve_filter("tsc").is_some()); + } + + #[test] + fn test_resolve_filter_vitest() { + assert!(resolve_filter("vitest").is_some()); + } + + #[test] + fn test_resolve_filter_git_log() { + assert!(resolve_filter("git-log").is_some()); + } + + #[test] + fn test_resolve_filter_git_diff() { + assert!(resolve_filter("git-diff").is_some()); + } + + #[test] + fn test_resolve_filter_git_status() { + assert!(resolve_filter("git-status").is_some()); + } + + #[test] + fn test_resolve_filter_unknown_returns_none() { + assert!(resolve_filter("nonexistent-filter").is_none()); + } + + #[test] + fn test_auto_detect_cargo_test() { + let input = "test result: ok. 
5 passed; 0 failed; 0 ignored; 0 measured\n"; + let f = auto_detect_filter(input); + let out = f(input); + assert!(!out.is_empty()); + } + + #[test] + fn test_auto_detect_pytest() { + let input = "=== test session starts ===\ncollected 3 items\n"; + let f = auto_detect_filter(input); + let out = f(input); + assert!(!out.is_empty()); + } + + #[test] + fn test_auto_detect_grep_format() { + let input = "src/main.rs:42:fn main() {\nsrc/lib.rs:10:pub fn helper() {}\n"; + let f = auto_detect_filter(input); + let out = f(input); + assert!(!out.is_empty()); + } + + #[test] + fn test_auto_detect_go_test_ndjson() { + let input = r#"{"Time":"2024-01-01T00:00:00Z","Action":"run","Package":"example/pkg"} +{"Time":"2024-01-01T00:00:01Z","Action":"pass","Package":"example/pkg","Elapsed":0.5} +"#; + let f = auto_detect_filter(input); + let out = f(input); + assert!(!out.is_empty()); + } + + #[test] + fn test_auto_detect_unknown_returns_identity() { + let input = "some random text that doesn't match any filter pattern\n"; + let f = auto_detect_filter(input); + let out = f(input); + assert_eq!(out, input); + } + + #[test] + fn test_git_log_wrapper() { + let input = "abc1234 Fix bug in parser (2 days ago) \n\ + def5678 Add new feature (3 days ago) \n"; + let out = git_log_wrapper(input); + assert!(!out.is_empty()); + } + + #[test] + fn test_git_diff_wrapper() { + let input = "diff --git a/src/main.rs b/src/main.rs\n\ + --- a/src/main.rs\n\ + +++ b/src/main.rs\n\ + @@ -1,3 +1,4 @@\n\ + +// new comment\n\ + fn main() {}\n"; + let out = git_diff_wrapper(input); + assert!(!out.is_empty()); + } + + #[test] + fn test_resolve_filter_find() { + let f = resolve_filter("find").expect("find filter must exist"); + let input = "./src/main.rs\n./src/lib.rs\n./tests/foo.rs\n"; + let out = f(input); + assert!(out.contains("3 files"), "out={}", out); + } + + #[test] + fn test_resolve_filter_fd_alias() { + assert!(resolve_filter("fd").is_some()); + } + + #[test] + fn test_auto_detect_find_paths() { + 
let input = "./src/main.rs\n./src/lib.rs\n./src/cmd/mod.rs\n./tests/foo.rs\n"; + let f = auto_detect_filter(input); + let out = f(input); + assert!(out.contains("4 files"), "out={}", out); + } + + #[test] + fn test_auto_detect_find_absolute_paths() { + let input = "/home/user/src/main.rs\n/home/user/src/lib.rs\n/home/user/tests/foo.rs\n"; + let f = auto_detect_filter(input); + let out = f(input); + assert!(out.contains("3 files"), "out={}", out); + } + + #[test] + fn test_auto_detect_find_not_triggered_for_few_lines() { + let input = "./src/main.rs\n./src/lib.rs\n"; + let f = auto_detect_filter(input); + let out = f(input); + assert_eq!(out, input); + } + + #[test] + fn test_auto_detect_find_not_triggered_for_grep_output() { + let input = "src/main.rs:42:fn main() {\nsrc/lib.rs:10:pub fn helper() {}\nsrc/a.rs:1:x\n"; + let f = auto_detect_filter(input); + let out = f(input); + assert!( + !out.contains("files"), + "should not trigger find filter: out={}", + out + ); + } + + #[test] + fn test_auto_detect_empty_input_is_identity() { + let f = auto_detect_filter(""); + let out = f(""); + assert_eq!(out, ""); + } + + #[test] + fn test_auto_detect_single_line_unknown() { + let input = "hello world\n"; + let f = auto_detect_filter(input); + let out = f(input); + assert_eq!(out, input); + } + + #[test] + fn test_resolve_filter_go_build() { + assert!(resolve_filter("go-build").is_some()); + } + + #[test] + fn test_resolve_filter_mypy() { + assert!(resolve_filter("mypy").is_some()); + } + + #[test] + fn test_resolve_filter_ruff_check() { + assert!(resolve_filter("ruff-check").is_some()); + } + + #[test] + fn test_resolve_filter_ruff_format() { + assert!(resolve_filter("ruff-format").is_some()); + } + + #[test] + fn test_resolve_filter_prettier() { + assert!(resolve_filter("prettier").is_some()); + } + + #[test] + fn test_auto_detect_mypy_output() { + let input = "src/app.py:42: error: Argument 1 has incompatible type [arg-type]\n\ + src/utils.py:10: error: Missing return 
statement [return]\n\ + Found 2 errors in 2 files\n"; + let f = auto_detect_filter(input); + let out = f(input); + assert!(!out.is_empty()); + } +} diff --git a/src/core/mod.rs b/src/core/mod.rs index c5d1e9306..3705cfbed 100644 --- a/src/core/mod.rs +++ b/src/core/mod.rs @@ -5,6 +5,7 @@ pub mod constants; pub mod display_helpers; pub mod filter; pub mod runner; +pub mod stream; pub mod tee; pub mod telemetry; pub mod toml_filter; diff --git a/src/core/stream.rs b/src/core/stream.rs new file mode 100644 index 000000000..ea04a9e9e --- /dev/null +++ b/src/core/stream.rs @@ -0,0 +1,418 @@ +use anyhow::{Context, Result}; +use std::io::{self, BufRead, BufReader, BufWriter, Write}; +use std::process::{Command, Stdio}; + +pub trait StreamFilter { + fn feed_line(&mut self, line: &str) -> Option; + fn flush(&mut self) -> String; +} + +pub trait StdinFilter: Send { + fn feed_line(&mut self, line: &str) -> Option; + fn flush(&mut self) -> String; +} + +pub struct LineFilter Option> { + f: F, +} + +impl Option> LineFilter { + pub fn new(f: F) -> Self { + Self { f } + } +} + +impl Option> StreamFilter for LineFilter { + fn feed_line(&mut self, line: &str) -> Option { + (self.f)(line) + } + + fn flush(&mut self) -> String { + String::new() + } +} + +pub enum FilterMode { + Streaming(Box), + Buffered(fn(&str) -> String), + Passthrough, +} + +#[allow(dead_code)] +pub enum StdinMode { + Inherit, + Filter(Box), + Null, +} + +pub struct StreamResult { + pub exit_code: i32, + pub raw: String, + pub filtered: String, +} + +impl StreamResult { + #[allow(dead_code)] + pub fn success(&self) -> bool { + self.exit_code == 0 + } +} + +pub fn status_to_exit_code(status: std::process::ExitStatus) -> i32 { + if let Some(code) = status.code() { + return code; + } + #[cfg(unix)] + { + use std::os::unix::process::ExitStatusExt; + if let Some(sig) = status.signal() { + return 128 + sig; + } + } + 1 +} + +// ISSUE #897: ChildGuard RAII prevents zombie processes that caused kernel panic +pub fn 
run_streaming( + cmd: &mut Command, + stdin_mode: StdinMode, + stdout_mode: FilterMode, +) -> Result { + match &stdin_mode { + StdinMode::Inherit => { + cmd.stdin(Stdio::inherit()); + } + StdinMode::Filter(_) | StdinMode::Null => { + cmd.stdin(Stdio::piped()); + } + } + cmd.stdout(Stdio::piped()); + cmd.stderr(Stdio::piped()); + + struct ChildGuard(std::process::Child); + impl Drop for ChildGuard { + fn drop(&mut self) { + self.0.wait().ok(); + } + } + + let mut child = ChildGuard(cmd.spawn().context("Failed to spawn process")?); + + let stdin_thread: Option> = match stdin_mode { + StdinMode::Filter(mut filter) => { + let child_stdin = child.0.stdin.take().context("No child stdin handle")?; + Some(std::thread::spawn(move || { + let mut writer = BufWriter::new(child_stdin); + let stdin_handle = io::stdin(); + for line in BufReader::new(stdin_handle.lock()) + .lines() + .map_while(Result::ok) + { + if let Some(out) = filter.feed_line(&line) { + if writeln!(writer, "{}", out).is_err() { + break; + } + } + } + let tail = filter.flush(); + if !tail.is_empty() { + write!(writer, "{}", tail).ok(); + } + })) + } + StdinMode::Null => { + child.0.stdin.take(); + None + } + StdinMode::Inherit => None, + }; + + let stderr = child.0.stderr.take().context("No child stderr handle")?; + let stderr_thread = std::thread::spawn(move || -> String { + let mut raw_err = String::new(); + let stderr_out = io::stderr(); + let mut err_out = stderr_out.lock(); + for line in BufReader::new(stderr).lines().map_while(Result::ok) { + writeln!(err_out, "{}", line).ok(); + raw_err.push_str(&line); + raw_err.push('\n'); + } + raw_err + }); + + let stdout = child.0.stdout.take().context("No child stdout handle")?; + const RAW_CAP: usize = 1_048_576; + let mut raw_stdout = String::new(); + let mut filtered = String::new(); + + { + let stdout_handle = io::stdout(); + let mut out = stdout_handle.lock(); + + match stdout_mode { + FilterMode::Passthrough => { + for line in 
BufReader::new(stdout).lines().map_while(Result::ok) { + if raw_stdout.len() < RAW_CAP { + raw_stdout.push_str(&line); + raw_stdout.push('\n'); + } + match writeln!(out, "{}", line) { + Err(e) if e.kind() == io::ErrorKind::BrokenPipe => break, + Err(e) => return Err(e.into()), + Ok(_) => {} + } + } + filtered = raw_stdout.clone(); + } + FilterMode::Streaming(mut filter) => { + for line in BufReader::new(stdout).lines().map_while(Result::ok) { + if raw_stdout.len() < RAW_CAP { + raw_stdout.push_str(&line); + raw_stdout.push('\n'); + } + if let Some(output) = filter.feed_line(&line) { + filtered.push_str(&output); + match write!(out, "{}", output) { + Err(e) if e.kind() == io::ErrorKind::BrokenPipe => break, + Err(e) => return Err(e.into()), + Ok(_) => {} + } + } + } + let tail = filter.flush(); + filtered.push_str(&tail); + match write!(out, "{}", tail) { + Err(e) if e.kind() == io::ErrorKind::BrokenPipe => {} + Err(e) => return Err(e.into()), + Ok(_) => {} + } + } + FilterMode::Buffered(filter_fn) => { + for line in BufReader::new(stdout).lines().map_while(Result::ok) { + if raw_stdout.len() < RAW_CAP { + raw_stdout.push_str(&line); + raw_stdout.push('\n'); + } + } + let result = filter_fn(&raw_stdout); + filtered = result.clone(); + match write!(out, "{}", result) { + Err(e) if e.kind() == io::ErrorKind::BrokenPipe => {} + Err(e) => return Err(e.into()), + Ok(_) => {} + } + } + } + } + + let raw_stderr = stderr_thread.join().unwrap_or_else(|_| String::new()); + if let Some(t) = stdin_thread { + t.join().ok(); + } + + let status = child.0.wait().context("Failed to wait for child")?; + + Ok(StreamResult { + exit_code: status_to_exit_code(status), + raw: format!("{}{}", raw_stdout, raw_stderr), + filtered, + }) +} + +#[cfg(test)] +mod tests { + use super::*; + use std::process::Command; + + #[test] + fn test_exit_code_zero() { + let status = Command::new("true").status().unwrap(); + assert_eq!(status_to_exit_code(status), 0); + } + + #[test] + fn 
test_exit_code_nonzero() { + let status = Command::new("false").status().unwrap(); + assert_eq!(status_to_exit_code(status), 1); + } + + #[cfg(unix)] + #[test] + fn test_exit_code_signal_kill() { + let mut child = Command::new("sleep").arg("60").spawn().unwrap(); + child.kill().unwrap(); + let status = child.wait().unwrap(); + assert_eq!(status_to_exit_code(status), 137); + } + + #[test] + fn test_line_filter_passes_lines() { + let mut f = LineFilter::new(|l| Some(format!("{}\n", l.to_uppercase()))); + assert_eq!(f.feed_line("hello"), Some("HELLO\n".to_string())); + } + + #[test] + fn test_line_filter_drops_lines() { + let mut f = LineFilter::new(|l| { + if l.starts_with('#') { + None + } else { + Some(l.to_string()) + } + }); + assert_eq!(f.feed_line("# comment"), None); + assert_eq!(f.feed_line("code"), Some("code".to_string())); + } + + #[test] + fn test_line_filter_flush_empty() { + let mut f = LineFilter::new(|l| Some(l.to_string())); + assert_eq!(f.flush(), String::new()); + } + + #[test] + fn test_stream_result_success() { + let r = StreamResult { + exit_code: 0, + raw: String::new(), + filtered: String::new(), + }; + assert!(r.success()); + } + + #[test] + fn test_stream_result_failure() { + let r = StreamResult { + exit_code: 1, + raw: String::new(), + filtered: String::new(), + }; + assert!(!r.success()); + } + + #[test] + fn test_stream_result_signal_not_success() { + let r = StreamResult { + exit_code: 137, + raw: String::new(), + filtered: String::new(), + }; + assert!(!r.success()); + } + + #[test] + fn test_run_streaming_passthrough_echo() { + let mut cmd = Command::new("echo"); + cmd.arg("hello"); + let result = run_streaming(&mut cmd, StdinMode::Null, FilterMode::Passthrough).unwrap(); + assert_eq!(result.exit_code, 0); + assert!(result.raw.contains("hello")); + } + + #[test] + fn test_run_streaming_exit_code_preserved() { + let mut cmd = Command::new("sh"); + cmd.args(["-c", "exit 42"]); + let result = run_streaming(&mut cmd, StdinMode::Null, 
FilterMode::Passthrough).unwrap(); + assert_eq!(result.exit_code, 42); + } + + #[test] + fn test_run_streaming_exit_code_zero() { + let mut cmd = Command::new("true"); + let result = run_streaming(&mut cmd, StdinMode::Null, FilterMode::Passthrough).unwrap(); + assert_eq!(result.exit_code, 0); + assert!(result.success()); + } + + #[test] + fn test_run_streaming_exit_code_one() { + let mut cmd = Command::new("false"); + let result = run_streaming(&mut cmd, StdinMode::Null, FilterMode::Passthrough).unwrap(); + assert_eq!(result.exit_code, 1); + assert!(!result.success()); + } + + #[test] + fn test_run_streaming_streaming_filter_drops_lines() { + let mut cmd = Command::new("printf"); + cmd.arg("a\nb\nc\n"); + let filter = LineFilter::new(|l| { + if l == "b" { + None + } else { + Some(format!("{}\n", l)) + } + }); + let result = run_streaming( + &mut cmd, + StdinMode::Null, + FilterMode::Streaming(Box::new(filter)), + ) + .unwrap(); + assert!(result.filtered.contains('a')); + assert!(!result.filtered.contains('b')); + assert!(result.filtered.contains('c')); + assert_eq!(result.exit_code, 0); + } + + #[test] + fn test_run_streaming_buffered_filter() { + let mut cmd = Command::new("printf"); + cmd.arg("line1\nline2\nline3\n"); + fn upper(s: &str) -> String { + s.to_uppercase() + } + let result = run_streaming(&mut cmd, StdinMode::Null, FilterMode::Buffered(upper)).unwrap(); + assert!(result.filtered.contains("LINE1")); + assert!(result.filtered.contains("LINE2")); + assert_eq!(result.exit_code, 0); + } + + #[test] + fn test_run_streaming_raw_cap_at_1mb() { + let mut cmd = Command::new("sh"); + cmd.args(["-c", "yes | head -600000"]); + let result = run_streaming(&mut cmd, StdinMode::Null, FilterMode::Passthrough).unwrap(); + assert!( + result.raw.len() <= 1_048_576 + 100, + "raw should be capped at ~1 MiB, got {} bytes", + result.raw.len() + ); + assert!( + result.raw.len() > 100_000, + "Should have captured significant data" + ); + } + + #[test] + fn 
test_child_guard_prevents_zombie() { + let mut cmd = Command::new("true"); + let result = run_streaming(&mut cmd, StdinMode::Null, FilterMode::Passthrough); + assert!(result.is_ok()); + assert_eq!(result.unwrap().exit_code, 0); + } + + #[test] + fn test_run_streaming_null_stdin_cat() { + let mut cmd = Command::new("cat"); + let result = run_streaming(&mut cmd, StdinMode::Null, FilterMode::Passthrough).unwrap(); + assert_eq!(result.exit_code, 0); + } + + #[test] + fn test_run_streaming_raw_contains_stdout() { + let mut cmd = Command::new("echo"); + cmd.arg("test_output_xyz"); + let result = run_streaming(&mut cmd, StdinMode::Null, FilterMode::Passthrough).unwrap(); + assert!(result.raw.contains("test_output_xyz")); + } + + #[test] + fn test_run_streaming_filtered_equals_raw_in_passthrough() { + let mut cmd = Command::new("echo"); + cmd.arg("check_equality"); + let result = run_streaming(&mut cmd, StdinMode::Null, FilterMode::Passthrough).unwrap(); + assert_eq!(result.filtered.trim(), result.raw.trim()); + } +} diff --git a/src/main.rs b/src/main.rs index 77a4be8c3..476b05167 100644 --- a/src/main.rs +++ b/src/main.rs @@ -1,4 +1,5 @@ mod analytics; +mod cmd; mod cmds; mod core; mod discover; @@ -19,8 +20,8 @@ use cmds::python::{mypy_cmd, pip_cmd, pytest_cmd, ruff_cmd}; use cmds::ruby::{rake_cmd, rspec_cmd, rubocop_cmd}; use cmds::rust::{cargo_cmd, runner}; use cmds::system::{ - deps, env_cmd, find_cmd, format_cmd, grep_cmd, json_cmd, local_llm, log_cmd, ls, read, summary, - tree, wc_cmd, + deps, env_cmd, find_cmd, format_cmd, grep_cmd, json_cmd, local_llm, log_cmd, ls, pipe_cmd, + read, summary, tree, wc_cmd, }; use anyhow::{Context, Result}; @@ -559,6 +560,16 @@ enum Commands { min_occurrences: usize, }, + /// Execute a shell command via the RTK native executor (filters + tracking) + Run { + /// Command string to execute (use -c for shell-like invocation) + #[arg(short = 'c', long = "command")] + command: Option, + /// Positional command arguments (alternative to 
-c) + #[arg(trailing_var_arg = true, allow_hyphen_values = true)] + args: Vec, + }, + /// Execute command without filtering but track usage Proxy { /// Command and arguments to execute @@ -566,6 +577,17 @@ enum Commands { args: Vec, }, + /// Read stdin, apply filter, print filtered output (Unix pipe mode) + Pipe { + /// Filter name (cargo-test, pytest, grep, find, git-log, etc.) + #[arg(short, long)] + filter: Option, + + /// Pass stdin through without filtering + #[arg(long)] + passthrough: bool, + }, + /// Trust project-local TOML filters in current directory Trust { /// List all trusted projects @@ -686,10 +708,21 @@ enum Commands { #[derive(Subcommand)] enum HookCommands { + /// Process Claude Code PreToolUse hook (reads JSON from stdin) + Claude, /// Process Gemini CLI BeforeTool hook (reads JSON from stdin) Gemini, /// Process Copilot preToolUse hook (VS Code + Copilot CLI, reads JSON from stdin) Copilot, + /// Check how a command would be rewritten by the hook engine (dry-run) + Check { + /// Target agent + #[arg(long, default_value = "claude")] + agent: String, + /// Command to check + #[arg(trailing_var_arg = true, allow_hyphen_values = true)] + command: Vec, + }, } #[derive(Subcommand)] @@ -1041,6 +1074,7 @@ const RTK_META_COMMANDS: &[&str] = &[ "init", "config", "proxy", + "run", "hook-audit", "cc-economics", "verify", @@ -1915,13 +1949,31 @@ fn run_cli() -> Result { 0 } - Commands::Hook { command } => { - match command { - HookCommands::Gemini => hooks::hook_cmd::run_gemini()?, - HookCommands::Copilot => hooks::hook_cmd::run_copilot()?, + Commands::Hook { command } => match command { + HookCommands::Claude => { + cmd::hook::claude::run()?; + 0 } - 0 - } + HookCommands::Gemini => { + hooks::hook_cmd::run_gemini()?; + 0 + } + HookCommands::Copilot => { + hooks::hook_cmd::run_copilot()?; + 0 + } + HookCommands::Check { agent, command } => { + let raw = command.join(" "); + let (rewritten, allowed, exit_code) = + 
cmd::hook::format_for_claude(cmd::check_for_hook(&raw, &agent)); + if allowed { + println!("{}", rewritten); + } else { + eprintln!("{}", rewritten); + } + exit_code + } + }, Commands::Rewrite { args } => { let cmd = args.join(" "); @@ -1929,6 +1981,23 @@ fn run_cli() -> Result { 0 } + Commands::Pipe { + filter, + passthrough, + } => { + pipe_cmd::run(filter.as_deref(), passthrough)?; + 0 + } + + Commands::Run { command, args } => { + let raw = match command { + Some(c) => c, + None if !args.is_empty() => args.join(" "), + None => String::new(), + }; + cmd::exec::execute(&raw, cli.verbose)? + } + Commands::Proxy { args } => { use std::io::{Read, Write}; use std::process::Stdio; @@ -2344,7 +2413,7 @@ mod tests { // RTK meta-commands should produce parse errors (not fall through to raw execution). // Skip "proxy" because it uses trailing_var_arg (accepts any args by design). for cmd in RTK_META_COMMANDS { - if matches!(*cmd, "proxy" | "rewrite" | "session") { + if matches!(*cmd, "proxy" | "run" | "rewrite" | "session") { continue; // these use trailing_var_arg (accept any args by design) } let result = Cli::try_parse_from(["rtk", cmd, "--nonexistent-flag-xyz"]); @@ -2356,6 +2425,71 @@ mod tests { } } + #[test] + fn test_run_command_with_dash_c() { + let cli = Cli::try_parse_from(["rtk", "run", "-c", "git status && echo done"]).unwrap(); + match cli.command { + Commands::Run { command, args } => { + assert_eq!(command, Some("git status && echo done".to_string())); + assert!(args.is_empty()); + } + _ => panic!("Expected Run command"), + } + } + + #[test] + fn test_run_command_positional_args() { + let cli = Cli::try_parse_from(["rtk", "run", "echo", "hello"]).unwrap(); + match cli.command { + Commands::Run { command, args } => { + assert!(command.is_none()); + assert_eq!(args, vec!["echo", "hello"]); + } + _ => panic!("Expected Run command"), + } + } + + #[test] + fn test_hook_claude_parses() { + let cli = Cli::try_parse_from(["rtk", "hook", "claude"]).unwrap(); + 
assert!(matches!( + cli.command, + Commands::Hook { + command: HookCommands::Claude + } + )); + } + + #[test] + fn test_hook_check_parses() { + let cli = Cli::try_parse_from(["rtk", "hook", "check", "git", "status"]).unwrap(); + match cli.command { + Commands::Hook { + command: HookCommands::Check { agent, command }, + } => { + assert_eq!(agent, "claude"); + assert_eq!(command, vec!["git", "status"]); + } + _ => panic!("Expected Hook Check command"), + } + } + + #[test] + fn test_hook_check_with_agent() { + let cli = + Cli::try_parse_from(["rtk", "hook", "check", "--agent", "gemini", "cargo", "test"]) + .unwrap(); + match cli.command { + Commands::Hook { + command: HookCommands::Check { agent, command }, + } => { + assert_eq!(agent, "gemini"); + assert_eq!(command, vec!["cargo", "test"]); + } + _ => panic!("Expected Hook Check command"), + } + } + #[test] fn test_meta_command_list_is_complete() { // Verify all meta-commands are in the guard list by checking they parse with valid syntax @@ -2366,6 +2500,7 @@ mod tests { vec!["rtk", "init"], vec!["rtk", "config"], vec!["rtk", "proxy", "echo", "hi"], + vec!["rtk", "run", "-c", "echo hi"], vec!["rtk", "hook-audit"], vec!["rtk", "cc-economics"], ]; From 54861ab5b1de2bc0dc047d7ab6ea066bd8afb0eb Mon Sep 17 00:00:00 2001 From: aesoft <43991222+aeppling@users.noreply.github.com> Date: Wed, 1 Apr 2026 21:42:41 +0200 Subject: [PATCH 02/44] fix(lexer): re-add strip_quotes + remove unused import --- src/cmd/mod.rs | 1 - src/discover/lexer.rs | 31 +++++++++++++++++++++++++++++++ 2 files changed, 31 insertions(+), 1 deletion(-) diff --git a/src/cmd/mod.rs b/src/cmd/mod.rs index fff0e85ce..3b1395bd9 100644 --- a/src/cmd/mod.rs +++ b/src/cmd/mod.rs @@ -15,5 +15,4 @@ pub(crate) mod lexer { #[cfg(test)] pub(crate) mod test_helpers; -pub use exec::execute; pub use hook::check_for_hook; diff --git a/src/discover/lexer.rs b/src/discover/lexer.rs index 727f5c27a..a5ea114ab 100644 --- a/src/discover/lexer.rs +++ b/src/discover/lexer.rs @@ 
-258,6 +258,17 @@ fn flush_arg(tokens: &mut Vec, current: &mut String, offset: usize) } } +pub fn strip_quotes(s: &str) -> String { + let chars: Vec = s.chars().collect(); + if chars.len() >= 2 + && ((chars[0] == '"' && chars[chars.len() - 1] == '"') + || (chars[0] == '\'' && chars[chars.len() - 1] == '\'')) + { + return chars[1..chars.len() - 1].iter().collect(); + } + s.to_string() +} + pub fn shell_split(input: &str) -> Vec { let mut tokens = Vec::new(); let mut current = String::new(); @@ -921,4 +932,24 @@ mod tests { fn test_shell_split_multiple_spaces() { assert_eq!(shell_split("a b c"), vec!["a", "b", "c"]); } + + #[test] + fn test_strip_quotes_double() { + assert_eq!(strip_quotes("\"hello\""), "hello"); + } + + #[test] + fn test_strip_quotes_single() { + assert_eq!(strip_quotes("'hello'"), "hello"); + } + + #[test] + fn test_strip_quotes_none() { + assert_eq!(strip_quotes("hello"), "hello"); + } + + #[test] + fn test_strip_quotes_mismatched() { + assert_eq!(strip_quotes("\"hello'"), "\"hello'"); + } } From 22892f02439fbd85593b0925e558852dafcb5da0 Mon Sep 17 00:00:00 2001 From: aesoft <43991222+aeppling@users.noreply.github.com> Date: Sat, 4 Apr 2026 18:45:30 +0200 Subject: [PATCH 03/44] feat(hooks): native hook handlers, remove bash scripts What changed: - Add `run_claude()` with permissions check, audit logging, tool_input preservation, and Ask/Allow/Deny support - Add `run_cursor()` with flat JSON format (`permission`/`updated_input`) - Add `audit_log()` (best-effort append when RTK_HOOK_AUDIT=1) - Fix `run_gemini()` to load exclude_commands from config - Convert all hook stdout to `writeln!` with `#[deny(clippy::print_stdout)]` to prevent JSON protocol corruption (Claude Code bug #4669) - Replace string-based heredoc detection with lexer-based `has_heredoc()` (quote-aware: `<<` inside quotes no longer false-positives) - Add shell prefix peeling (noglob, command, builtin, exec, nocorrect) to `rewrite_segment()` in registry.rs - Fix python3 -m pytest 
pattern, add pip show, add gt (Graphite) to RULES - Remove `command ` from IGNORED_PREFIXES (was blocking `command git status`) - Register `rtk hook claude`/`rtk hook cursor` binary commands in settings.json instead of writing bash script files - Add legacy script migration (deletes old rtk-rewrite.sh on `rtk init`) - Simplify hook_check and integrity for script-free model --- hooks/claude/rtk-rewrite.sh | 98 -- hooks/claude/test-rtk-rewrite.sh | 442 --------- hooks/copilot/test-rtk-rewrite.sh | 293 ------ hooks/cursor/rtk-rewrite.sh | 54 - src/cmd/analysis.rs | 693 ------------- src/cmd/builtins.rs | 258 ----- src/cmd/exec.rs | 472 --------- src/cmd/filters.rs | 326 ------ src/cmd/hook/claude.rs | 539 ---------- src/cmd/hook/mod.rs | 1536 ----------------------------- src/cmd/mod.rs | 18 - src/cmd/predicates.rs | 75 -- src/cmd/test_helpers.rs | 32 - src/discover/registry.rs | 98 +- src/discover/rules.rs | 16 +- src/hooks/constants.rs | 5 + src/hooks/hook_check.rs | 70 +- src/hooks/hook_cmd.rs | 458 ++++++++- src/hooks/init.rs | 482 +++++---- src/hooks/integrity.rs | 34 +- src/hooks/mod.rs | 1 + src/main.rs | 46 +- 22 files changed, 911 insertions(+), 5135 deletions(-) delete mode 100644 hooks/claude/rtk-rewrite.sh delete mode 100644 hooks/claude/test-rtk-rewrite.sh delete mode 100644 hooks/copilot/test-rtk-rewrite.sh delete mode 100644 hooks/cursor/rtk-rewrite.sh delete mode 100644 src/cmd/analysis.rs delete mode 100644 src/cmd/builtins.rs delete mode 100644 src/cmd/exec.rs delete mode 100644 src/cmd/filters.rs delete mode 100644 src/cmd/hook/claude.rs delete mode 100644 src/cmd/hook/mod.rs delete mode 100644 src/cmd/mod.rs delete mode 100644 src/cmd/predicates.rs delete mode 100644 src/cmd/test_helpers.rs diff --git a/hooks/claude/rtk-rewrite.sh b/hooks/claude/rtk-rewrite.sh deleted file mode 100644 index f7a42b5d4..000000000 --- a/hooks/claude/rtk-rewrite.sh +++ /dev/null @@ -1,98 +0,0 @@ -#!/usr/bin/env bash -# rtk-hook-version: 3 -# RTK Claude Code hook — 
rewrites commands to use rtk for token savings. -# Requires: rtk >= 0.23.0, jq -# -# This is a thin delegating hook: all rewrite logic lives in `rtk rewrite`, -# which is the single source of truth (src/discover/registry.rs). -# To add or change rewrite rules, edit the Rust registry — not this file. -# -# Exit code protocol for `rtk rewrite`: -# 0 + stdout Rewrite found, no deny/ask rule matched → auto-allow -# 1 No RTK equivalent → pass through unchanged -# 2 Deny rule matched → pass through (Claude Code native deny handles it) -# 3 + stdout Ask rule matched → rewrite but let Claude Code prompt the user - -if ! command -v jq &>/dev/null; then - echo "[rtk] WARNING: jq is not installed. Hook cannot rewrite commands. Install jq: https://jqlang.github.io/jq/download/" >&2 - exit 0 -fi - -if ! command -v rtk &>/dev/null; then - echo "[rtk] WARNING: rtk is not installed or not in PATH. Hook cannot rewrite commands. Install: https://github.com/rtk-ai/rtk#installation" >&2 - exit 0 -fi - -# Version guard: rtk rewrite was added in 0.23.0. -# Older binaries: warn once and exit cleanly (no silent failure). -RTK_VERSION=$(rtk --version 2>/dev/null | grep -oE '[0-9]+\.[0-9]+\.[0-9]+' | head -1) -if [ -n "$RTK_VERSION" ]; then - MAJOR=$(echo "$RTK_VERSION" | cut -d. -f1) - MINOR=$(echo "$RTK_VERSION" | cut -d. -f2) - # Require >= 0.23.0 - if [ "$MAJOR" -eq 0 ] && [ "$MINOR" -lt 23 ]; then - echo "[rtk] WARNING: rtk $RTK_VERSION is too old (need >= 0.23.0). Upgrade: cargo install rtk" >&2 - exit 0 - fi -fi - -INPUT=$(cat) -CMD=$(echo "$INPUT" | jq -r '.tool_input.command // empty') - -if [ -z "$CMD" ]; then - exit 0 -fi - -# Delegate all rewrite + permission logic to the Rust binary. -REWRITTEN=$(rtk rewrite "$CMD" 2>/dev/null) -EXIT_CODE=$? - -case $EXIT_CODE in - 0) - # Rewrite found, no permission rules matched — safe to auto-allow. - # If the output is identical, the command was already using RTK. 
- [ "$CMD" = "$REWRITTEN" ] && exit 0 - ;; - 1) - # No RTK equivalent — pass through unchanged. - exit 0 - ;; - 2) - # Deny rule matched — let Claude Code's native deny rule handle it. - exit 0 - ;; - 3) - # Ask rule matched — rewrite the command but do NOT auto-allow so that - # Claude Code prompts the user for confirmation. - ;; - *) - exit 0 - ;; -esac - -ORIGINAL_INPUT=$(echo "$INPUT" | jq -c '.tool_input') -UPDATED_INPUT=$(echo "$ORIGINAL_INPUT" | jq --arg cmd "$REWRITTEN" '.command = $cmd') - -if [ "$EXIT_CODE" -eq 3 ]; then - # Ask: rewrite the command, omit permissionDecision so Claude Code prompts. - jq -n \ - --argjson updated "$UPDATED_INPUT" \ - '{ - "hookSpecificOutput": { - "hookEventName": "PreToolUse", - "updatedInput": $updated - } - }' -else - # Allow: rewrite the command and auto-allow. - jq -n \ - --argjson updated "$UPDATED_INPUT" \ - '{ - "hookSpecificOutput": { - "hookEventName": "PreToolUse", - "permissionDecision": "allow", - "permissionDecisionReason": "RTK auto-rewrite", - "updatedInput": $updated - } - }' -fi diff --git a/hooks/claude/test-rtk-rewrite.sh b/hooks/claude/test-rtk-rewrite.sh deleted file mode 100644 index 85103163b..000000000 --- a/hooks/claude/test-rtk-rewrite.sh +++ /dev/null @@ -1,442 +0,0 @@ -#!/usr/bin/env bash -# Test suite for rtk-rewrite.sh -# Feeds mock JSON through the hook and verifies the rewritten commands. 
-# -# Usage: bash ~/.claude/hooks/test-rtk-rewrite.sh - -HOOK="${HOOK:-$HOME/.claude/hooks/rtk-rewrite.sh}" -PASS=0 -FAIL=0 -TOTAL=0 - -# Colors -GREEN='\033[32m' -RED='\033[31m' -DIM='\033[2m' -RESET='\033[0m' - -test_rewrite() { - local description="$1" - local input_cmd="$2" - local expected_cmd="$3" # empty string = expect no rewrite - TOTAL=$((TOTAL + 1)) - - local input_json - input_json=$(jq -n --arg cmd "$input_cmd" '{"tool_name":"Bash","tool_input":{"command":$cmd}}') - local output - output=$(echo "$input_json" | bash "$HOOK" 2>/dev/null) || true - - if [ -z "$expected_cmd" ]; then - # Expect no rewrite (hook exits 0 with no output) - if [ -z "$output" ]; then - printf " ${GREEN}PASS${RESET} %s ${DIM}→ (no rewrite)${RESET}\n" "$description" - PASS=$((PASS + 1)) - else - local actual - actual=$(echo "$output" | jq -r '.hookSpecificOutput.updatedInput.command // empty') - printf " ${RED}FAIL${RESET} %s\n" "$description" - printf " expected: (no rewrite)\n" - printf " actual: %s\n" "$actual" - FAIL=$((FAIL + 1)) - fi - else - local actual - actual=$(echo "$output" | jq -r '.hookSpecificOutput.updatedInput.command // empty' 2>/dev/null) - if [ "$actual" = "$expected_cmd" ]; then - printf " ${GREEN}PASS${RESET} %s ${DIM}→ %s${RESET}\n" "$description" "$actual" - PASS=$((PASS + 1)) - else - printf " ${RED}FAIL${RESET} %s\n" "$description" - printf " expected: %s\n" "$expected_cmd" - printf " actual: %s\n" "$actual" - FAIL=$((FAIL + 1)) - fi - fi -} - -echo "============================================" -echo " RTK Rewrite Hook Test Suite" -echo "============================================" -echo "" - -# ---- SECTION 1: Existing patterns (regression tests) ---- -echo "--- Existing patterns (regression) ---" -test_rewrite "git status" \ - "git status" \ - "rtk git status" - -test_rewrite "git log --oneline -10" \ - "git log --oneline -10" \ - "rtk git log --oneline -10" - -test_rewrite "git diff HEAD" \ - "git diff HEAD" \ - "rtk git diff HEAD" - -test_rewrite 
"git show abc123" \ - "git show abc123" \ - "rtk git show abc123" - -test_rewrite "git add ." \ - "git add ." \ - "rtk git add ." - -test_rewrite "gh pr list" \ - "gh pr list" \ - "rtk gh pr list" - -test_rewrite "npx playwright test" \ - "npx playwright test" \ - "rtk playwright test" - -test_rewrite "ls -la" \ - "ls -la" \ - "rtk ls -la" - -test_rewrite "curl -s https://example.com" \ - "curl -s https://example.com" \ - "rtk curl -s https://example.com" - -test_rewrite "cat package.json" \ - "cat package.json" \ - "rtk read package.json" - -test_rewrite "grep -rn pattern src/" \ - "grep -rn pattern src/" \ - "rtk grep -rn pattern src/" - -test_rewrite "rg pattern src/" \ - "rg pattern src/" \ - "rtk grep pattern src/" - -test_rewrite "cargo test" \ - "cargo test" \ - "rtk cargo test" - -test_rewrite "npx prisma migrate" \ - "npx prisma migrate" \ - "rtk prisma migrate" - -echo "" - -# ---- SECTION 2: Env var prefix handling (THE BIG FIX) ---- -echo "--- Env var prefix handling (new) ---" -test_rewrite "env + playwright" \ - "TEST_SESSION_ID=2 npx playwright test --config=foo" \ - "TEST_SESSION_ID=2 rtk playwright test --config=foo" - -test_rewrite "env + git status" \ - "GIT_PAGER=cat git status" \ - "GIT_PAGER=cat rtk git status" - -test_rewrite "env + git log" \ - "GIT_PAGER=cat git log --oneline -10" \ - "GIT_PAGER=cat rtk git log --oneline -10" - -test_rewrite "multi env + vitest" \ - "NODE_ENV=test CI=1 npx vitest run" \ - "NODE_ENV=test CI=1 rtk vitest run" - -test_rewrite "env + ls" \ - "LANG=C ls -la" \ - "LANG=C rtk ls -la" - -test_rewrite "env + npm run" \ - "NODE_ENV=test npm run test:e2e" \ - "NODE_ENV=test rtk npm test:e2e" - -test_rewrite "env + docker compose (unsupported subcommand, NOT rewritten)" \ - "COMPOSE_PROJECT_NAME=test docker compose up -d" \ - "" - -test_rewrite "env + docker compose logs (supported, rewritten)" \ - "COMPOSE_PROJECT_NAME=test docker compose logs web" \ - "COMPOSE_PROJECT_NAME=test rtk docker compose logs web" - -echo "" 
- -# ---- SECTION 3: New patterns ---- -echo "--- New patterns ---" -test_rewrite "npm run test:e2e" \ - "npm run test:e2e" \ - "rtk npm test:e2e" - -test_rewrite "npm run build" \ - "npm run build" \ - "rtk npm build" - -test_rewrite "npm test" \ - "npm test" \ - "rtk npm test" - -test_rewrite "vue-tsc -b" \ - "vue-tsc -b" \ - "rtk tsc -b" - -test_rewrite "npx vue-tsc --noEmit" \ - "npx vue-tsc --noEmit" \ - "rtk tsc --noEmit" - -test_rewrite "docker compose up -d (NOT rewritten — unsupported by rtk)" \ - "docker compose up -d" \ - "" - -test_rewrite "docker compose logs postgrest" \ - "docker compose logs postgrest" \ - "rtk docker compose logs postgrest" - -test_rewrite "docker compose ps" \ - "docker compose ps" \ - "rtk docker compose ps" - -test_rewrite "docker compose build" \ - "docker compose build" \ - "rtk docker compose build" - -test_rewrite "docker compose down (NOT rewritten — unsupported by rtk)" \ - "docker compose down" \ - "" - -test_rewrite "docker compose -f file.yml up (NOT rewritten — flag before subcommand)" \ - "docker compose -f docker-compose.preview.yml --project-name myapp up -d --build" \ - "" - -test_rewrite "docker run --rm postgres" \ - "docker run --rm postgres" \ - "rtk docker run --rm postgres" - -test_rewrite "docker exec -it db psql" \ - "docker exec -it db psql" \ - "rtk docker exec -it db psql" - -test_rewrite "find (NOT rewritten — different arg format)" \ - "find . 
-name '*.ts'" \ - "" - -test_rewrite "tree (NOT rewritten — different arg format)" \ - "tree src/" \ - "" - -test_rewrite "wget (NOT rewritten — different arg format)" \ - "wget https://example.com/file" \ - "" - -test_rewrite "gh api repos/owner/repo" \ - "gh api repos/owner/repo" \ - "rtk gh api repos/owner/repo" - -test_rewrite "gh release list" \ - "gh release list" \ - "rtk gh release list" - -test_rewrite "kubectl describe pod foo" \ - "kubectl describe pod foo" \ - "rtk kubectl describe pod foo" - -test_rewrite "kubectl apply -f deploy.yaml" \ - "kubectl apply -f deploy.yaml" \ - "rtk kubectl apply -f deploy.yaml" - -echo "" - -# ---- SECTION 3b: RTK_DISABLED and redirect fixes (#345, #346) ---- -echo "--- RTK_DISABLED (#345) ---" -test_rewrite "RTK_DISABLED=1 git status (no rewrite)" \ - "RTK_DISABLED=1 git status" \ - "" - -test_rewrite "RTK_DISABLED=1 cargo test (no rewrite)" \ - "RTK_DISABLED=1 cargo test" \ - "" - -test_rewrite "FOO=1 RTK_DISABLED=1 git status (no rewrite)" \ - "FOO=1 RTK_DISABLED=1 git status" \ - "" - -echo "" -echo "--- Redirect operators (#346) ---" -test_rewrite "cargo test 2>&1 | head" \ - "cargo test 2>&1 | head" \ - "rtk cargo test 2>&1 | head" - -test_rewrite "cargo test 2>&1" \ - "cargo test 2>&1" \ - "rtk cargo test 2>&1" - -test_rewrite "cargo test &>/dev/null" \ - "cargo test &>/dev/null" \ - "rtk cargo test &>/dev/null" - -# Note: the bash hook rewrites only the first command segment (sed-based); -# full compound rewriting (both sides of &) is handled by `rtk rewrite` (Rust). -# The critical behavior tested here: `&` after `cargo test` is NOT mistaken for -# a redirect — the hook still rewrites cargo test, no crash. 
-test_rewrite "cargo test & git status (bash hook rewrites first segment only)" \ - "cargo test & git status" \ - "rtk cargo test & git status" - -echo "" - -# ---- SECTION 4: Vitest edge case (fixed double "run" bug) ---- -echo "--- Vitest run dedup ---" -test_rewrite "vitest (no args)" \ - "vitest" \ - "rtk vitest run" - -test_rewrite "vitest run (no double run)" \ - "vitest run" \ - "rtk vitest run" - -test_rewrite "vitest run --reporter" \ - "vitest run --reporter=verbose" \ - "rtk vitest run --reporter=verbose" - -test_rewrite "npx vitest run" \ - "npx vitest run" \ - "rtk vitest run" - -test_rewrite "pnpm vitest run --coverage" \ - "pnpm vitest run --coverage" \ - "rtk vitest run --coverage" - -echo "" - -# ---- SECTION 5: Should NOT rewrite ---- -echo "--- Should NOT rewrite ---" -test_rewrite "already rtk" \ - "rtk git status" \ - "" - -test_rewrite "heredoc" \ - "cat <<'EOF' -hello -EOF" \ - "" - -test_rewrite "echo (no pattern)" \ - "echo hello world" \ - "" - -test_rewrite "cd (no pattern)" \ - "cd /tmp" \ - "" - -test_rewrite "mkdir (no pattern)" \ - "mkdir -p foo/bar" \ - "" - -test_rewrite "python3 (no pattern)" \ - "python3 script.py" \ - "" - -test_rewrite "node (no pattern)" \ - "node -e 'console.log(1)'" \ - "" - -echo "" - -# ---- SECTION 6: Audit logging ---- -echo "--- Audit logging (RTK_HOOK_AUDIT=1) ---" - -AUDIT_TMPDIR=$(mktemp -d) -trap "rm -rf $AUDIT_TMPDIR" EXIT - -test_audit_log() { - local description="$1" - local input_cmd="$2" - local expected_action="$3" - TOTAL=$((TOTAL + 1)) - - # Clean log - rm -f "$AUDIT_TMPDIR/hook-audit.log" - - local input_json - input_json=$(jq -n --arg cmd "$input_cmd" '{"tool_name":"Bash","tool_input":{"command":$cmd}}') - echo "$input_json" | RTK_HOOK_AUDIT=1 RTK_AUDIT_DIR="$AUDIT_TMPDIR" bash "$HOOK" 2>/dev/null || true - - if [ ! 
-f "$AUDIT_TMPDIR/hook-audit.log" ]; then - printf " ${RED}FAIL${RESET} %s (no log file created)\n" "$description" - FAIL=$((FAIL + 1)) - return - fi - - local log_line - log_line=$(head -1 "$AUDIT_TMPDIR/hook-audit.log") - local actual_action - actual_action=$(echo "$log_line" | cut -d'|' -f2 | tr -d ' ') - - if [ "$actual_action" = "$expected_action" ]; then - printf " ${GREEN}PASS${RESET} %s ${DIM}→ %s${RESET}\n" "$description" "$actual_action" - PASS=$((PASS + 1)) - else - printf " ${RED}FAIL${RESET} %s\n" "$description" - printf " expected action: %s\n" "$expected_action" - printf " actual action: %s\n" "$actual_action" - printf " log line: %s\n" "$log_line" - FAIL=$((FAIL + 1)) - fi -} - -test_audit_log "audit: rewrite git status" \ - "git status" \ - "rewrite" - -test_audit_log "audit: skip already_rtk" \ - "rtk git status" \ - "skip:already_rtk" - -test_audit_log "audit: skip heredoc" \ - "cat <<'EOF' -hello -EOF" \ - "skip:heredoc" - -test_audit_log "audit: skip no_match" \ - "echo hello world" \ - "skip:no_match" - -test_audit_log "audit: rewrite cargo test" \ - "cargo test" \ - "rewrite" - -# Test log format (4 pipe-separated fields) -rm -f "$AUDIT_TMPDIR/hook-audit.log" -input_json=$(jq -n --arg cmd "git status" '{"tool_name":"Bash","tool_input":{"command":$cmd}}') -echo "$input_json" | RTK_HOOK_AUDIT=1 RTK_AUDIT_DIR="$AUDIT_TMPDIR" bash "$HOOK" 2>/dev/null || true -TOTAL=$((TOTAL + 1)) -log_line=$(cat "$AUDIT_TMPDIR/hook-audit.log" 2>/dev/null || echo "") -field_count=$(echo "$log_line" | awk -F' \\| ' '{print NF}') -if [ "$field_count" = "4" ]; then - printf " ${GREEN}PASS${RESET} audit: log format has 4 fields ${DIM}→ %s${RESET}\n" "$log_line" - PASS=$((PASS + 1)) -else - printf " ${RED}FAIL${RESET} audit: log format (expected 4 fields, got %s)\n" "$field_count" - printf " log line: %s\n" "$log_line" - FAIL=$((FAIL + 1)) -fi - -# Test no log when RTK_HOOK_AUDIT is unset -rm -f "$AUDIT_TMPDIR/hook-audit.log" -input_json=$(jq -n --arg cmd "git status" 
'{"tool_name":"Bash","tool_input":{"command":$cmd}}') -echo "$input_json" | RTK_AUDIT_DIR="$AUDIT_TMPDIR" bash "$HOOK" 2>/dev/null || true -TOTAL=$((TOTAL + 1)) -if [ ! -f "$AUDIT_TMPDIR/hook-audit.log" ]; then - printf " ${GREEN}PASS${RESET} audit: no log when RTK_HOOK_AUDIT unset\n" - PASS=$((PASS + 1)) -else - printf " ${RED}FAIL${RESET} audit: log created when RTK_HOOK_AUDIT unset\n" - FAIL=$((FAIL + 1)) -fi - -echo "" - -# ---- SUMMARY ---- -echo "============================================" -if [ $FAIL -eq 0 ]; then - printf " ${GREEN}ALL $TOTAL TESTS PASSED${RESET}\n" -else - printf " ${RED}$FAIL FAILED${RESET} / $TOTAL total ($PASS passed)\n" -fi -echo "============================================" - -exit $FAIL diff --git a/hooks/copilot/test-rtk-rewrite.sh b/hooks/copilot/test-rtk-rewrite.sh deleted file mode 100644 index f1cca9497..000000000 --- a/hooks/copilot/test-rtk-rewrite.sh +++ /dev/null @@ -1,293 +0,0 @@ -#!/usr/bin/env bash -# Test suite for rtk hook (cross-platform preToolUse handler). -# Feeds mock preToolUse JSON through `rtk hook` and verifies allow/deny decisions. 
-# -# Usage: bash hooks/test-copilot-rtk-rewrite.sh -# -# Copilot CLI input format: -# {"toolName":"bash","toolArgs":"{\"command\":\"...\"}"} -# Output on intercept: {"permissionDecision":"deny","permissionDecisionReason":"..."} -# -# VS Code Copilot Chat input format: -# {"tool_name":"Bash","tool_input":{"command":"..."}} -# Output on intercept: {"hookSpecificOutput":{"permissionDecision":"allow","updatedInput":{...}}} -# -# Output on pass-through: empty (exit 0) - -RTK="${RTK:-rtk}" -PASS=0 -FAIL=0 -TOTAL=0 - -# Colors -GREEN='\033[32m' -RED='\033[31m' -DIM='\033[2m' -RESET='\033[0m' - -# Build a Copilot CLI preToolUse input JSON -copilot_bash_input() { - local cmd="$1" - local tool_args - tool_args=$(jq -cn --arg cmd "$cmd" '{"command":$cmd}') - jq -cn --arg ta "$tool_args" '{"toolName":"bash","toolArgs":$ta}' -} - -# Build a VS Code Copilot Chat preToolUse input JSON -vscode_bash_input() { - local cmd="$1" - jq -cn --arg cmd "$cmd" '{"tool_name":"Bash","tool_input":{"command":$cmd}}' -} - -# Build a non-bash tool input -tool_input() { - local tool_name="$1" - jq -cn --arg t "$tool_name" '{"toolName":$t,"toolArgs":"{}"}' -} - -# Assert Copilot CLI: hook denies and reason contains the expected rtk command -test_deny() { - local description="$1" - local input_cmd="$2" - local expected_rtk="$3" - TOTAL=$((TOTAL + 1)) - - local output - output=$(copilot_bash_input "$input_cmd" | "$RTK" hook 2>/dev/null) || true - - local decision reason - decision=$(echo "$output" | jq -r '.permissionDecision // empty' 2>/dev/null) - reason=$(echo "$output" | jq -r '.permissionDecisionReason // empty' 2>/dev/null) - - if [ "$decision" = "deny" ] && echo "$reason" | grep -qF "$expected_rtk"; then - printf " ${GREEN}DENY${RESET} %s ${DIM}→ %s${RESET}\n" "$description" "$expected_rtk" - PASS=$((PASS + 1)) - else - printf " ${RED}FAIL${RESET} %s\n" "$description" - printf " expected decision: deny, reason containing: %s\n" "$expected_rtk" - printf " actual decision: %s\n" "$decision" - 
printf " actual reason: %s\n" "$reason" - FAIL=$((FAIL + 1)) - fi -} - -# Assert VS Code Copilot Chat: hook returns updatedInput (allow) with rewritten command -test_vscode_rewrite() { - local description="$1" - local input_cmd="$2" - local expected_rtk="$3" - TOTAL=$((TOTAL + 1)) - - local output - output=$(vscode_bash_input "$input_cmd" | "$RTK" hook 2>/dev/null) || true - - local decision updated_cmd - decision=$(echo "$output" | jq -r '.hookSpecificOutput.permissionDecision // empty' 2>/dev/null) - updated_cmd=$(echo "$output" | jq -r '.hookSpecificOutput.updatedInput.command // empty' 2>/dev/null) - - if [ "$decision" = "allow" ] && echo "$updated_cmd" | grep -qF "$expected_rtk"; then - printf " ${GREEN}REWRITE${RESET} %s ${DIM}→ %s${RESET}\n" "$description" "$updated_cmd" - PASS=$((PASS + 1)) - else - printf " ${RED}FAIL${RESET} %s\n" "$description" - printf " expected decision: allow, updatedInput containing: %s\n" "$expected_rtk" - printf " actual decision: %s\n" "$decision" - printf " actual updatedInput: %s\n" "$updated_cmd" - FAIL=$((FAIL + 1)) - fi -} - -# Assert the hook emits no output (pass-through) -test_allow() { - local description="$1" - local input="$2" - TOTAL=$((TOTAL + 1)) - - local output - output=$(echo "$input" | "$RTK" hook 2>/dev/null) || true - - if [ -z "$output" ]; then - printf " ${GREEN}PASS${RESET} %s ${DIM}→ (allow)${RESET}\n" "$description" - PASS=$((PASS + 1)) - else - local decision - decision=$(echo "$output" | jq -r '.permissionDecision // .hookSpecificOutput.permissionDecision // empty' 2>/dev/null) - printf " ${RED}FAIL${RESET} %s\n" "$description" - printf " expected: (no output)\n" - printf " actual: permissionDecision=%s\n" "$decision" - FAIL=$((FAIL + 1)) - fi -} - -echo "============================================" -echo " RTK Hook Test Suite (rtk hook)" -echo "============================================" -echo "" - -# ---- SECTION 1: Copilot CLI — commands that should be denied ---- -echo "--- Copilot CLI: 
intercepted (deny with rtk suggestion) ---" - -test_deny "git status" \ - "git status" \ - "rtk git status" - -test_deny "git log --oneline -10" \ - "git log --oneline -10" \ - "rtk git log" - -test_deny "git diff HEAD" \ - "git diff HEAD" \ - "rtk git diff" - -test_deny "cargo test" \ - "cargo test" \ - "rtk cargo test" - -test_deny "cargo clippy --all-targets" \ - "cargo clippy --all-targets" \ - "rtk cargo clippy" - -test_deny "cargo build" \ - "cargo build" \ - "rtk cargo build" - -test_deny "grep -rn pattern src/" \ - "grep -rn pattern src/" \ - "rtk grep" - -test_deny "gh pr list" \ - "gh pr list" \ - "rtk gh" - -echo "" - -# ---- SECTION 2: VS Code Copilot Chat — commands that should be rewritten via updatedInput ---- -echo "--- VS Code Copilot Chat: intercepted (updatedInput rewrite) ---" - -test_vscode_rewrite "git status" \ - "git status" \ - "rtk git status" - -test_vscode_rewrite "cargo test" \ - "cargo test" \ - "rtk cargo test" - -test_vscode_rewrite "gh pr list" \ - "gh pr list" \ - "rtk gh" - -echo "" - -# ---- SECTION 3: Pass-through cases ---- -echo "--- Pass-through (allow silently) ---" - -test_allow "Copilot CLI: already rtk: rtk git status" \ - "$(copilot_bash_input "rtk git status")" - -test_allow "Copilot CLI: already rtk: rtk cargo test" \ - "$(copilot_bash_input "rtk cargo test")" - -test_allow "Copilot CLI: heredoc" \ - "$(copilot_bash_input "cat <<'EOF' -hello -EOF")" - -test_allow "Copilot CLI: unknown command: htop" \ - "$(copilot_bash_input "htop")" - -test_allow "Copilot CLI: unknown command: echo" \ - "$(copilot_bash_input "echo hello world")" - -test_allow "Copilot CLI: non-bash tool: view" \ - "$(tool_input "view")" - -test_allow "Copilot CLI: non-bash tool: edit" \ - "$(tool_input "edit")" - -test_allow "VS Code: already rtk" \ - "$(vscode_bash_input "rtk git status")" - -test_allow "VS Code: non-bash tool: editFiles" \ - "$(jq -cn '{"tool_name":"editFiles"}')" - -echo "" - -# ---- SECTION 4: Output format assertions ---- -echo 
"--- Output format ---" - -# Copilot CLI output format -TOTAL=$((TOTAL + 1)) -raw_output=$(copilot_bash_input "git status" | "$RTK" hook 2>/dev/null) - -if echo "$raw_output" | jq . >/dev/null 2>&1; then - printf " ${GREEN}PASS${RESET} Copilot CLI: output is valid JSON\n" - PASS=$((PASS + 1)) -else - printf " ${RED}FAIL${RESET} Copilot CLI: output is not valid JSON: %s\n" "$raw_output" - FAIL=$((FAIL + 1)) -fi - -TOTAL=$((TOTAL + 1)) -decision=$(echo "$raw_output" | jq -r '.permissionDecision') -if [ "$decision" = "deny" ]; then - printf " ${GREEN}PASS${RESET} Copilot CLI: permissionDecision == \"deny\"\n" - PASS=$((PASS + 1)) -else - printf " ${RED}FAIL${RESET} Copilot CLI: expected \"deny\", got \"%s\"\n" "$decision" - FAIL=$((FAIL + 1)) -fi - -TOTAL=$((TOTAL + 1)) -reason=$(echo "$raw_output" | jq -r '.permissionDecisionReason') -if echo "$reason" | grep -qE '`rtk [^`]+`'; then - printf " ${GREEN}PASS${RESET} Copilot CLI: reason contains backtick-quoted rtk command ${DIM}→ %s${RESET}\n" "$reason" - PASS=$((PASS + 1)) -else - printf " ${RED}FAIL${RESET} Copilot CLI: reason missing backtick-quoted command: %s\n" "$reason" - FAIL=$((FAIL + 1)) -fi - -# VS Code output format -TOTAL=$((TOTAL + 1)) -vscode_output=$(vscode_bash_input "git status" | "$RTK" hook 2>/dev/null) - -if echo "$vscode_output" | jq . 
>/dev/null 2>&1; then - printf " ${GREEN}PASS${RESET} VS Code: output is valid JSON\n" - PASS=$((PASS + 1)) -else - printf " ${RED}FAIL${RESET} VS Code: output is not valid JSON: %s\n" "$vscode_output" - FAIL=$((FAIL + 1)) -fi - -TOTAL=$((TOTAL + 1)) -vscode_decision=$(echo "$vscode_output" | jq -r '.hookSpecificOutput.permissionDecision') -if [ "$vscode_decision" = "allow" ]; then - printf " ${GREEN}PASS${RESET} VS Code: hookSpecificOutput.permissionDecision == \"allow\"\n" - PASS=$((PASS + 1)) -else - printf " ${RED}FAIL${RESET} VS Code: expected \"allow\", got \"%s\"\n" "$vscode_decision" - FAIL=$((FAIL + 1)) -fi - -TOTAL=$((TOTAL + 1)) -vscode_updated=$(echo "$vscode_output" | jq -r '.hookSpecificOutput.updatedInput.command') -if echo "$vscode_updated" | grep -q "^rtk "; then - printf " ${GREEN}PASS${RESET} VS Code: updatedInput.command starts with rtk ${DIM}→ %s${RESET}\n" "$vscode_updated" - PASS=$((PASS + 1)) -else - printf " ${RED}FAIL${RESET} VS Code: updatedInput.command should start with rtk: %s\n" "$vscode_updated" - FAIL=$((FAIL + 1)) -fi - -echo "" - -# ---- SUMMARY ---- -echo "============================================" -if [ $FAIL -eq 0 ]; then - printf " ${GREEN}ALL $TOTAL TESTS PASSED${RESET}\n" -else - printf " ${RED}$FAIL FAILED${RESET} / $TOTAL total ($PASS passed)\n" -fi -echo "============================================" - -exit $FAIL diff --git a/hooks/cursor/rtk-rewrite.sh b/hooks/cursor/rtk-rewrite.sh deleted file mode 100644 index 4b80b260c..000000000 --- a/hooks/cursor/rtk-rewrite.sh +++ /dev/null @@ -1,54 +0,0 @@ -#!/usr/bin/env bash -# rtk-hook-version: 1 -# RTK Cursor Agent hook — rewrites shell commands to use rtk for token savings. -# Works with both Cursor editor and cursor-cli (they share ~/.cursor/hooks.json). -# Cursor preToolUse hook format: receives JSON on stdin, returns JSON on stdout. 
-# Requires: rtk >= 0.23.0, jq -# -# This is a thin delegating hook: all rewrite logic lives in `rtk rewrite`, -# which is the single source of truth (src/discover/registry.rs). -# To add or change rewrite rules, edit the Rust registry — not this file. - -if ! command -v jq &>/dev/null; then - echo "[rtk] WARNING: jq is not installed. Hook cannot rewrite commands. Install jq: https://jqlang.github.io/jq/download/" >&2 - exit 0 -fi - -if ! command -v rtk &>/dev/null; then - echo "[rtk] WARNING: rtk is not installed or not in PATH. Hook cannot rewrite commands. Install: https://github.com/rtk-ai/rtk#installation" >&2 - exit 0 -fi - -# Version guard: rtk rewrite was added in 0.23.0. -RTK_VERSION=$(rtk --version 2>/dev/null | grep -oE '[0-9]+\.[0-9]+\.[0-9]+' | head -1) -if [ -n "$RTK_VERSION" ]; then - MAJOR=$(echo "$RTK_VERSION" | cut -d. -f1) - MINOR=$(echo "$RTK_VERSION" | cut -d. -f2) - if [ "$MAJOR" -eq 0 ] && [ "$MINOR" -lt 23 ]; then - echo "[rtk] WARNING: rtk $RTK_VERSION is too old (need >= 0.23.0). Upgrade: cargo install rtk" >&2 - exit 0 - fi -fi - -INPUT=$(cat) -CMD=$(echo "$INPUT" | jq -r '.tool_input.command // empty') - -if [ -z "$CMD" ]; then - echo '{}' - exit 0 -fi - -# Delegate all rewrite logic to the Rust binary. -# rtk rewrite exits 1 when there's no rewrite — hook passes through silently. -REWRITTEN=$(rtk rewrite "$CMD" 2>/dev/null) || { echo '{}'; exit 0; } - -# No change — nothing to do. 
-if [ "$CMD" = "$REWRITTEN" ]; then - echo '{}' - exit 0 -fi - -jq -n --arg cmd "$REWRITTEN" '{ - "permission": "allow", - "updated_input": { "command": $cmd } -}' diff --git a/src/cmd/analysis.rs b/src/cmd/analysis.rs deleted file mode 100644 index 114e1ba98..000000000 --- a/src/cmd/analysis.rs +++ /dev/null @@ -1,693 +0,0 @@ -use super::lexer::{strip_quotes, ParsedToken, TokenKind}; - -#[derive(Debug, Clone, PartialEq)] -pub struct NativeCommand { - pub binary: String, - pub args: Vec, - pub operator: Option, -} - -pub fn split_safe_suffix(mut tokens: Vec) -> (Vec, String) { - let mut suffixes: Vec = Vec::new(); - - loop { - let n = tokens.len(); - let mut matched_len: usize = 0; - let mut matched_suffix = String::new(); - - if n >= 4 { - let t = &tokens[n - 3..]; - if matches!(t[0].kind, TokenKind::Pipe) - && matches!(t[1].kind, TokenKind::Arg) - && t[1].value == "tee" - && matches!(t[2].kind, TokenKind::Arg) - { - matched_suffix = format!("| tee {}", t[2].value); - matched_len = 3; - } - } - - if matched_len == 0 && n >= 4 { - let t = &tokens[n - 3..]; - if matches!(t[0].kind, TokenKind::Pipe) - && matches!(t[1].kind, TokenKind::Arg) - && matches!(t[1].value.as_str(), "head" | "tail") - && matches!(t[2].kind, TokenKind::Arg) - { - matched_suffix = format!("| {} {}", t[1].value, t[2].value); - matched_len = 3; - } - } - - if matched_len == 0 && n >= 3 { - let t = &tokens[n - 2..]; - if matches!(t[0].kind, TokenKind::Redirect) - && t[0].value.starts_with('2') - && t[0].value.contains('>') - && !t[0].value.contains('&') - && matches!(t[1].kind, TokenKind::Arg) - && t[1].value == "/dev/null" - { - matched_suffix = format!("{}{}", t[0].value, t[1].value); - matched_len = 2; - } - } - - if matched_len == 0 && n >= 3 { - let t = &tokens[n - 2..]; - if matches!(t[0].kind, TokenKind::Pipe) - && matches!(t[1].kind, TokenKind::Arg) - && t[1].value == "cat" - { - matched_suffix = "| cat".to_string(); - matched_len = 2; - } - } - - if matched_len == 0 && n >= 3 { - let t = 
&tokens[n - 2..]; - if matches!(t[0].kind, TokenKind::Redirect) - && t[0].value == ">" - && matches!(t[1].kind, TokenKind::Arg) - && t[1].value == "/dev/null" - { - matched_suffix = "> /dev/null".to_string(); - matched_len = 2; - } - } - - if matched_len == 0 && n >= 3 { - let t = &tokens[n - 2..]; - if matches!(t[0].kind, TokenKind::Redirect) - && t[0].value == ">>" - && matches!(t[1].kind, TokenKind::Arg) - { - matched_suffix = format!(">> {}", t[1].value); - matched_len = 2; - } - } - - if matched_len == 0 && n >= 2 { - let last = &tokens[n - 1]; - if matches!(last.kind, TokenKind::Redirect) && last.value.contains(">&") { - matched_suffix = last.value.clone(); - matched_len = 1; - } - } - - if matched_len == 0 && n >= 2 { - let last = &tokens[n - 1]; - if matches!(last.kind, TokenKind::Shellism) && last.value == "&" { - matched_suffix = "&".to_string(); - matched_len = 1; - } - } - - if matched_len == 0 { - break; - } - - tokens.truncate(n - matched_len); - suffixes.push(matched_suffix); - } - - suffixes.reverse(); - let suffix = suffixes.join(" "); - (tokens, suffix) -} - -pub fn needs_shell(tokens: &[ParsedToken]) -> bool { - tokens.iter().any(|t| { - matches!( - t.kind, - TokenKind::Shellism | TokenKind::Pipe | TokenKind::Redirect - ) - }) -} - -pub fn parse_chain(tokens: Vec) -> Result, String> { - let mut commands = Vec::new(); - let mut current_args = Vec::new(); - - for token in tokens { - match token.kind { - TokenKind::Arg => { - current_args.push(strip_quotes(&token.value)); - } - TokenKind::Operator => { - if current_args.is_empty() { - return Err(format!( - "Syntax error: operator {} with no command", - token.value - )); - } - let binary = current_args.remove(0); - commands.push(NativeCommand { - binary, - args: current_args.clone(), - operator: Some(token.value.clone()), - }); - current_args.clear(); - } - TokenKind::Pipe | TokenKind::Redirect | TokenKind::Shellism => { - return Err(format!( - "Unexpected {:?} in native mode - use passthrough", - 
token.kind - )); - } - } - } - - if !current_args.is_empty() { - let binary = current_args.remove(0); - commands.push(NativeCommand { - binary, - args: current_args, - operator: None, - }); - } - - Ok(commands) -} - -pub fn should_run(operator: Option<&str>, last_success: bool) -> bool { - match operator { - Some("&&") => last_success, - Some("||") => !last_success, - Some(";") | None => true, - _ => true, - } -} - -#[cfg(test)] -mod tests { - use super::*; - use crate::cmd::lexer::tokenize; - - #[test] - fn test_split_suffix_2_redirect() { - let tokens = tokenize("cargo test 2>&1"); - let (core, suffix) = split_safe_suffix(tokens); - assert_eq!(suffix, "2>&1"); - assert!(!needs_shell(&core)); - let cmds = parse_chain(core).unwrap(); - assert_eq!(cmds[0].binary, "cargo"); - assert_eq!(cmds[0].args, vec!["test"]); - } - - #[test] - fn test_split_suffix_dev_null() { - let tokens = tokenize("cargo test 2>/dev/null"); - let (core, suffix) = split_safe_suffix(tokens); - assert_eq!(suffix, "2>/dev/null"); - let cmds = parse_chain(core).unwrap(); - assert_eq!(cmds[0].binary, "cargo"); - } - - #[test] - fn test_split_suffix_stdout_dev_null() { - let tokens = tokenize("cargo test > /dev/null"); - let (core, suffix) = split_safe_suffix(tokens); - assert_eq!(suffix, "> /dev/null"); - let cmds = parse_chain(core).unwrap(); - assert_eq!(cmds[0].binary, "cargo"); - } - - #[test] - fn test_split_suffix_pipe_tee() { - let tokens = tokenize("cargo test | tee /tmp/log.txt"); - let (core, suffix) = split_safe_suffix(tokens); - assert!(suffix.starts_with("| tee"), "suffix: {suffix}"); - assert!(suffix.contains("/tmp/log.txt"), "suffix: {suffix}"); - let cmds = parse_chain(core).unwrap(); - assert_eq!(cmds[0].binary, "cargo"); - } - - #[test] - fn test_split_suffix_pipe_head() { - let tokens = tokenize("git log | head -20"); - let (core, suffix) = split_safe_suffix(tokens); - assert!(suffix.starts_with("| head"), "suffix: {suffix}"); - let cmds = parse_chain(core).unwrap(); - 
assert_eq!(cmds[0].binary, "git"); - } - - #[test] - fn test_split_suffix_pipe_tail() { - let tokens = tokenize("git log | tail -10"); - let (_core, suffix) = split_safe_suffix(tokens); - assert!(suffix.starts_with("| tail"), "suffix: {suffix}"); - } - - #[test] - fn test_split_suffix_pipe_cat() { - let tokens = tokenize("ls --color | cat"); - let (core, suffix) = split_safe_suffix(tokens); - assert_eq!(suffix, "| cat"); - let cmds = parse_chain(core).unwrap(); - assert_eq!(cmds[0].binary, "ls"); - } - - #[test] - fn test_split_suffix_append_redirect() { - let tokens = tokenize("cargo build >> /tmp/build.log"); - let (core, suffix) = split_safe_suffix(tokens); - assert!(suffix.starts_with(">>"), "suffix: {suffix}"); - let cmds = parse_chain(core).unwrap(); - assert_eq!(cmds[0].binary, "cargo"); - } - - #[test] - fn test_split_suffix_none() { - let tokens = tokenize("cargo test"); - let n = tokens.len(); - let (core, suffix) = split_safe_suffix(tokens); - assert!(suffix.is_empty(), "no suffix expected, got: {suffix}"); - assert_eq!(core.len(), n); - } - - #[test] - fn test_split_suffix_glob_core_stays_shellism() { - let tokens = tokenize("ls *.rs 2>&1"); - let (core, suffix) = split_safe_suffix(tokens); - assert_eq!(suffix, "2>&1"); - assert!(needs_shell(&core)); - } - - #[test] - fn test_split_suffix_requires_core_token() { - let tokens = tokenize("2>&1"); - let (core, suffix) = split_safe_suffix(tokens); - assert!( - suffix.is_empty() || core.is_empty(), - "bare suffix with no core should not produce a valid split" - ); - } - - #[test] - fn test_needs_shell_simple() { - let tokens = tokenize("git status"); - assert!(!needs_shell(&tokens)); - } - - #[test] - fn test_needs_shell_with_glob() { - let tokens = tokenize("ls *.rs"); - assert!(needs_shell(&tokens)); - } - - #[test] - fn test_needs_shell_with_pipe() { - let tokens = tokenize("cat file | grep x"); - assert!(needs_shell(&tokens)); - } - - #[test] - fn test_needs_shell_with_redirect() { - let tokens = 
tokenize("cmd > file"); - assert!(needs_shell(&tokens)); - } - - #[test] - fn test_needs_shell_with_chain() { - let tokens = tokenize("cd dir && git status"); - assert!(!needs_shell(&tokens)); - } - - #[test] - fn test_parse_simple_command() { - let tokens = tokenize("git status"); - let cmds = parse_chain(tokens).unwrap(); - assert_eq!(cmds.len(), 1); - assert_eq!(cmds[0].binary, "git"); - assert_eq!(cmds[0].args, vec!["status"]); - assert_eq!(cmds[0].operator, None); - } - - #[test] - fn test_parse_command_with_multiple_args() { - let tokens = tokenize("git commit -m message"); - let cmds = parse_chain(tokens).unwrap(); - assert_eq!(cmds.len(), 1); - assert_eq!(cmds[0].binary, "git"); - assert_eq!(cmds[0].args, vec!["commit", "-m", "message"]); - } - - #[test] - fn test_parse_chained_and() { - let tokens = tokenize("cd dir && git status"); - let cmds = parse_chain(tokens).unwrap(); - assert_eq!(cmds.len(), 2); - assert_eq!(cmds[0].binary, "cd"); - assert_eq!(cmds[0].args, vec!["dir"]); - assert_eq!(cmds[0].operator, Some("&&".to_string())); - assert_eq!(cmds[1].binary, "git"); - assert_eq!(cmds[1].args, vec!["status"]); - assert_eq!(cmds[1].operator, None); - } - - #[test] - fn test_parse_chained_or() { - let tokens = tokenize("cmd1 || cmd2"); - let cmds = parse_chain(tokens).unwrap(); - assert_eq!(cmds.len(), 2); - assert_eq!(cmds[0].operator, Some("||".to_string())); - } - - #[test] - fn test_parse_chained_semicolon() { - let tokens = tokenize("cmd1 ; cmd2 ; cmd3"); - let cmds = parse_chain(tokens).unwrap(); - assert_eq!(cmds.len(), 3); - assert_eq!(cmds[0].operator, Some(";".to_string())); - assert_eq!(cmds[1].operator, Some(";".to_string())); - assert_eq!(cmds[2].operator, None); - } - - #[test] - fn test_parse_triple_chain() { - let tokens = tokenize("a && b && c"); - let cmds = parse_chain(tokens).unwrap(); - assert_eq!(cmds.len(), 3); - } - - #[test] - fn test_parse_operator_at_start() { - let tokens = tokenize("&& cmd"); - let result = 
parse_chain(tokens); - assert!(result.is_err()); - } - - #[test] - fn test_parse_operator_at_end() { - let tokens = tokenize("cmd &&"); - let cmds = parse_chain(tokens).unwrap(); - assert_eq!(cmds.len(), 1); - assert_eq!(cmds[0].operator, Some("&&".to_string())); - } - - #[test] - fn test_parse_quoted_arg() { - let tokens = tokenize("git commit -m \"Fix && Bug\""); - let cmds = parse_chain(tokens).unwrap(); - assert_eq!(cmds.len(), 1); - assert_eq!(cmds[0].args.len(), 3); - assert_eq!(cmds[0].args[2], "Fix && Bug"); - } - - #[test] - fn test_parse_empty() { - let tokens = tokenize(""); - let cmds = parse_chain(tokens).unwrap(); - assert!(cmds.is_empty()); - } - - #[test] - fn test_needs_shell_find_piped_to_grep() { - let tokens = tokenize("find . -name \"*.rs\" | grep pattern"); - assert!(needs_shell(&tokens)); - } - - #[test] - fn test_needs_shell_rg_piped_to_head() { - let tokens = tokenize("rg pattern src/ | head -20"); - assert!(needs_shell(&tokens)); - } - - #[test] - fn test_needs_shell_grep_with_redirect() { - let tokens = tokenize("grep -r pattern . > results.txt"); - assert!(needs_shell(&tokens)); - } - - #[test] - fn test_needs_shell_find_with_glob_arg() { - let tokens = tokenize("find . -name *.rs"); - assert!(needs_shell(&tokens)); - } - - #[test] - fn test_needs_shell_quoted_pipe_in_grep_arg_no_shell() { - let tokens = tokenize("grep \"a|b\" src/"); - assert!(!needs_shell(&tokens)); - } - - #[test] - fn test_parse_chain_find_with_quoted_name() { - let tokens = tokenize("find . 
-name \"*.rs\""); - assert!(!needs_shell(&tokens)); - let cmds = parse_chain(tokens).unwrap(); - assert_eq!(cmds[0].binary, "find"); - assert!(cmds[0].args.contains(&"-name".to_string())); - assert!( - cmds[0].args.iter().any(|a| a == "*.rs"), - "quoted glob stripped to bare glob in args: {:?}", - cmds[0].args - ); - } - - #[test] - fn test_parse_chain_grep_native_no_pipe() { - let tokens = tokenize("grep pattern file.rs"); - assert!(!needs_shell(&tokens)); - let cmds = parse_chain(tokens).unwrap(); - assert_eq!(cmds[0].binary, "grep"); - assert_eq!(cmds[0].args, vec!["pattern", "file.rs"]); - } - - #[test] - fn test_should_run_and_success() { - assert!(should_run(Some("&&"), true)); - } - - #[test] - fn test_should_run_and_failure() { - assert!(!should_run(Some("&&"), false)); - } - - #[test] - fn test_should_run_or_success() { - assert!(!should_run(Some("||"), true)); - } - - #[test] - fn test_should_run_or_failure() { - assert!(should_run(Some("||"), false)); - } - - #[test] - fn test_should_run_semicolon() { - assert!(should_run(Some(";"), true)); - assert!(should_run(Some(";"), false)); - } - - #[test] - fn test_should_run_none() { - assert!(should_run(None, true)); - assert!(should_run(None, false)); - } - - #[test] - fn test_needs_shell_redirect_to_dev_null() { - let tokens = tokenize("cmd > /dev/null"); - assert!(needs_shell(&tokens)); - } - - #[test] - fn test_needs_shell_stderr_to_dev_null() { - let tokens = tokenize("cmd 2>/dev/null"); - assert!(needs_shell(&tokens)); - } - - #[test] - fn test_needs_shell_stderr_to_dev_null_spaced() { - let tokens = tokenize("cmd 2> /dev/null"); - assert!(needs_shell(&tokens)); - } - - #[test] - fn test_needs_shell_stderr_to_stdout() { - let tokens = tokenize("cmd 2>&1"); - assert!(needs_shell(&tokens)); - } - - #[test] - fn test_needs_shell_stdout_to_stderr() { - let tokens = tokenize("cmd 1>&2"); - assert!(needs_shell(&tokens)); - } - - #[test] - fn test_needs_shell_combined_redirect_chain() { - let tokens = 
tokenize("cmd > /dev/null 2>&1"); - assert!(needs_shell(&tokens)); - } - - #[test] - fn test_needs_shell_redirect_append() { - let tokens = tokenize("cmd >> /tmp/output.txt"); - assert!(needs_shell(&tokens)); - } - - #[test] - fn test_needs_shell_stderr_redirect_to_file() { - let tokens = tokenize("cmd 2> /tmp/err.log"); - assert!(needs_shell(&tokens)); - } - - #[test] - fn test_needs_shell_pipe_to_tail() { - let tokens = tokenize("git log | tail -20"); - assert!(needs_shell(&tokens)); - } - - #[test] - fn test_needs_shell_pipe_to_cat() { - let tokens = tokenize("ls --color | cat"); - assert!(needs_shell(&tokens)); - } - - #[test] - fn test_needs_shell_pipe_to_tee() { - let tokens = tokenize("cargo build 2>&1 | tee /tmp/build.log"); - assert!(needs_shell(&tokens)); - } - - #[test] - fn test_needs_shell_pipe_to_wc() { - let tokens = tokenize("find . -name '*.rs' | wc -l"); - assert!(needs_shell(&tokens)); - } - - #[test] - fn test_operator_and_does_not_trigger_shell() { - let tokens = tokenize("cargo fmt && cargo clippy"); - assert!(!needs_shell(&tokens)); - } - - #[test] - fn test_operator_or_does_not_trigger_shell() { - let tokens = tokenize("cargo test || true"); - assert!(!needs_shell(&tokens)); - } - - #[test] - fn test_operator_semicolon_does_not_trigger_shell() { - let tokens = tokenize("true ; false"); - assert!(!needs_shell(&tokens)); - } - - #[test] - fn test_redirect_suffix_is_passed_through_verbatim() { - let raw = "cargo test 2>&1 | tee /tmp/test.log"; - let tokens = tokenize(raw); - assert!(needs_shell(&tokens)); - } - - #[test] - fn test_background_job_suffix_simple() { - let tokens = tokenize("cargo build &"); - let (core, suffix) = split_safe_suffix(tokens); - assert_eq!(suffix, "&"); - assert_eq!(core.len(), 2); - assert!(!needs_shell(&core)); - } - - #[test] - fn test_background_job_suffix_git_status() { - let tokens = tokenize("git status &"); - let (core, suffix) = split_safe_suffix(tokens); - assert_eq!(suffix, "&"); - assert_eq!(core.len(), 
2); - assert!(!needs_shell(&core)); - } - - #[test] - fn test_background_job_suffix_with_fd_redirect() { - // With the current lexer, 2>&1 is a single Redirect token (no Shellism), - // so both 2>&1 and & are safely stripped as independent suffixes - let tokens = tokenize("cargo build 2>&1 &"); - let (core, suffix) = split_safe_suffix(tokens); - assert_eq!(suffix, "2>&1 &"); - assert!(!needs_shell(&core)); - } - - #[test] - fn test_background_job_suffix_single_token_not_stripped() { - let tokens = tokenize("&"); - let (core, suffix) = split_safe_suffix(tokens); - assert!(suffix.is_empty()); - assert_eq!(core.len(), 1); - } - - #[test] - fn test_cargo_test_pipe_grep_is_not_safe_suffix() { - let tokens = tokenize("cargo test | grep FAILED"); - let (_core, suffix) = split_safe_suffix(tokens.clone()); - assert!(suffix.is_empty()); - assert!(needs_shell(&tokens)); - } - - #[test] - fn test_nohup_background_strips_ampersand() { - let tokens = tokenize("nohup cargo build &"); - let (core, suffix) = split_safe_suffix(tokens); - assert_eq!(suffix, "&"); - assert_eq!(core[0].value, "nohup"); - assert_eq!(core.len(), 3); - assert!(!needs_shell(&core)); - } - - #[test] - fn test_split_suffix_compound_redirect_pipe_tail() { - let tokens = tokenize("cargo test 2>&1 | tail -50"); - let (core, suffix) = split_safe_suffix(tokens); - assert_eq!(suffix, "2>&1 | tail -50"); - assert!(!needs_shell(&core)); - let cmds = parse_chain(core).expect("core must parse"); - assert_eq!(cmds[0].binary, "cargo"); - assert_eq!(cmds[0].args, vec!["test"]); - } - - #[test] - fn test_split_suffix_compound_devnull_redirect() { - let tokens = tokenize("cmd > /dev/null 2>&1"); - let (core, suffix) = split_safe_suffix(tokens); - assert_eq!(suffix, "> /dev/null 2>&1"); - assert!(!needs_shell(&core)); - assert_eq!(core.len(), 1); - } - - #[test] - fn test_split_suffix_compound_redirect_pipe_tee() { - let tokens = tokenize("cargo build 2>&1 | tee /tmp/log"); - let (core, suffix) = split_safe_suffix(tokens); 
- assert_eq!(suffix, "2>&1 | tee /tmp/log"); - assert!(!needs_shell(&core)); - } - - #[test] - fn test_split_suffix_triple_compound() { - let tokens = tokenize("cmd >> /tmp/log 2>&1 | tail -5"); - let (core, suffix) = split_safe_suffix(tokens); - assert_eq!(suffix, ">> /tmp/log 2>&1 | tail -5"); - assert!(!needs_shell(&core)); - assert_eq!(core.len(), 1); - } - - #[test] - fn test_split_suffix_unsafe_pipe_with_redirect_not_stripped() { - let tokens = tokenize("cargo test | grep FAILED 2>&1"); - let (core, suffix) = split_safe_suffix(tokens); - assert_eq!(suffix, "2>&1"); - assert!(needs_shell(&core)); - } - - #[test] - fn test_split_suffix_devnull_background() { - let tokens = tokenize("cargo build > /dev/null &"); - let (core, suffix) = split_safe_suffix(tokens); - assert_eq!(suffix, "> /dev/null &"); - assert!(!needs_shell(&core)); - } -} diff --git a/src/cmd/builtins.rs b/src/cmd/builtins.rs deleted file mode 100644 index 17ec4d1b9..000000000 --- a/src/cmd/builtins.rs +++ /dev/null @@ -1,258 +0,0 @@ -use super::predicates::{expand_tilde, get_home}; -use anyhow::{Context, Result}; - -pub fn builtin_cd(args: &[String]) -> Result { - let target = args - .first() - .map(|s| expand_tilde(s)) - .unwrap_or_else(get_home); - - std::env::set_current_dir(&target) - .with_context(|| format!("cd: {}: No such file or directory", target))?; - - Ok(true) -} - -fn is_valid_env_name(name: &str) -> bool { - let mut chars = name.chars(); - matches!(chars.next(), Some(c) if c.is_ascii_alphabetic() || c == '_') - && chars.all(|c| c.is_ascii_alphanumeric() || c == '_') -} - -pub fn builtin_export(args: &[String]) -> Result { - for arg in args { - if let Some((key, value)) = arg.split_once('=') { - if !is_valid_env_name(key) { - continue; - } - let clean_value = value - .strip_prefix('"') - .and_then(|v| v.strip_suffix('"')) - .or_else(|| value.strip_prefix('\'').and_then(|v| v.strip_suffix('\''))) - .unwrap_or(value); - std::env::set_var(key, clean_value); - } - } - Ok(true) -} - 
-pub fn is_builtin(binary: &str) -> bool { - matches!( - binary, - "cd" | "export" | "pwd" | "echo" | "true" | "false" | ":" - ) -} - -pub fn execute(binary: &str, args: &[String]) -> Result { - match binary { - "cd" => builtin_cd(args), - "export" => builtin_export(args), - "pwd" => { - println!("{}", std::env::current_dir()?.display()); - Ok(true) - } - "echo" => { - let (print_args, no_newline) = if args.first().map(|s| s.as_str()) == Some("-n") { - (&args[1..], true) - } else { - (args, false) - }; - print!("{}", print_args.join(" ")); - if !no_newline { - println!(); - } - Ok(true) - } - "true" | ":" => Ok(true), - "false" => Ok(false), - _ => anyhow::bail!("Unknown builtin: {}", binary), - } -} - -#[cfg(test)] -mod tests { - use super::*; - use std::env; - - #[test] - fn test_cd_all_cases() { - let original = env::current_dir().unwrap(); - let home = get_home(); - - let result = builtin_cd(&["/tmp".to_string()]).unwrap(); - assert!(result); - let new_dir = env::current_dir().unwrap(); - let canon_tmp = std::fs::canonicalize("/tmp").unwrap(); - let canon_new = std::fs::canonicalize(&new_dir).unwrap(); - assert_eq!(canon_new, canon_tmp, "cd /tmp should land in /tmp"); - - let result = builtin_cd(&["/nonexistent/path/xyz".to_string()]); - assert!(result.is_err()); - assert_eq!( - std::fs::canonicalize(env::current_dir().unwrap()).unwrap(), - canon_tmp - ); - - let result = builtin_cd(&[]).unwrap(); - assert!(result); - let cwd = env::current_dir().unwrap(); - let canon_home = std::fs::canonicalize(&home).unwrap(); - let canon_cwd = std::fs::canonicalize(&cwd).unwrap(); - assert_eq!(canon_cwd, canon_home, "cd with no args should go home"); - - let _ = env::set_current_dir("/tmp"); - let result = builtin_cd(&["~".to_string()]).unwrap(); - assert!(result); - let cwd = std::fs::canonicalize(env::current_dir().unwrap()).unwrap(); - assert_eq!(cwd, canon_home, "cd ~ should go home"); - - let _ = builtin_cd(&["~/nonexistent_rtk_test_subpath_xyz".to_string()]); - - let 
_ = env::set_current_dir(&original); - } - - #[test] - fn test_export_simple() { - builtin_export(&["RTK_TEST_SIMPLE=value".to_string()]).unwrap(); - assert_eq!(env::var("RTK_TEST_SIMPLE").unwrap(), "value"); - env::remove_var("RTK_TEST_SIMPLE"); - } - - #[test] - fn test_export_with_equals_in_value() { - builtin_export(&["RTK_TEST_EQUALS=key=value".to_string()]).unwrap(); - assert_eq!(env::var("RTK_TEST_EQUALS").unwrap(), "key=value"); - env::remove_var("RTK_TEST_EQUALS"); - } - - #[test] - fn test_export_quoted_value() { - builtin_export(&["RTK_TEST_QUOTED=\"hello world\"".to_string()]).unwrap(); - assert_eq!(env::var("RTK_TEST_QUOTED").unwrap(), "hello world"); - env::remove_var("RTK_TEST_QUOTED"); - } - - #[test] - fn test_export_multiple() { - builtin_export(&["RTK_TEST_A=1".to_string(), "RTK_TEST_B=2".to_string()]).unwrap(); - assert_eq!(env::var("RTK_TEST_A").unwrap(), "1"); - assert_eq!(env::var("RTK_TEST_B").unwrap(), "2"); - env::remove_var("RTK_TEST_A"); - env::remove_var("RTK_TEST_B"); - } - - #[test] - fn test_export_no_equals() { - let result = builtin_export(&["NO_EQUALS_HERE".to_string()]).unwrap(); - assert!(result); - } - - #[test] - fn test_export_invalid_identifier_ignored() { - let result = builtin_export(&["123=x".to_string()]).unwrap(); - assert!( - result, - "builtin_export must succeed even with invalid identifier" - ); - assert!( - env::var("123").is_err(), - "var with numeric-start name must not be set" - ); - } - - #[test] - fn test_export_empty_name_ignored() { - let result = builtin_export(&["=x".to_string()]).unwrap(); - assert!(result); - } - - #[test] - fn test_is_valid_env_name() { - assert!(is_valid_env_name("FOO")); - assert!(is_valid_env_name("_FOO")); - assert!(is_valid_env_name("foo_bar_123")); - assert!(!is_valid_env_name("123foo")); - assert!(!is_valid_env_name("")); - assert!(!is_valid_env_name("foo-bar")); - assert!(!is_valid_env_name("foo bar")); - } - - #[test] - fn test_is_builtin_cd() { - assert!(is_builtin("cd")); - } 
- - #[test] - fn test_is_builtin_export() { - assert!(is_builtin("export")); - } - - #[test] - fn test_is_builtin_pwd() { - assert!(is_builtin("pwd")); - } - - #[test] - fn test_is_builtin_echo() { - assert!(is_builtin("echo")); - } - - #[test] - fn test_is_builtin_true() { - assert!(is_builtin("true")); - } - - #[test] - fn test_is_builtin_false() { - assert!(is_builtin("false")); - } - - #[test] - fn test_is_builtin_external() { - assert!(!is_builtin("git")); - assert!(!is_builtin("ls")); - assert!(!is_builtin("cargo")); - } - - #[test] - fn test_execute_pwd() { - let result = execute("pwd", &[]).unwrap(); - assert!(result); - } - - #[test] - fn test_execute_echo() { - let result = execute("echo", &["hello".to_string(), "world".to_string()]).unwrap(); - assert!(result); - } - - #[test] - fn test_execute_true() { - let result = execute("true", &[]).unwrap(); - assert!(result); - } - - #[test] - fn test_execute_false() { - let result = execute("false", &[]).unwrap(); - assert!(!result); - } - - #[test] - fn test_execute_unknown_builtin() { - let result = execute("notabuiltin", &[]); - assert!(result.is_err()); - } - - #[test] - fn test_execute_echo_n_flag() { - let result = execute("echo", &["-n".to_string(), "hello".to_string()]).unwrap(); - assert!(result); - } - - #[test] - fn test_execute_echo_empty_args() { - let result = execute("echo", &[]).unwrap(); - assert!(result); - } -} diff --git a/src/cmd/exec.rs b/src/cmd/exec.rs deleted file mode 100644 index 89901568e..000000000 --- a/src/cmd/exec.rs +++ /dev/null @@ -1,472 +0,0 @@ -use anyhow::{Context, Result}; -use std::process::Command; - -use super::{analysis, builtins, filters, lexer}; -use crate::core::stream::{FilterMode, LineFilter, StdinMode}; -use crate::core::tracking; - -fn is_rtk_active() -> bool { - std::env::var("RTK_ACTIVE").is_ok() -} - -struct RtkActiveGuard; - -impl RtkActiveGuard { - fn new() -> Self { - std::env::set_var("RTK_ACTIVE", "1"); - RtkActiveGuard - } -} - -impl Drop for 
RtkActiveGuard { - fn drop(&mut self) { - std::env::remove_var("RTK_ACTIVE"); - } -} - -pub fn execute(raw: &str, verbose: u8) -> Result { - if is_rtk_active() { - if verbose > 0 { - eprintln!("rtk: Recursion detected, passing through"); - } - return run_passthrough(raw, verbose); - } - - if raw.trim().is_empty() { - return Ok(0); - } - - let _guard = RtkActiveGuard::new(); - execute_inner(raw, verbose) -} - -fn execute_inner(raw: &str, verbose: u8) -> Result { - let tokens = lexer::tokenize(raw); - - if analysis::needs_shell(&tokens) { - return run_passthrough(raw, verbose); - } - - let commands = - analysis::parse_chain(tokens).map_err(|e| anyhow::anyhow!("Parse error: {}", e))?; - - run_native(&commands, verbose) -} - -fn run_native(commands: &[analysis::NativeCommand], verbose: u8) -> Result { - let mut last_exit: i32 = 0; - let mut prev_operator: Option<&str> = None; - - for cmd in commands { - if !analysis::should_run(prev_operator, last_exit == 0) { - prev_operator = cmd.operator.as_deref(); - continue; - } - - // ISSUE #917: flatten nested rtk run to prevent recursion - if cmd.binary == "rtk" && cmd.args.first().map(|s| s.as_str()) == Some("run") { - let inner = if cmd.args.get(1).map(|s| s.as_str()) == Some("-c") { - cmd.args.get(2).cloned().unwrap_or_default() - } else { - cmd.args.get(1).cloned().unwrap_or_default() - }; - if verbose > 0 { - eprintln!("rtk: Flattening nested rtk run"); - } - return execute(&inner, verbose); - } - - if builtins::is_builtin(&cmd.binary) { - let ok = builtins::execute(&cmd.binary, &cmd.args)?; - last_exit = if ok { 0 } else { 1 }; - prev_operator = cmd.operator.as_deref(); - continue; - } - - last_exit = spawn_with_filter(&cmd.binary, &cmd.args, verbose)?; - prev_operator = cmd.operator.as_deref(); - } - - Ok(last_exit) -} - -fn spawn_with_filter(binary: &str, args: &[String], verbose: u8) -> Result { - let timer = tracking::TimedExecution::start(); - - if verbose > 1 { - eprintln!( - "[rtk exec] binary={} interactive={} 
unstaged={}", - binary, - super::predicates::is_interactive(), - super::predicates::has_unstaged_changes(), - ); - } - - let binary_path = match which::which(binary) { - Ok(path) => path, - Err(_) => { - eprintln!("rtk: {}: command not found", binary); - return Ok(127); - } - }; - - let mut cmd = Command::new(&binary_path); - cmd.args(args); - - let mode = filters::get_filter_mode(binary); - let result = crate::core::stream::run_streaming(&mut cmd, StdinMode::Inherit, mode) - .with_context(|| format!("Failed to execute: {}", binary))?; - - let orig_cmd = if args.is_empty() { - binary.to_string() - } else { - format!("{} {}", binary, args.join(" ")) - }; - - let rtk_cmd = if binary == "rtk" { - if args.is_empty() { - "rtk".to_string() - } else { - format!("rtk {}", args.join(" ")) - } - } else { - let native_cmd = analysis::NativeCommand { - binary: binary.to_string(), - args: args.to_vec(), - operator: None, - }; - match super::hook::try_route_native_command(&native_cmd, &orig_cmd) { - Some(routed) => routed, - None => format!("rtk run {}", orig_cmd), - } - }; - timer.track(&orig_cmd, &rtk_cmd, &result.raw, &result.filtered); - - Ok(result.exit_code) -} - -pub fn run_passthrough(raw: &str, verbose: u8) -> Result { - if verbose > 0 { - eprintln!("rtk: Passthrough mode for complex command"); - } - - let timer = tracking::TimedExecution::start(); - - let shell = if cfg!(windows) { "cmd" } else { "sh" }; - let flag = if cfg!(windows) { "/C" } else { "-c" }; - - let mut cmd = Command::new(shell); - cmd.arg(flag).arg(raw); - - let filter = LineFilter::new(|l| Some(format!("{}\n", crate::core::utils::strip_ansi(l)))); - let result = crate::core::stream::run_streaming( - &mut cmd, - StdinMode::Inherit, - FilterMode::Streaming(Box::new(filter)), - ) - .context("Failed to execute passthrough")?; - - timer.track( - raw, - &format!("rtk passthrough {}", raw), - &result.raw, - &result.filtered, - ); - - Ok(result.exit_code) -} - -#[cfg(test)] -mod tests { - use super::*; - use 
crate::cmd::hook; - use crate::cmd::test_helpers::EnvGuard; - - #[test] - fn test_is_rtk_active_default() { - let _env = EnvGuard::new(); - assert!(!is_rtk_active()); - } - - #[test] - fn test_raii_guard_sets_and_clears() { - let _env = EnvGuard::new(); - { - let _guard = RtkActiveGuard::new(); - assert!(is_rtk_active()); - } - assert!( - !is_rtk_active(), - "RTK_ACTIVE must be cleared when guard drops" - ); - } - - #[test] - fn test_raii_guard_clears_on_panic() { - let _env = EnvGuard::new(); - let result = std::panic::catch_unwind(|| { - let _guard = RtkActiveGuard::new(); - assert!(is_rtk_active()); - panic!("simulated panic"); - }); - assert!(result.is_err()); - assert!( - !is_rtk_active(), - "RTK_ACTIVE must be cleared even after panic" - ); - } - - #[test] - fn test_execute_empty() { - assert_eq!(execute("", 0).unwrap(), 0); - } - - #[test] - fn test_execute_whitespace_only() { - assert_eq!(execute(" ", 0).unwrap(), 0); - } - - #[test] - fn test_execute_simple_command() { - assert_eq!(execute("echo hello", 0).unwrap(), 0); - } - - #[test] - fn test_execute_builtin_cd() { - let original = std::env::current_dir().unwrap(); - assert_eq!(execute("cd /tmp", 0).unwrap(), 0); - let _ = std::env::set_current_dir(&original); - } - - #[test] - fn test_execute_builtin_pwd() { - assert_eq!(execute("pwd", 0).unwrap(), 0); - } - - #[test] - fn test_execute_builtin_true() { - assert_eq!(execute("true", 0).unwrap(), 0); - } - - #[test] - fn test_execute_builtin_false() { - assert_ne!(execute("false", 0).unwrap(), 0); - } - - #[test] - fn test_execute_chain_and_success() { - assert_eq!(execute("true && echo success", 0).unwrap(), 0); - } - - #[test] - fn test_execute_chain_and_failure() { - assert_ne!(execute("false && echo should_not_run", 0).unwrap(), 0); - } - - #[test] - fn test_execute_chain_or_success() { - assert_eq!(execute("true || echo should_not_run", 0).unwrap(), 0); - } - - #[test] - fn test_execute_chain_or_failure() { - assert_eq!(execute("false || echo 
fallback", 0).unwrap(), 0); - } - - #[test] - fn test_execute_chain_semicolon() { - assert_ne!(execute("true ; false", 0).unwrap(), 0); - } - - #[test] - fn test_execute_passthrough_for_glob() { - assert_eq!(execute("echo *", 0).unwrap(), 0); - } - - #[test] - fn test_execute_passthrough_for_pipe() { - assert_eq!(execute("echo hello | cat", 0).unwrap(), 0); - } - - #[test] - fn test_execute_quoted_operator() { - assert_eq!(execute(r#"echo "hello && world""#, 0).unwrap(), 0); - } - - #[test] - fn test_execute_binary_not_found() { - assert_eq!(execute("nonexistent_command_xyz_123", 0).unwrap(), 127); - } - - #[test] - fn test_execute_chain_and_three_commands() { - assert_ne!(execute("true && false && true", 0).unwrap(), 0); - } - - #[test] - fn test_execute_chain_semicolon_last_wins() { - assert_eq!(execute("false ; true", 0).unwrap(), 0); - } - - #[test] - fn test_chain_mixed_operators() { - assert_eq!(execute("false || true && echo works", 0).unwrap(), 0); - } - - #[test] - fn test_passthrough_redirect() { - assert_eq!(execute("echo test > /dev/null", 0).unwrap(), 0); - } - - #[test] - fn test_integration_cd_tilde() { - let original = std::env::current_dir().unwrap(); - assert_eq!(execute("cd ~", 0).unwrap(), 0); - let _ = std::env::set_current_dir(&original); - } - - #[test] - fn test_integration_export() { - assert_eq!(execute("export TEST_VAR=value", 0).unwrap(), 0); - std::env::remove_var("TEST_VAR"); - } - - #[test] - fn test_integration_env_prefix() { - let result = execute("TEST=1 echo hello", 0); - assert!(result.is_ok()); - } - - #[test] - fn test_integration_dash_args() { - assert_eq!(execute("echo --help -v --version", 0).unwrap(), 0); - } - - #[test] - fn test_integration_quoted_empty() { - assert_eq!(execute(r#"echo """#, 0).unwrap(), 0); - } - - #[test] - fn test_execute_rtk_recursion() { - let result = execute("rtk run \"echo hello\"", 0); - assert!(result.is_ok()); - } - - #[test] - fn test_execute_returns_real_exit_code() { - let code = execute("sh 
-c \"exit 42\"", 0).unwrap(); - assert_eq!(code, 42, "exit code must be propagated exactly"); - } - - #[test] - fn test_execute_success_returns_zero() { - assert_eq!(execute("true", 0).unwrap(), 0); - } - - #[test] - fn test_run_native_and_chain_exit_code() { - assert_ne!(execute("true && false", 0).unwrap(), 0); - } - - fn compute_rtk_cmd_label(binary: &str, args: &[&str]) -> String { - let native_cmd = analysis::NativeCommand { - binary: binary.to_string(), - args: args.iter().map(|s| s.to_string()).collect(), - operator: None, - }; - let orig_cmd = if args.is_empty() { - binary.to_string() - } else { - format!("{} {}", binary, args.join(" ")) - }; - - if binary == "rtk" { - if args.is_empty() { - "rtk".to_string() - } else { - format!("rtk {}", args.join(" ")) - } - } else { - match hook::try_route_native_command(&native_cmd, &orig_cmd) { - Some(routed) => routed, - None => format!("rtk run {}", orig_cmd), - } - } - } - - #[test] - fn test_tracking_routed_command_uses_rtk_prefix() { - let label = compute_rtk_cmd_label("ls", &["-F"]); - assert!( - label == "rtk ls -F", - "Expected 'rtk ls -F', got '{}'", - label - ); - } - - #[test] - fn test_tracking_git_status_uses_rtk_git() { - let label = compute_rtk_cmd_label("git", &["status"]); - assert!( - label == "rtk git status", - "Expected 'rtk git status', got '{}'", - label - ); - } - - #[test] - fn test_tracking_cargo_test_uses_rtk_cargo() { - let label = compute_rtk_cmd_label("cargo", &["test"]); - assert!( - label == "rtk cargo test", - "Expected 'rtk cargo test', got '{}'", - label - ); - } - - #[test] - fn test_tracking_unknown_command_uses_rtk_run() { - let label = compute_rtk_cmd_label("python3", &["--version"]); - assert!( - label == "rtk run python3 --version", - "Expected 'rtk run python3 --version', got '{}'", - label - ); - } - - #[test] - fn test_tracking_rtk_self_reference_no_double_rtk() { - let label = compute_rtk_cmd_label("rtk", &["git", "status"]); - assert!( - label == "rtk git status", - 
"Expected 'rtk git status', got '{}'", - label - ); - assert!( - !label.contains("rtk run rtk"), - "Should NOT contain 'rtk run rtk', got '{}'", - label - ); - } - - #[test] - fn test_tracking_find_uses_rtk_run() { - let label = compute_rtk_cmd_label("find", &[".", "-name", "*.rs"]); - assert!( - label.starts_with("rtk run"), - "Expected 'rtk run ...' (find not in ROUTES), got '{}'", - label - ); - } - - #[test] - fn test_tracking_grep_uses_rtk_grep() { - let label = compute_rtk_cmd_label("grep", &["-r", "pattern"]); - assert!( - label.starts_with("rtk grep"), - "Expected 'rtk grep ...', got '{}'", - label - ); - } -} diff --git a/src/cmd/filters.rs b/src/cmd/filters.rs deleted file mode 100644 index d16eb295c..000000000 --- a/src/cmd/filters.rs +++ /dev/null @@ -1,326 +0,0 @@ -use crate::core::stream::{FilterMode, LineFilter}; -use crate::core::utils; - -fn filter_cargo_output(output: &str) -> String { - output - .lines() - .filter(|line| { - let line = line.trim(); - !line.starts_with("Compiling ") || line.contains("error") || line.contains("warning") - }) - .collect::>() - .join("\n") -} - -fn filter_test_output(output: &str) -> String { - output - .lines() - .filter(|line| { - let line = line.trim(); - line.contains("FAILED") - || line.contains("error") - || line.contains("Error") - || line.contains("failed") - || line.contains("test result:") - || line.starts_with("----") - }) - .collect::>() - .join("\n") -} - -pub fn get_filter_mode(binary: &str) -> FilterMode { - match binary { - "ls" | "find" | "grep" | "rg" | "fd" => { - FilterMode::Streaming(Box::new(LineFilter::new(|l| { - let stripped = utils::strip_ansi(l); - let truncated = if stripped.len() > 120 { - format!("{}...", &stripped[..117]) - } else { - stripped - }; - Some(format!("{}\n", truncated)) - }))) - } - "cargo" => FilterMode::Buffered(filter_cargo_output), - "pytest" | "jest" | "mocha" | "vitest" | "mypy" | "ruff" | "golangci-lint" => { - FilterMode::Buffered(filter_test_output) - } - "git" => 
FilterMode::Streaming(Box::new(LineFilter::new(|l| { - Some(format!("{}\n", utils::strip_ansi(l))) - }))), - "npm" | "npx" | "pnpm" => FilterMode::Streaming(Box::new(LineFilter::new(|l| { - Some(format!("{}\n", utils::strip_ansi(l))) - }))), - _ => FilterMode::Passthrough, - } -} - -#[cfg(test)] -mod tests { - use super::*; - - #[test] - fn test_strip_ansi_no_codes() { - assert_eq!(utils::strip_ansi("hello world"), "hello world"); - } - - #[test] - fn test_strip_ansi_color() { - assert_eq!(utils::strip_ansi("\x1b[32mgreen\x1b[0m"), "green"); - } - - #[test] - fn test_strip_ansi_bold() { - assert_eq!(utils::strip_ansi("\x1b[1mbold\x1b[0m"), "bold"); - } - - #[test] - fn test_strip_ansi_multiple() { - assert_eq!( - utils::strip_ansi("\x1b[31mred\x1b[0m \x1b[32mgreen\x1b[0m"), - "red green" - ); - } - - #[test] - fn test_strip_ansi_complex() { - assert_eq!( - utils::strip_ansi("\x1b[1;31;42mbold red on green\x1b[0m"), - "bold red on green" - ); - } - - #[test] - fn test_filter_cargo_keeps_errors() { - let input = "Compiling dep1\nerror: something wrong\nCompiling dep2"; - let output = filter_cargo_output(input); - assert!(output.contains("error")); - assert!(!output.contains("Compiling dep1")); - } - - #[test] - fn test_filter_cargo_keeps_warnings() { - let input = "Compiling dep1\nwarning: unused variable\nCompiling dep2"; - let output = filter_cargo_output(input); - assert!(output.contains("warning")); - } - - #[test] - fn test_filter_test_keeps_failures() { - let input = "test foo ... ok\ntest bar ... 
FAILED\ntest result: 1 passed; 1 failed"; - let output = filter_test_output(input); - assert!(output.contains("FAILED")); - assert!(output.contains("test result:")); - assert!(!output.contains("test foo")); - } - - fn truncate_lines(output: &str, max_lines: usize) -> String { - let lines: Vec<&str> = output.lines().collect(); - if lines.len() <= max_lines { - output.to_string() - } else { - let truncated: Vec<&str> = lines.iter().take(max_lines).copied().collect(); - format!( - "{}\n... ({} more lines)", - truncated.join("\n"), - lines.len() - max_lines - ) - } - } - - #[test] - fn test_truncate_short() { - let input = "line1\nline2\nline3"; - let output = truncate_lines(input, 10); - assert_eq!(output, input); - } - - #[test] - fn test_truncate_long() { - let input = "line1\nline2\nline3\nline4\nline5"; - let output = truncate_lines(input, 3); - assert!(output.contains("line3")); - assert!(!output.contains("line4")); - assert!(output.contains("2 more lines")); - } - - #[test] - fn test_get_filter_mode_grep_is_streaming() { - assert!(matches!(get_filter_mode("grep"), FilterMode::Streaming(_))); - } - - #[test] - fn test_get_filter_mode_rg_is_streaming() { - assert!(matches!(get_filter_mode("rg"), FilterMode::Streaming(_))); - } - - #[test] - fn test_get_filter_mode_find_is_streaming() { - assert!(matches!(get_filter_mode("find"), FilterMode::Streaming(_))); - } - - #[test] - fn test_get_filter_mode_fd_is_streaming() { - assert!(matches!(get_filter_mode("fd"), FilterMode::Streaming(_))); - } - - #[test] - fn test_get_filter_mode_ls_is_streaming() { - assert!(matches!(get_filter_mode("ls"), FilterMode::Streaming(_))); - } - - #[test] - fn test_get_filter_mode_cargo_is_buffered() { - assert!(matches!(get_filter_mode("cargo"), FilterMode::Buffered(_))); - } - - #[test] - fn test_get_filter_mode_mypy_is_buffered() { - assert!(matches!(get_filter_mode("mypy"), FilterMode::Buffered(_))); - } - - #[test] - fn test_get_filter_mode_ruff_is_buffered() { - 
assert!(matches!(get_filter_mode("ruff"), FilterMode::Buffered(_))); - } - - #[test] - fn test_get_filter_mode_golangci_lint_is_buffered() { - assert!(matches!( - get_filter_mode("golangci-lint"), - FilterMode::Buffered(_) - )); - } - - #[test] - fn test_get_filter_mode_npm_is_streaming() { - assert!(matches!(get_filter_mode("npm"), FilterMode::Streaming(_))); - } - - #[test] - fn test_get_filter_mode_pnpm_is_streaming() { - assert!(matches!(get_filter_mode("pnpm"), FilterMode::Streaming(_))); - } - - #[test] - fn test_get_filter_mode_git_is_streaming() { - assert!(matches!(get_filter_mode("git"), FilterMode::Streaming(_))); - } - - #[test] - fn test_get_filter_mode_unknown_is_passthrough() { - assert!(matches!( - get_filter_mode("unknowncmd"), - FilterMode::Passthrough - )); - } - - #[test] - fn test_get_filter_mode_grep_strips_ansi_and_emits() { - let mut mode = get_filter_mode("grep"); - if let FilterMode::Streaming(ref mut filter) = mode { - let result = filter.feed_line("\x1b[32msrc/main.rs:42:fn main\x1b[0m"); - assert!(result.is_some(), "streaming filter must emit a line"); - let out = result.unwrap(); - assert!( - out.contains("src/main.rs"), - "ANSI stripped, path preserved: {}", - out - ); - assert!( - !out.contains("\x1b["), - "ANSI codes must be stripped: {}", - out - ); - } else { - panic!("Expected FilterMode::Streaming for 'grep'"); - } - } - - #[test] - fn test_get_filter_mode_find_truncates_long_lines() { - let long_line = "a".repeat(200); - let mut mode = get_filter_mode("find"); - if let FilterMode::Streaming(ref mut filter) = mode { - let result = filter.feed_line(&long_line); - assert!(result.is_some()); - let out = result.unwrap(); - assert!( - out.len() <= 125, - "line must be truncated: len={}", - out.len() - ); - assert!(out.contains("..."), "truncated line must contain '...'"); - } else { - panic!("Expected FilterMode::Streaming for 'find'"); - } - } - - #[test] - fn test_get_filter_mode_rg_short_line_passes_through() { - let short_line = 
"src/foo.rs:10:hello"; - let mut mode = get_filter_mode("rg"); - if let FilterMode::Streaming(ref mut filter) = mode { - let result = filter.feed_line(short_line); - assert!(result.is_some()); - let out = result.unwrap(); - assert!(out.contains("src/foo.rs"), "out={}", out); - } else { - panic!("Expected FilterMode::Streaming for 'rg'"); - } - } - - #[test] - fn test_get_filter_mode_go_is_passthrough() { - assert!(matches!(get_filter_mode("go"), FilterMode::Passthrough)); - } - - #[test] - fn test_get_filter_mode_npx_is_streaming() { - assert!(matches!(get_filter_mode("npx"), FilterMode::Streaming(_))); - } - - #[test] - fn test_get_filter_mode_npm_strips_ansi() { - let mut mode = get_filter_mode("npm"); - if let FilterMode::Streaming(ref mut filter) = mode { - let result = filter.feed_line("\x1b[33mWARN\x1b[0m deprecated package"); - assert!(result.is_some()); - let out = result.unwrap(); - assert!(out.contains("WARN"), "content preserved: {}", out); - assert!(!out.contains("\x1b["), "ANSI codes stripped: {}", out); - } else { - panic!("Expected FilterMode::Streaming for 'npm'"); - } - } - - #[test] - fn test_filter_test_output_no_failures_returns_empty() { - let input = "test foo ... ok\ntest bar ... ok\ntest baz ... ok"; - let output = filter_test_output(input); - assert!( - output.is_empty(), - "all-passing tests should produce empty output" - ); - } - - #[test] - fn test_filter_cargo_output_only_compiling() { - let input = "Compiling dep1\nCompiling dep2\nCompiling dep3"; - let output = filter_cargo_output(input); - assert!( - output.is_empty() || output.trim().is_empty(), - "pure Compiling output should be filtered out" - ); - } - - #[test] - fn test_filter_test_output_keeps_separator_lines() { - let input = "test foo ... 
ok\n---- test_bar stdout ----\nerror: assertion failed\ntest result: 0 passed; 1 failed"; - let output = filter_test_output(input); - assert!(output.contains("----"), "separator lines preserved"); - assert!(output.contains("error:"), "error lines preserved"); - assert!(output.contains("test result:"), "summary preserved"); - assert!(!output.contains("test foo"), "passing test filtered out"); - } -} diff --git a/src/cmd/hook/claude.rs b/src/cmd/hook/claude.rs deleted file mode 100644 index 85c992765..000000000 --- a/src/cmd/hook/claude.rs +++ /dev/null @@ -1,539 +0,0 @@ -#![deny(clippy::print_stdout, clippy::print_stderr)] - -use super::{ - check_for_hook, is_hook_disabled, should_passthrough, update_command_in_tool_input, - HookResponse, HookResult, -}; -use serde::{Deserialize, Serialize}; -use serde_json::Value; -use std::io::{self, Read, Write}; - -#[derive(Deserialize)] -pub(crate) struct ClaudePayload { - tool_input: Option, -} - -#[derive(Serialize)] -#[serde(rename_all = "camelCase")] -pub(crate) struct ClaudeResponse { - hook_specific_output: HookOutput, -} - -#[derive(Serialize)] -#[serde(rename_all = "camelCase")] -struct HookOutput { - hook_event_name: &'static str, - permission_decision: &'static str, - permission_decision_reason: String, - #[serde(skip_serializing_if = "Option::is_none")] - updated_input: Option, -} - -#[derive(Deserialize)] -struct ManifestFallthroughEntry { - fallthrough_command: String, -} - -#[derive(Deserialize)] -struct ManifestFallthrough { - entries: Vec, -} - -pub(crate) fn extract_command(payload: &ClaudePayload) -> Option<&str> { - payload - .tool_input - .as_ref()? - .get("command")? 
- .as_str() - .filter(|s| !s.is_empty()) -} - -pub(crate) fn allow_response(reason: String, updated_input: Option) -> ClaudeResponse { - ClaudeResponse { - hook_specific_output: HookOutput { - hook_event_name: "PreToolUse", - permission_decision: "allow", - permission_decision_reason: reason, - updated_input, - }, - } -} - -pub(crate) fn deny_response(reason: String) -> ClaudeResponse { - ClaudeResponse { - hook_specific_output: HookOutput { - hook_event_name: "PreToolUse", - permission_decision: "deny", - permission_decision_reason: reason, - updated_input: None, - }, - } -} - -pub fn run() -> anyhow::Result<()> { - let mut buffer = String::new(); - io::stdin().read_to_string(&mut buffer)?; - - let response = match run_inner(&buffer) { - Ok(r) => r, - Err(_) => HookResponse::NoOpinion, - }; - - match response { - HookResponse::NoOpinion => match run_manifest_handlers(&buffer) { - ManifestResult::Blocked { json, stderr_bytes } => { - writeln!(io::stdout(), "{json}")?; - io::stderr().write_all(&stderr_bytes)?; - if stderr_bytes.is_empty() { - writeln!(io::stderr(), "Command blocked by registered handler")?; - } - std::process::exit(2); - } - ManifestResult::NoBlock => {} - }, - HookResponse::Allow(rtk_json) => match run_manifest_handlers(&buffer) { - ManifestResult::Blocked { - json: handler_json, - stderr_bytes, - } => { - writeln!(io::stdout(), "{handler_json}")?; - io::stderr().write_all(&stderr_bytes)?; - if stderr_bytes.is_empty() { - let reason = extract_deny_reason(&handler_json).unwrap_or_else(|| { - "Command blocked by registered safety handler".to_owned() - }); - writeln!(io::stderr(), "{reason}")?; - } - std::process::exit(2); - } - ManifestResult::NoBlock => { - writeln!(io::stdout(), "{rtk_json}")?; - } - }, - HookResponse::Deny(json, reason) => { - // ISSUE #4669: dual-path deny workaround — stdout JSON + stderr reason + exit 2 - writeln!(io::stdout(), "{json}")?; - writeln!(io::stderr(), "{reason}")?; - std::process::exit(2); - } - } - Ok(()) -} - -fn 
run_inner(buffer: &str) -> anyhow::Result { - let payload: ClaudePayload = match serde_json::from_str(buffer) { - Ok(p) => p, - Err(_) => return Ok(HookResponse::NoOpinion), - }; - - let cmd = match extract_command(&payload) { - Some(c) => c, - None => return Ok(HookResponse::NoOpinion), - }; - - if is_hook_disabled() || should_passthrough(cmd) { - return Ok(HookResponse::NoOpinion); - } - - let result = check_for_hook(cmd, "claude"); - - match result { - HookResult::Rewrite(new_cmd) => { - let updated = update_command_in_tool_input(payload.tool_input, new_cmd); - - let response = allow_response("RTK safety rewrite applied".into(), Some(updated)); - let json = serde_json::to_string(&response)?; - Ok(HookResponse::Allow(json)) - } - HookResult::Blocked(msg) => { - let response = deny_response(msg.clone()); - let json = serde_json::to_string(&response)?; - Ok(HookResponse::Deny(json, msg)) - } - } -} - -fn manifest_path() -> Option { - let home = std::env::var("HOME") - .or_else(|_| std::env::var("USERPROFILE")) - .ok()?; - Some( - std::path::Path::new(&home) - .join(".claude") - .join("hooks") - .join("rtk-bash-manifest.json"), - ) -} - -fn is_json_deny(json_str: &str) -> bool { - let Ok(v) = serde_json::from_str::(json_str.trim()) else { - return false; - }; - let cc_deny = v - .get("hookSpecificOutput") - .and_then(|o| o.get("permissionDecision")) - .and_then(|d| d.as_str()) - == Some("deny"); - let gemini_deny = v.get("decision").and_then(|d| d.as_str()) == Some("deny"); - cc_deny || gemini_deny -} - -fn extract_deny_reason(json_str: &str) -> Option { - let v: Value = serde_json::from_str(json_str.trim()).ok()?; - if let Some(r) = v - .get("hookSpecificOutput") - .and_then(|o| o.get("permissionDecisionReason")) - .and_then(|r| r.as_str()) - { - return Some(r.to_owned()); - } - v.get("reason").and_then(|r| r.as_str()).map(str::to_owned) -} - -enum ManifestResult { - Blocked { json: String, stderr_bytes: Vec }, - NoBlock, -} - -fn load_manifest() -> Option { - let 
path = manifest_path()?; - if !path.exists() { - return None; - } - let content = std::fs::read_to_string(&path).ok()?; - serde_json::from_str(&content).ok() -} - -fn run_manifest_handlers(payload: &str) -> ManifestResult { - let manifest = match load_manifest() { - Some(m) => m, - None => return ManifestResult::NoBlock, - }; - - let mut block_json: Option = None; - let mut block_stderr: Vec = Vec::new(); - - for entry in &manifest.entries { - let mut child = match std::process::Command::new("sh") - .arg("-c") - .arg(&entry.fallthrough_command) - .stdin(std::process::Stdio::piped()) - .stdout(std::process::Stdio::piped()) - .stderr(std::process::Stdio::piped()) - .spawn() - { - Ok(c) => c, - Err(_) => continue, - }; - - // Track write success to avoid false-positive exit 2 on partial stdin - let write_ok = if let Some(mut stdin) = child.stdin.take() { - io::Write::write_all(&mut stdin, payload.as_bytes()).is_ok() - } else { - false - }; - - let output = match child.wait_with_output() { - Ok(o) => o, - Err(_) => continue, - }; - - let exit_code = output.status.code().unwrap_or(0); - let stdout_str = String::from_utf8_lossy(&output.stdout); - let blocked = (exit_code == 2 && write_ok) || is_json_deny(&stdout_str); - - if blocked && block_json.is_none() { - block_json = Some(stdout_str.into_owned()); - block_stderr.extend_from_slice(&output.stderr); - } - } - - match block_json { - Some(json) => ManifestResult::Blocked { - json, - stderr_bytes: block_stderr, - }, - None => ManifestResult::NoBlock, - } -} - -#[cfg(test)] -mod tests { - use super::*; - use crate::cmd::test_helpers::EnvGuard; - - #[test] - fn test_output_uses_hook_specific_output() { - let response = allow_response("test".into(), None); - let json = serde_json::to_string(&response).unwrap(); - let parsed: Value = serde_json::from_str(&json).unwrap(); - - assert!( - parsed.get("hookSpecificOutput").is_some(), - "must have 'hookSpecificOutput' field" - ); - assert!( - 
parsed.get("hook_specific_output").is_none(), - "must NOT have snake_case field" - ); - } - - #[test] - fn test_output_uses_permission_decision() { - let response = allow_response("test".into(), None); - let json = serde_json::to_string(&response).unwrap(); - let parsed: Value = serde_json::from_str(&json).unwrap(); - let output = &parsed["hookSpecificOutput"]; - - assert!( - output.get("permissionDecision").is_some(), - "must have 'permissionDecision' field" - ); - assert!( - output.get("decision").is_none(), - "must NOT have Gemini-style 'decision' field" - ); - } - - #[test] - fn test_output_uses_permission_decision_reason() { - let response = deny_response("blocked".into()); - let json = serde_json::to_string(&response).unwrap(); - let parsed: Value = serde_json::from_str(&json).unwrap(); - let output = &parsed["hookSpecificOutput"]; - - assert!( - output.get("permissionDecisionReason").is_some(), - "must have 'permissionDecisionReason'" - ); - } - - #[test] - fn test_output_uses_hook_event_name() { - let response = allow_response("test".into(), None); - let json = serde_json::to_string(&response).unwrap(); - let parsed: Value = serde_json::from_str(&json).unwrap(); - - assert_eq!(parsed["hookSpecificOutput"]["hookEventName"], "PreToolUse"); - } - - #[test] - fn test_output_uses_updated_input_for_rewrite() { - let input = serde_json::json!({"command": "rtk run -c 'git status'"}); - let response = allow_response("rewrite".into(), Some(input)); - let json = serde_json::to_string(&response).unwrap(); - let parsed: Value = serde_json::from_str(&json).unwrap(); - - assert!( - parsed["hookSpecificOutput"].get("updatedInput").is_some(), - "must have 'updatedInput' for rewrites" - ); - } - - #[test] - fn test_allow_omits_updated_input_when_none() { - let response = allow_response("passthrough".into(), None); - let json = serde_json::to_string(&response).unwrap(); - - assert!( - !json.contains("updatedInput"), - "updatedInput must be omitted when None" - ); - } - - 
#[test] - fn test_rewrite_preserves_other_tool_input_fields() { - let original = serde_json::json!({ - "command": "git status", - "timeout": 30, - "description": "check repo" - }); - - let mut updated = original.clone(); - if let Some(obj) = updated.as_object_mut() { - obj.insert( - "command".into(), - Value::String("rtk run -c 'git status'".into()), - ); - } - - assert_eq!(updated["timeout"], 30); - assert_eq!(updated["description"], "check repo"); - assert_eq!(updated["command"], "rtk run -c 'git status'"); - } - - #[test] - fn test_output_decision_values() { - let allow = allow_response("test".into(), None); - let deny = deny_response("blocked".into()); - - let allow_json: Value = - serde_json::from_str(&serde_json::to_string(&allow).unwrap()).unwrap(); - let deny_json: Value = - serde_json::from_str(&serde_json::to_string(&deny).unwrap()).unwrap(); - - assert_eq!( - allow_json["hookSpecificOutput"]["permissionDecision"], - "allow" - ); - assert_eq!( - deny_json["hookSpecificOutput"]["permissionDecision"], - "deny" - ); - } - - #[test] - fn test_input_extra_fields_ignored() { - let json = r#"{"tool_input": {"command": "ls"}, "tool_name": "Bash", "session_id": "abc-123", "session_cwd": "/tmp", "transcript_path": "/path/to/transcript.jsonl"}"#; - let payload: ClaudePayload = serde_json::from_str(json).unwrap(); - assert_eq!(extract_command(&payload), Some("ls")); - } - - #[test] - fn test_input_tool_input_is_object() { - let json = r#"{"tool_input": {"command": "git status", "timeout": 30}}"#; - let payload: ClaudePayload = serde_json::from_str(json).unwrap(); - let input = payload.tool_input.unwrap(); - assert_eq!(input["command"].as_str().unwrap(), "git status"); - assert_eq!(input["timeout"].as_i64().unwrap(), 30); - } - - #[test] - fn test_extract_command_basic() { - let payload: ClaudePayload = - serde_json::from_str(r#"{"tool_input": {"command": "git status"}}"#).unwrap(); - assert_eq!(extract_command(&payload), Some("git status")); - } - - #[test] - fn 
test_extract_command_missing_tool_input() { - let payload: ClaudePayload = serde_json::from_str(r#"{}"#).unwrap(); - assert_eq!(extract_command(&payload), None); - } - - #[test] - fn test_extract_command_missing_command_field() { - let payload: ClaudePayload = - serde_json::from_str(r#"{"tool_input": {"cwd": "/tmp"}}"#).unwrap(); - assert_eq!(extract_command(&payload), None); - } - - #[test] - fn test_extract_command_empty_string() { - let payload: ClaudePayload = - serde_json::from_str(r#"{"tool_input": {"command": ""}}"#).unwrap(); - assert_eq!(extract_command(&payload), None); - } - - #[test] - fn test_shared_should_passthrough_rtk_prefix() { - assert!(should_passthrough("rtk run -c 'ls'")); - assert!(should_passthrough("rtk cargo test")); - assert!(should_passthrough("/usr/local/bin/rtk run -c 'ls'")); - } - - #[test] - fn test_shared_should_passthrough_heredoc() { - assert!(should_passthrough("cat <(input); - } - } - - #[test] - fn test_run_inner_returns_no_opinion_for_empty_payload() { - let payload: ClaudePayload = serde_json::from_str("{}").unwrap(); - assert_eq!(extract_command(&payload), None); - } - - #[test] - fn test_shared_is_hook_disabled_hook_enabled_zero() { - let _env = EnvGuard::new(); - std::env::set_var("RTK_HOOK_ENABLED", "0"); - assert!(is_hook_disabled()); - } - - #[test] - fn test_shared_is_hook_disabled_rtk_active() { - let _env = EnvGuard::new(); - std::env::set_var("RTK_ACTIVE", "1"); - assert!(is_hook_disabled()); - } - - #[test] - fn test_deny_response_includes_reason_for_stderr() { - // ISSUE #4669: deny must provide plain text reason for stderr dual-path workaround - let msg = "RTK: cat is blocked (use rtk read instead)"; - let response = deny_response(msg.to_string()); - let json = serde_json::to_string(&response).unwrap(); - let parsed: Value = serde_json::from_str(&json).unwrap(); - - assert_eq!(parsed["hookSpecificOutput"]["permissionDecision"], "deny"); - assert_eq!( - parsed["hookSpecificOutput"]["permissionDecisionReason"], - 
msg - ); - } - - #[test] - fn test_is_json_deny_claude_code_format() { - let json = r#"{"hookSpecificOutput":{"permissionDecision":"deny","permissionDecisionReason":"blocked"}}"#; - assert!(is_json_deny(json)); - } - - #[test] - fn test_is_json_deny_gemini_format() { - let json = r#"{"decision":"deny","reason":"blocked"}"#; - assert!(is_json_deny(json)); - } - - #[test] - fn test_is_json_deny_allow_not_matched() { - assert!(!is_json_deny( - r#"{"hookSpecificOutput":{"permissionDecision":"allow"}}"# - )); - assert!(!is_json_deny(r#"{"decision":"allow"}"#)); - assert!(!is_json_deny("")); - assert!(!is_json_deny("not json")); - } - - #[test] - fn test_extract_deny_reason_cc_format() { - let json = r#"{"hookSpecificOutput":{"permissionDecision":"deny","permissionDecisionReason":"Use Grep tool"}}"#; - assert_eq!(extract_deny_reason(json), Some("Use Grep tool".to_owned())); - } - - #[test] - fn test_extract_deny_reason_gemini_format() { - let json = r#"{"decision":"deny","reason":"command blocked"}"#; - assert_eq!( - extract_deny_reason(json), - Some("command blocked".to_owned()) - ); - } - - #[test] - fn test_extract_deny_reason_missing() { - assert_eq!(extract_deny_reason("{}"), None); - assert_eq!(extract_deny_reason("not json"), None); - } - - #[test] - fn test_load_manifest_returns_none_when_missing() { - let result = load_manifest(); - drop(result); - } -} diff --git a/src/cmd/hook/mod.rs b/src/cmd/hook/mod.rs deleted file mode 100644 index ba1e1ea37..000000000 --- a/src/cmd/hook/mod.rs +++ /dev/null @@ -1,1536 +0,0 @@ -pub(crate) mod claude; - -use super::{analysis, lexer}; - -#[derive(Debug, Clone)] -pub enum HookResult { - Rewrite(String), - Blocked(String), -} - -const MAX_REWRITE_DEPTH: usize = 3; - -#[derive(Debug, Clone, PartialEq)] -pub enum HookResponse { - NoOpinion, - Allow(String), - Deny(String, String), -} - -pub fn check_for_hook(raw: &str, _agent: &str) -> HookResult { - check_for_hook_inner(raw, 0) -} - -fn check_for_hook_inner(raw: &str, depth: 
usize) -> HookResult { - if depth >= MAX_REWRITE_DEPTH { - return HookResult::Blocked("Rewrite loop detected (max depth exceeded)".to_string()); - } - if raw.trim().is_empty() { - return HookResult::Rewrite(raw.to_string()); - } - - let tokens = lexer::tokenize(raw); - - let (core_tokens, suffix) = analysis::split_safe_suffix(tokens); - - if analysis::needs_shell(&core_tokens) { - return HookResult::Rewrite(format!("rtk run -c '{}'", escape_quotes(raw))); - } - - match analysis::parse_chain(core_tokens) { - Ok(commands) => { - if commands.len() == 1 { - let routed = if suffix.is_empty() { - try_route_native_command(&commands[0], raw) - } else { - let core_raw = if commands[0].args.is_empty() { - commands[0].binary.clone() - } else { - format!("{} {}", commands[0].binary, commands[0].args.join(" ")) - }; - try_route_native_command(&commands[0], &core_raw) - }; - - match routed { - Some(rtk_cmd) => { - if suffix.is_empty() { - HookResult::Rewrite(rtk_cmd) - } else { - HookResult::Rewrite(format!("{} {}", rtk_cmd, suffix)) - } - } - None => HookResult::Rewrite(raw.to_string()), - } - } else { - let substituted = reconstruct_with_rtk(&commands); - let inner = if suffix.is_empty() { - substituted - } else { - format!("{} {}", substituted, suffix) - }; - HookResult::Rewrite(format!("rtk run -c '{}'", escape_quotes(&inner))) - } - } - Err(_) => HookResult::Rewrite(raw.to_string()), - } -} - -pub fn is_hook_disabled() -> bool { - std::env::var("RTK_HOOK_ENABLED").as_deref() == Ok("0") || std::env::var("RTK_ACTIVE").is_ok() -} - -pub fn should_passthrough(cmd: &str) -> bool { - if cmd.starts_with("rtk ") || cmd.contains("/rtk ") || cmd.contains("<<") { - return true; - } - // ISSUE #196: gh --json/--jq/--template produces structured output that rtk gh - // would corrupt. Pass through unchanged so callers get raw JSON. 
- if (cmd.starts_with("gh ") || cmd.contains(" gh ")) - && (cmd.contains("--json") || cmd.contains("--jq") || cmd.contains("--template")) - { - return true; - } - false -} - -pub fn update_command_in_tool_input( - tool_input: Option, - new_cmd: String, -) -> serde_json::Value { - use serde_json::Value; - let mut updated = tool_input.unwrap_or_else(|| Value::Object(Default::default())); - if let Some(obj) = updated.as_object_mut() { - obj.insert("command".into(), Value::String(new_cmd)); - } - updated -} - -#[cfg(test)] -const FORMAT_PRESERVING: &[&str] = &["tail", "echo", "cat", "find", "fd"]; - -#[cfg(test)] -const TRANSPARENT_SINKS: &[&str] = &["tee", "head", "tail", "cat"]; - -fn escape_quotes(s: &str) -> String { - s.replace("'", "'\\''") -} - -fn is_env_assign(s: &str) -> bool { - if let Some(eq_pos) = s.find('=') { - let key = &s[..eq_pos]; - !key.is_empty() - && key - .chars() - .next() - .is_some_and(|c| c.is_ascii_alphabetic() || c == '_') - && key.chars().all(|c| c.is_ascii_alphanumeric() || c == '_') - } else { - false - } -} - -fn replace_first_word(raw: &str, old_prefix: &str, new_prefix: &str) -> String { - raw.strip_prefix(old_prefix) - .map(|rest| format!("{new_prefix}{rest}")) - .unwrap_or_else(|| format!("rtk run -c '{}'", escape_quotes(raw))) -} - -fn route_pnpm(cmd: &analysis::NativeCommand, raw: &str) -> String { - let sub = cmd.args.first().map(String::as_str).unwrap_or(""); - match sub { - "list" | "ls" | "outdated" | "install" => format!("rtk {raw}"), - - // ISSUE #112: shell script sed bug produces "rtk vitest run run --coverage" - "vitest" => { - let after_vitest: Vec<&str> = cmd.args[1..] 
- .iter() - .map(String::as_str) - .skip_while(|&a| a == "run") - .collect(); - if after_vitest.is_empty() { - "rtk vitest run".to_string() - } else { - format!("rtk vitest run {}", after_vitest.join(" ")) - } - } - - "test" => { - let after_test: Vec<&str> = cmd.args[1..].iter().map(String::as_str).collect(); - if after_test.is_empty() { - "rtk vitest run".to_string() - } else { - format!("rtk vitest run {}", after_test.join(" ")) - } - } - - "tsc" => replace_first_word(raw, "pnpm tsc", "rtk tsc"), - "lint" => replace_first_word(raw, "pnpm lint", "rtk lint"), - "eslint" => replace_first_word(raw, "pnpm eslint", "rtk lint"), - "playwright" => replace_first_word(raw, "pnpm playwright", "rtk playwright"), - - _ => format!("rtk run -c '{}'", escape_quotes(raw)), - } -} - -fn route_npx(cmd: &analysis::NativeCommand, raw: &str) -> String { - let sub = cmd.args.first().map(String::as_str).unwrap_or(""); - match sub { - "tsc" | "typescript" => replace_first_word(raw, &format!("npx {sub}"), "rtk tsc"), - "eslint" => replace_first_word(raw, "npx eslint", "rtk lint"), - "prettier" => replace_first_word(raw, "npx prettier", "rtk prettier"), - "playwright" => replace_first_word(raw, "npx playwright", "rtk playwright"), - "prisma" => replace_first_word(raw, "npx prisma", "rtk prisma"), - - "vitest" => { - let after_vitest: Vec<&str> = cmd.args[1..] 
- .iter() - .map(String::as_str) - .skip_while(|&a| a == "run") - .collect(); - if after_vitest.is_empty() { - "rtk vitest run".to_string() - } else { - format!("rtk vitest run {}", after_vitest.join(" ")) - } - } - - _ => format!("rtk run -c '{}'", escape_quotes(raw)), - } -} - -fn hook_lookup<'a>(binary: &'a str, sub: &str) -> Option<(&'static str, &'a str)> { - let base = binary.rsplit('/').next().unwrap_or(binary); - match base { - "git" => match sub { - "status" | "log" | "diff" | "show" | "add" | "commit" | "push" | "pull" | "fetch" - | "stash" | "branch" | "worktree" => Some(("rtk git", binary)), - _ => None, - }, - "gh" => match sub { - "pr" | "issue" | "run" => Some(("rtk gh", binary)), - _ => None, - }, - "cargo" => match sub { - "test" | "build" | "clippy" | "check" | "install" | "fmt" => { - Some(("rtk cargo", binary)) - } - _ => None, - }, - "docker" => match sub { - "ps" | "images" | "logs" => Some(("rtk docker", binary)), - _ => None, - }, - "kubectl" => match sub { - "get" | "logs" => Some(("rtk kubectl", binary)), - _ => None, - }, - "go" => match sub { - "test" | "build" | "vet" => Some(("rtk go", binary)), - _ => None, - }, - "ruff" => match sub { - "check" | "format" => Some(("rtk ruff", binary)), - _ => None, - }, - "pip" | "pip3" => match sub { - "list" | "outdated" | "install" | "show" => Some(("rtk pip", binary)), - _ => None, - }, - "grep" => Some(("rtk grep", binary)), - "rg" => Some(("rtk grep", binary)), - "ls" => Some(("rtk ls", binary)), - "eslint" => Some(("rtk lint", binary)), - "biome" => Some(("rtk lint", binary)), - "tsc" => Some(("rtk tsc", binary)), - "prettier" => Some(("rtk prettier", binary)), - "golangci-lint" | "golangci" => Some(("rtk golangci-lint", binary)), - "mypy" => Some(("rtk mypy", binary)), - "playwright" => Some(("rtk playwright", binary)), - "prisma" => Some(("rtk prisma", binary)), - "curl" => Some(("rtk curl", binary)), - "pytest" => Some(("rtk pytest", binary)), - "wc" => Some(("rtk wc", binary)), - "gt" => 
Some(("rtk gt", binary)), - "wget" | "diff" | "tree" | "find" => None, - _ => None, - } -} - -fn is_shell_prefix_builtin(token: &str) -> bool { - matches!( - token, - "noglob" | "command" | "builtin" | "exec" | "nocorrect" - ) -} - -pub(crate) fn route_native_command(cmd: &analysis::NativeCommand, raw: &str) -> String { - if is_shell_prefix_builtin(&cmd.binary) { - if let Some(real_binary) = cmd.args.first() { - let prefix = &cmd.binary; - let real_args = cmd.args[1..].to_vec(); - let real_cmd = analysis::NativeCommand { - binary: real_binary.clone(), - args: real_args, - operator: cmd.operator.clone(), - }; - let core_raw = raw - .strip_prefix(prefix) - .map(|s| s.trim_start()) - .unwrap_or(raw); - return match try_route_native_command(&real_cmd, core_raw) { - Some(routed) => format!("{} {}", prefix, routed), - None => raw.to_string(), - }; - } - return raw.to_string(); - } - - if is_env_assign(&cmd.binary) { - let mut env_parts: Vec<&str> = vec![cmd.binary.as_str()]; - let mut arg_idx = 0; - while arg_idx < cmd.args.len() && is_env_assign(&cmd.args[arg_idx]) { - env_parts.push(&cmd.args[arg_idx]); - arg_idx += 1; - } - if arg_idx < cmd.args.len() { - let env_prefix_str = env_parts.join(" "); - let core_raw = raw - .strip_prefix(&env_prefix_str) - .map(|s| s.trim_start()) - .unwrap_or_else(|| { - let skip = env_prefix_str.len(); - if skip < raw.len() { - raw[skip..].trim_start() - } else { - raw - } - }); - let real_binary = cmd.args[arg_idx].clone(); - let real_args = cmd.args[arg_idx + 1..].to_vec(); - let real_cmd = analysis::NativeCommand { - binary: real_binary, - args: real_args, - operator: cmd.operator.clone(), - }; - return match try_route_native_command(&real_cmd, core_raw) { - Some(routed) => format!("{} {}", env_prefix_str, routed), - None => raw.to_string(), - }; - } - } - - let sub = cmd.args.first().map(String::as_str).unwrap_or(""); - let sub2 = cmd.args.get(1).map(String::as_str).unwrap_or(""); - - if let Some((rtk_full, prefix)) = 
hook_lookup(&cmd.binary, sub) { - return replace_first_word(raw, prefix, rtk_full); - } - - if cmd.binary == "cat" { - return replace_first_word(raw, "cat", "rtk read"); - } - - match cmd.binary.as_str() { - "vitest" if sub.is_empty() => "rtk vitest run".to_string(), - "vitest" => format!("rtk {raw}"), - - "uv" if sub == "pip" && matches!(sub2, "list" | "outdated" | "install" | "show") => { - replace_first_word(raw, "uv pip", "rtk pip") - } - - "python" | "python3" if sub == "-m" && sub2 == "pytest" => { - let prefix = format!("{} -m pytest", cmd.binary); - replace_first_word(raw, &prefix, "rtk pytest") - } - - "python" | "python3" if sub == "-m" && sub2 == "mypy" => { - let prefix = format!("{} -m mypy", cmd.binary); - replace_first_word(raw, &prefix, "rtk mypy") - } - - "pnpm" => route_pnpm(cmd, raw), - "npx" => route_npx(cmd, raw), - - _ => format!("rtk run -c '{}'", escape_quotes(raw)), - } -} - -pub(crate) fn try_route_native_command(cmd: &analysis::NativeCommand, raw: &str) -> Option { - let routed = route_native_command(cmd, raw); - if routed.starts_with("rtk run -c") { - None - } else { - Some(routed) - } -} - -fn reconstruct_with_rtk(commands: &[analysis::NativeCommand]) -> String { - commands - .iter() - .map(|cmd| { - let core_raw = if cmd.args.is_empty() { - cmd.binary.clone() - } else { - format!("{} {}", cmd.binary, cmd.args.join(" ")) - }; - - let part = match try_route_native_command(cmd, &core_raw) { - Some(routed) => routed, - None => core_raw, - }; - - match &cmd.operator { - Some(op) => format!("{} {}", part, op), - None => part, - } - }) - .collect::>() - .join(" ") -} - -pub fn format_for_claude(result: HookResult) -> (String, bool, i32) { - match result { - HookResult::Rewrite(cmd) => (cmd, true, 0), - HookResult::Blocked(msg) => (msg, false, 2), - } -} - -#[cfg(test)] -mod tests { - use super::*; - - fn assert_rewrite(input: &str, contains: &str) { - match check_for_hook(input, "claude") { - HookResult::Rewrite(cmd) => assert!( - 
cmd.contains(contains), - "'{}' rewrite should contain '{}', got '{}'", - input, - contains, - cmd - ), - other => panic!("Expected Rewrite for '{}', got {:?}", input, other), - } - } - - fn assert_blocked(input: &str, depth: usize, contains: &str) { - match check_for_hook_inner(input, depth) { - HookResult::Blocked(msg) => assert!( - msg.contains(contains), - "'{}' block msg should contain '{}', got '{}'", - input, - contains, - msg - ), - other => panic!("Expected Blocked for '{}', got {:?}", input, other), - } - } - - fn assert_passthrough(input: &str) { - match check_for_hook(input, "claude") { - HookResult::Rewrite(cmd) => { - assert!( - !cmd.contains("rtk run -c"), - "command should NOT be wrapped in rtk run -c, got '{}'", - cmd - ); - assert_eq!(cmd, input, "unknown command should pass through unchanged"); - } - HookResult::Blocked(_) => panic!("Expected passthrough for '{}', got Blocked", input), - } - } - - #[test] - fn test_escape_quotes() { - assert_eq!(escape_quotes("hello"), "hello"); - assert_eq!(escape_quotes("it's"), "it'\\''s"); - assert_eq!(escape_quotes("it's a test's"), "it'\\''s a test'\\''s"); - } - - #[test] - fn test_check_empty_and_whitespace() { - match check_for_hook("", "claude") { - HookResult::Rewrite(cmd) => assert!(cmd.is_empty()), - _ => panic!("Expected Rewrite for empty"), - } - match check_for_hook(" ", "claude") { - HookResult::Rewrite(cmd) => assert!(cmd.trim().is_empty()), - _ => panic!("Expected Rewrite for whitespace"), - } - } - - #[test] - fn test_safe_commands_rewrite() { - assert_rewrite("git status", "rtk git status"); - assert_rewrite(r#"git commit -m "Fix && Bug""#, "rtk git commit"); - - let shell_cases = [ - ("ls *.rs", "rtk run"), - ("echo `date`", "rtk run"), - ("echo $(date)", "rtk run"), - ("echo {a,b}.txt", "rtk run"), - ("cd /tmp && git status", "rtk run"), - ]; - for (input, expected) in shell_cases { - assert_rewrite(input, expected); - } - - assert_passthrough("FOO=bar echo hello"); - 
assert_passthrough("echo 'hello!@#$%^&*()'"); - assert_passthrough(&format!("echo {}", "a".repeat(1000))); - - match check_for_hook("cd /tmp && git status", "claude") { - HookResult::Rewrite(cmd) => assert!( - cmd.contains("&&"), - "Chain rewrite must preserve '&&', got '{}'", - cmd - ), - other => panic!("Expected Rewrite for chain, got {:?}", other), - } - } - - #[test] - fn test_env_prefix_routes_to_rtk_subcommand() { - let cases = [ - ("GIT_PAGER=cat git status", "rtk git", "GIT_PAGER=cat"), - ( - "GIT_PAGER=cat git log --oneline -10", - "rtk git", - "GIT_PAGER=cat", - ), - ("RUST_LOG=debug cargo test", "rtk cargo", "RUST_LOG=debug"), - ("LANG=C ls -la", "rtk ls", "LANG=C"), - ( - "TEST_SESSION_ID=2 npx playwright test --config=foo", - "rtk playwright", - "TEST_SESSION_ID=2", - ), - ]; - for (input, rtk_sub, env_prefix) in cases { - match check_for_hook(input, "claude") { - HookResult::Rewrite(cmd) => { - assert!( - cmd.contains(rtk_sub), - "'{input}' must route to '{rtk_sub}', got '{cmd}'" - ); - assert!( - cmd.contains(env_prefix), - "'{input}' must preserve env prefix '{env_prefix}', got '{cmd}'" - ); - } - other => panic!("Expected Rewrite for '{input}', got {other:?}"), - } - } - } - - #[test] - fn test_env_prefix_multi_var_routes() { - let input = "NODE_ENV=test CI=1 npx vitest run"; - match check_for_hook(input, "claude") { - HookResult::Rewrite(cmd) => { - assert!( - cmd.contains("rtk vitest"), - "must route to rtk vitest, got '{cmd}'" - ); - assert!( - cmd.contains("NODE_ENV=test"), - "must preserve NODE_ENV, got '{cmd}'" - ); - assert!(cmd.contains("CI=1"), "must preserve CI, got '{cmd}'"); - } - other => panic!("Expected Rewrite, got {other:?}"), - } - } - - #[test] - fn test_env_prefix_unknown_cmd_fallback() { - assert_passthrough("VAR=1 unknown_xyz_abc_cmd"); - } - - #[test] - fn test_env_prefix_npm_still_passthrough() { - assert_passthrough("NODE_ENV=test npm run test:e2e"); - } - - #[test] - fn test_env_prefix_docker_compose_passthrough() { - 
assert_passthrough("COMPOSE_PROJECT_NAME=test docker compose up -d"); - } - - #[test] - fn test_global_options_not_blocked() { - let cases = [ - "git --no-pager status", - "git -C /path/to/project status", - "git -C /path --no-pager log --oneline", - "git --no-optional-locks diff HEAD", - "git --bare log", - "cargo +nightly test", - "cargo +stable build --release", - "docker --context prod ps", - "docker -H tcp://host:2375 images", - "kubectl -n kube-system get pods", - "kubectl --context prod describe pod foo", - ]; - for input in cases { - assert_passthrough(input); - } - } - - #[test] - fn test_specific_commands_not_blocked() { - let cases = [ - "git log --oneline -10", - "git diff HEAD", - "git show abc123", - "git add .", - "gh pr list", - "gh api repos/owner/repo", - "gh release list", - "npm run test:e2e", - "npm run build", - "npm test", - "docker compose up -d", - "docker compose logs postgrest", - "docker compose down", - "docker run --rm postgres", - "docker exec -it db psql", - "kubectl describe pod foo", - "kubectl apply -f deploy.yaml", - "npx playwright test", - "npx prisma migrate", - "cargo test", - "vitest", - "vitest run", - "vitest run --reporter=verbose", - "npx vitest run", - "pnpm vitest run --coverage", - "vue-tsc -b", - "npx vue-tsc --noEmit", - "curl -s https://example.com", - "ls -la", - "grep -rn pattern src/", - "rg pattern src/", - ]; - for input in cases { - assert!( - matches!(check_for_hook(input, "claude"), HookResult::Rewrite(_)), - "'{}' should Rewrite (not Blocked)", - input - ); - } - } - - #[test] - fn test_builtins_not_blocked() { - let cases = [ - "echo hello world", - "cd /tmp", - "mkdir -p foo/bar", - "python3 script.py", - "find . 
-name '*.ts'", - "tree src/", - "wget https://example.com/file", - ]; - for input in cases { - assert_passthrough(input); - } - assert_passthrough("node -e 'console.log(1)'"); - } - - #[test] - fn test_noglob_prefix_routes_inner_command() { - assert_rewrite("noglob gh pr view 123", "noglob rtk gh pr view 123"); - } - - #[test] - fn test_noglob_prefix_with_unknown_command() { - match check_for_hook("noglob some-unknown-tool --arg", "claude") { - HookResult::Rewrite(cmd) => { - assert!( - !cmd.contains("rtk run -c 'noglob"), - "noglob should not be inside rtk run -c, got '{}'", - cmd - ); - } - HookResult::Blocked(_) => panic!("should not be blocked"), - } - } - - #[test] - fn test_command_prefix_routes_inner_command() { - assert_rewrite("command git status", "command rtk git status"); - } - - #[test] - fn test_builtin_prefix_passthrough() { - match check_for_hook("builtin cd /tmp", "claude") { - HookResult::Rewrite(cmd) => { - assert!( - !cmd.contains("rtk run -c 'builtin"), - "builtin should not be inside rtk run -c, got '{}'", - cmd - ); - } - HookResult::Blocked(_) => panic!("should not be blocked"), - } - } - - #[test] - fn test_nocorrect_prefix_routes_inner_command() { - assert_rewrite("nocorrect git log -10", "nocorrect rtk git log"); - } - - #[test] - fn test_noglob_gh_release_create_exact_bug_report() { - let input = "noglob gh release create v0.3.0-rc1 --title v0.3.0-rc1 --notes test --prerelease --draft"; - match check_for_hook(input, "claude") { - HookResult::Rewrite(cmd) => { - assert!( - !cmd.contains("rtk run -c 'noglob"), - "noglob must not be inside rtk run -c, got '{}'", - cmd - ); - assert!( - cmd.starts_with("noglob "), - "noglob must be the outermost prefix, got '{}'", - cmd - ); - } - HookResult::Blocked(_) => panic!("should not be blocked"), - } - } - - #[test] - fn test_nested_shell_prefixes() { - assert_rewrite("noglob command git status", "noglob command rtk git status"); - } - - #[test] - fn test_shell_prefix_plus_env_prefix() { - 
assert_rewrite( - "noglob GIT_PAGER=cat git log -10", - "noglob GIT_PAGER=cat rtk git log", - ); - } - - #[test] - fn test_exec_prefix_routes_inner_command() { - assert_rewrite("exec git status", "exec rtk git status"); - } - - #[test] - fn test_bare_shell_prefix_passthrough() { - match check_for_hook("noglob", "claude") { - HookResult::Rewrite(cmd) => { - assert_eq!(cmd, "noglob", "bare prefix should pass through unchanged"); - } - HookResult::Blocked(_) => panic!("should not be blocked"), - } - } - - #[test] - fn test_unknown_command_passthrough() { - assert_passthrough("gh release create v0.3.0 --title test"); - } - - #[test] - fn test_full_path_binary_routes_correctly() { - assert_rewrite("/opt/homebrew/bin/git status", "rtk git status"); - } - - #[test] - fn test_full_path_unknown_command_passthrough() { - assert_passthrough("/opt/homebrew/bin/gh release create v0.3.0"); - } - - #[test] - fn test_env_prefix_unknown_command_passthrough() { - assert_passthrough("GH_DEBUG= gh release create v0.3.0"); - } - - #[test] - fn test_noglob_unknown_command_passthrough() { - assert_passthrough("noglob gh release create v0.3.0"); - } - - #[test] - fn test_chain_mixed_known_unknown() { - match check_for_hook("gh release create v1 && git status", "claude") { - HookResult::Rewrite(cmd) => { - assert!(cmd.contains("rtk run -c"), "chains still need rtk run -c"); - assert!(cmd.contains("rtk git status"), "known cmd routed"); - assert!( - cmd.contains("gh release create v1"), - "unknown cmd preserved" - ); - } - HookResult::Blocked(_) => panic!("should not be blocked"), - } - } - - #[test] - fn test_gh_release_create_exact_bug_report() { - let input = r#"gh release create v0.3.0 --title "ai_session_tools v0.3.0" --notes-file notes/v0.3.0-release.md"#; - assert_passthrough(input); - } - - #[test] - fn test_completely_unknown_binary_passthrough() { - assert_passthrough("some-custom-tool --flag value"); - } - - #[test] - fn test_compound_commands_rewrite() { - let cases = [ - ("cd 
/tmp && git status", "&&"), - ("cd dir && git status && git diff", "&&"), - ("git add . && git commit -m msg", "&&"), - ("echo start ; git status ; echo done", ";"), - ("git pull || echo failed", "||"), - ]; - for (input, operator) in cases { - match check_for_hook(input, "claude") { - HookResult::Rewrite(cmd) => { - assert!(cmd.contains("rtk run"), "'{input}' should rewrite"); - assert!( - cmd.contains(operator), - "'{input}' must preserve '{operator}', got '{cmd}'" - ); - } - other => panic!("Expected Rewrite for '{input}', got {other:?}"), - } - } - } - - #[test] - fn test_compound_quoted_operators_not_split() { - let input = r#"git commit -m "Fix && Bug""#; - match check_for_hook(input, "claude") { - HookResult::Rewrite(cmd) => { - assert!( - cmd.contains("rtk git commit"), - "Quoted && must not split; should route to rtk git commit, got '{cmd}'" - ); - } - other => panic!("Expected Rewrite for quoted &&, got {other:?}"), - } - } - - #[test] - fn test_suffix_2_redirect_routes_to_rtk() { - let input = "cargo test 2>&1"; - match check_for_hook(input, "claude") { - HookResult::Rewrite(cmd) => { - assert!( - cmd.contains("rtk cargo"), - "must use rtk cargo filter, got '{cmd}'" - ); - assert!( - cmd.contains("2>&1"), - "must preserve 2>&1 suffix, got '{cmd}'" - ); - assert!( - !cmd.contains("rtk run -c"), - "must NOT fall back to passthrough, got '{cmd}'" - ); - } - other => panic!("Expected Rewrite, got {other:?}"), - } - } - - #[test] - fn test_suffix_dev_null_routes_to_rtk() { - let input = "cargo test 2>/dev/null"; - match check_for_hook(input, "claude") { - HookResult::Rewrite(cmd) => { - assert!(cmd.contains("rtk cargo"), "must use rtk cargo, got '{cmd}'"); - assert!( - cmd.contains("/dev/null"), - "must preserve /dev/null suffix, got '{cmd}'" - ); - assert!( - !cmd.contains("rtk run -c"), - "must NOT fall back to passthrough, got '{cmd}'" - ); - } - other => panic!("Expected Rewrite, got {other:?}"), - } - } - - #[test] - fn 
test_suffix_pipe_tee_routes_to_rtk() { - let input = "cargo test | tee /tmp/log.txt"; - match check_for_hook(input, "claude") { - HookResult::Rewrite(cmd) => { - assert!( - cmd.contains("rtk cargo"), - "must use rtk cargo filter, got '{cmd}'" - ); - assert!(cmd.contains("tee"), "must preserve tee suffix, got '{cmd}'"); - } - other => panic!("Expected Rewrite, got {other:?}"), - } - } - - #[test] - fn test_suffix_pipe_head_routes_to_rtk() { - let input = "git log | head -20"; - match check_for_hook(input, "claude") { - HookResult::Rewrite(cmd) => { - assert!(cmd.contains("rtk git"), "must use rtk git, got '{cmd}'"); - assert!( - cmd.contains("head"), - "must preserve head suffix, got '{cmd}'" - ); - assert!( - !cmd.contains("rtk run -c"), - "must NOT fall back to passthrough, got '{cmd}'" - ); - } - other => panic!("Expected Rewrite, got {other:?}"), - } - } - - #[test] - fn test_suffix_unknown_cmd_still_passthrough() { - assert_passthrough("unknown_xyz_cmd 2>&1"); - } - - #[test] - fn test_suffix_unsafe_pipe_still_passthrough() { - let input = "cargo test | grep FAILED"; - match check_for_hook(input, "claude") { - HookResult::Rewrite(cmd) => { - let _ = cmd; - } - other => panic!("Expected Rewrite, got {other:?}"), - } - } - - #[test] - fn test_token_waste_allowed_in_pipelines() { - let cases = [ - "cat file.txt | grep pattern", - "cat file.txt > output.txt", - "sed 's/old/new/' file.txt > output.txt", - "head -n 10 file.txt | grep pattern", - "for f in *.txt; do cat \"$f\" | grep x; done", - ]; - for input in cases { - assert_rewrite(input, "rtk run"); - } - } - - #[test] - fn test_different_agents_same_result() { - for agent in ["claude", "gemini"] { - match check_for_hook("git status", agent) { - HookResult::Rewrite(_) => {} - other => panic!("Expected Rewrite for agent '{}', got {:?}", agent, other), - } - } - } - - #[test] - fn test_format_for_claude() { - let (output, success, code) = - format_for_claude(HookResult::Rewrite("rtk run -c 'git 
status'".to_string())); - assert_eq!(output, "rtk run -c 'git status'"); - assert!(success); - assert_eq!(code, 0); - - let (output, success, code) = - format_for_claude(HookResult::Blocked("Error message".to_string())); - assert_eq!(output, "Error message"); - assert!(!success); - assert_eq!(code, 2); - } - - #[test] - fn test_dollar_var_routes_natively() { - let result = match check_for_hook("git log $BRANCH", "claude") { - HookResult::Rewrite(cmd) => cmd, - other => panic!("Expected Rewrite, got {:?}", other), - }; - assert!( - result.contains("rtk git"), - "Expected rtk git routing for 'git log $BRANCH', got: {}", - result - ); - assert!( - !result.contains("rtk run"), - "Should not fall to passthrough for simple $VAR, got: {}", - result - ); - } - - #[test] - fn test_dollar_subshell_still_passthrough() { - let result = match check_for_hook("git log $(git rev-parse HEAD)", "claude") { - HookResult::Rewrite(cmd) => cmd, - other => panic!("Expected Rewrite, got {:?}", other), - }; - assert!( - result.contains("rtk run"), - "Subshell $(...) 
must route to passthrough, got: {}", - result - ); - } - - #[test] - fn test_rewrite_depth_limit_blocked() { - assert_blocked("echo hello", MAX_REWRITE_DEPTH, "loop"); - } - - #[test] - fn test_rewrite_depth_limit_allowed() { - match check_for_hook_inner("echo hello", 0) { - HookResult::Rewrite(cmd) => assert_eq!(cmd, "echo hello"), - _ => panic!("Expected Rewrite at depth 0"), - } - } - - #[test] - fn test_claude_rewrite_exit_code_is_zero() { - let (_, _, code) = format_for_claude(HookResult::Rewrite("rtk run -c 'ls'".into())); - assert_eq!(code, 0, "Rewrite must exit 0 (success)"); - } - - #[test] - fn test_claude_block_exit_code_is_two() { - let (_, _, code) = format_for_claude(HookResult::Blocked("denied".into())); - assert_eq!( - code, 2, - "Block must exit 2 (blocking error per Claude Code spec)" - ); - } - - #[test] - fn test_claude_rewrite_output_is_command_text() { - let (output, success, _) = - format_for_claude(HookResult::Rewrite("rtk run -c 'git status'".into())); - assert_eq!(output, "rtk run -c 'git status'"); - assert!(success); - assert!( - !output.starts_with('{'), - "Rewrite output must be plain text, not JSON" - ); - } - - #[test] - fn test_claude_block_output_is_human_message() { - let (output, success, _) = - format_for_claude(HookResult::Blocked("Use Read tool instead".into())); - assert_eq!(output, "Use Read tool instead"); - assert!(!success); - assert!( - !output.starts_with('{'), - "Block output must be plain text, not JSON" - ); - } - - #[test] - fn test_claude_rewrite_success_flag_true() { - let (_, success, _) = format_for_claude(HookResult::Rewrite("cmd".into())); - assert!(success, "Rewrite must set success=true"); - } - - #[test] - fn test_claude_block_success_flag_false() { - let (_, success, _) = format_for_claude(HookResult::Blocked("msg".into())); - assert!(!success, "Block must set success=false"); - } - - #[test] - fn test_claude_exit_codes_not_one() { - let (_, _, rewrite_code) = 
format_for_claude(HookResult::Rewrite("cmd".into())); - let (_, _, block_code) = format_for_claude(HookResult::Blocked("msg".into())); - assert_ne!( - rewrite_code, 1, - "Exit code 1 is non-blocking error, not valid for rewrite" - ); - assert_ne!( - block_code, 1, - "Exit code 1 is non-blocking error, not valid for block" - ); - } - - #[test] - fn test_cross_protocol_safe_command_allowed_by_both() { - for cmd in ["git status", "cargo test", "ls -la", "echo hello"] { - let claude = check_for_hook(cmd, "claude"); - let gemini = check_for_hook(cmd, "gemini"); - match (&claude, &gemini) { - (HookResult::Rewrite(_), HookResult::Rewrite(_)) => {} - _ => panic!( - "'{}': Claude={:?}, Gemini={:?} -- both should Rewrite", - cmd, claude, gemini - ), - } - } - } - - #[test] - fn test_routing_native_commands() { - let cases = [ - ("git status", "rtk git status"), - ("git log --oneline -10", "rtk git log --oneline -10"), - ("git diff HEAD", "rtk git diff HEAD"), - ("git add .", "rtk git add ."), - ("git commit -m msg", "rtk git commit"), - ("gh pr view 156", "rtk gh pr view 156"), - ("cargo test", "rtk cargo test"), - ( - "cargo clippy --all-targets", - "rtk cargo clippy --all-targets", - ), - ("grep -r pattern src/", "rtk grep -r pattern src/"), - ("rg pattern src/", "rtk grep pattern src/"), - ("ls -la", "rtk ls -la"), - ("vitest", "rtk vitest run"), - ("vitest run", "rtk vitest run"), - ("vitest run --coverage", "rtk vitest run --coverage"), - ("pnpm test", "rtk vitest run"), - ("pnpm vitest", "rtk vitest run"), - ("pnpm lint", "rtk lint"), - ("pnpm eslint src/", "rtk lint"), - ("pnpm eslint .", "rtk lint ."), - ("pnpm eslint --fix src/", "rtk lint"), - ("npx tsc --noEmit", "rtk tsc --noEmit"), - ("python -m pytest tests/", "rtk pytest tests/"), - ("uv pip list", "rtk pip list"), - ("go test ./...", "rtk go test ./..."), - ("go build ./...", "rtk go build ./..."), - ("go vet ./...", "rtk go vet ./..."), - ("eslint src/", "rtk lint src/"), - ("tsc --noEmit", "rtk tsc 
--noEmit"), - ("prettier src/", "rtk prettier src/"), - ("playwright test", "rtk playwright test"), - ("prisma migrate dev", "rtk prisma migrate dev"), - ( - "curl https://api.example.com", - "rtk curl https://api.example.com", - ), - ("pytest tests/", "rtk pytest tests/"), - ("pytest -x tests/unit", "rtk pytest -x tests/unit"), - ("golangci-lint run ./...", "rtk golangci-lint run ./..."), - ("docker ps", "rtk docker ps"), - ("docker images", "rtk docker images"), - ("docker logs mycontainer", "rtk docker logs mycontainer"), - ("kubectl get pods", "rtk kubectl get pods"), - ("kubectl logs mypod", "rtk kubectl logs mypod"), - ("ruff check src/", "rtk ruff check src/"), - ("ruff format src/", "rtk ruff format src/"), - ("pip list", "rtk pip list"), - ("pip install requests", "rtk pip install requests"), - ("pip outdated", "rtk pip outdated"), - ("pip show requests", "rtk pip show requests"), - ("gh issue list", "rtk gh issue list"), - ("gh run view 123", "rtk gh run view 123"), - ("git stash pop", "rtk git stash pop"), - ("git fetch origin", "rtk git fetch origin"), - ("gt log", "rtk gt log"), - ("gt submit", "rtk gt submit"), - ("gt sync", "rtk gt sync"), - ("gt create feat/new-branch", "rtk gt create feat/new-branch"), - ]; - for (input, expected) in cases { - assert_rewrite(input, expected); - } - } - - #[test] - fn test_routing_subcommand_filter_fallback() { - let cases = [ - "docker build .", - "docker run -it nginx", - "kubectl apply -f dep.yaml", - "kubectl delete pod mypod", - "go mod tidy", - "go generate ./...", - "ruff lint src/", - "pip freeze", - "pip uninstall requests", - "cargo publish", - "cargo run", - "git rebase -i HEAD~3", - "git cherry-pick abc123", - "gh repo clone foo/bar", - ]; - for input in cases { - assert_passthrough(input); - } - } - - #[test] - fn test_routing_vitest_no_double_run() { - // ISSUE #112: shell script sed bug produces "rtk vitest run run --coverage" - let result = match check_for_hook("pnpm vitest run --coverage", "claude") 
{ - HookResult::Rewrite(cmd) => cmd, - other => panic!("Expected Rewrite, got {:?}", other), - }; - assert_rewrite("pnpm vitest run --coverage", "rtk vitest run --coverage"); - assert!( - !result.contains("run run"), - "Must not double 'run' in output: '{}'", - result - ); - } - - #[test] - fn test_routing_fallbacks_to_rtk_run() { - let chain_cases = ["git add . && git commit -m msg", "git log | grep fix"]; - for input in chain_cases { - assert_rewrite(input, "rtk run -c"); - } - let passthrough_cases = [ - "git checkout main", - "tail -n 20 file.txt", - "tail -f server.log", - ]; - for input in passthrough_cases { - assert_passthrough(input); - } - } - - #[test] - fn test_cross_agent_routing_identical() { - for cmd in ["git status", "cargo test", "ls -la"] { - let claude_result = check_for_hook(cmd, "claude"); - let gemini_result = check_for_hook(cmd, "gemini"); - match (&claude_result, &gemini_result) { - (HookResult::Rewrite(c), HookResult::Rewrite(g)) => { - assert_eq!(c, g, "claude and gemini must route '{}' identically", cmd); - assert!( - !c.contains("rtk run -c"), - "'{}' should not fall back to rtk run -c", - cmd - ); - } - _ => panic!( - "'{}' should Rewrite for both agents: claude={:?} gemini={:?}", - cmd, claude_result, gemini_result - ), - } - } - } - - #[test] - fn test_chain_both_commands_substituted() { - let result = match check_for_hook("cargo test && git log", "claude") { - HookResult::Rewrite(cmd) => cmd, - other => panic!("Expected Rewrite, got {:?}", other), - }; - assert!( - result.contains("rtk cargo"), - "cargo test must be substituted to rtk cargo inside chain: {}", - result - ); - assert!( - result.contains("rtk git"), - "git log must be substituted to rtk git inside chain: {}", - result - ); - assert!( - result.contains("rtk run"), - "chain still needs shell wrapper (rtk run -c): {}", - result - ); - } - - #[test] - fn test_chain_with_dollar_var_substituted() { - let result = match check_for_hook("cargo test && git log $BRANCH", 
"claude") { - HookResult::Rewrite(cmd) => cmd, - other => panic!("Expected Rewrite, got {:?}", other), - }; - assert!( - result.contains("rtk cargo"), - "cargo test must be rtk in chain: {}", - result - ); - assert!( - result.contains("rtk git log"), - "git log $BRANCH must be rtk with var preserved: {}", - result - ); - assert!( - result.contains("$BRANCH"), - "$BRANCH must be preserved in rewritten chain: {}", - result - ); - } - - #[test] - fn test_chain_unknown_command_not_substituted() { - let result = match check_for_hook("cargo test && unknown_xyz_cmd", "claude") { - HookResult::Rewrite(cmd) => cmd, - other => panic!("Expected Rewrite, got {:?}", other), - }; - assert!( - result.contains("rtk cargo"), - "cargo test must be substituted to rtk: {}", - result - ); - assert!( - result.contains("unknown_xyz_cmd"), - "unknown command must pass through unchanged: {}", - result - ); - assert!( - !result.contains("rtk unknown"), - "must not invent rtk subcommands for unknown binary: {}", - result - ); - } - - #[test] - fn test_semicolon_chain_substituted() { - let result = match check_for_hook("cargo test ; git status", "claude") { - HookResult::Rewrite(cmd) => cmd, - other => panic!("Expected Rewrite, got {:?}", other), - }; - assert!( - result.contains("rtk cargo"), - "cargo must be rtk in semicolon chain: {}", - result - ); - assert!( - result.contains("rtk git"), - "git must be rtk in semicolon chain: {}", - result - ); - } - - #[test] - fn test_or_chain_substituted() { - let result = match check_for_hook("cargo test || go test ./...", "claude") { - HookResult::Rewrite(cmd) => cmd, - other => panic!("Expected Rewrite, got {:?}", other), - }; - assert!( - result.contains("rtk cargo"), - "cargo must be rtk in || chain: {}", - result - ); - assert!( - result.contains("rtk go"), - "go must be rtk in || chain: {}", - result - ); - } - - #[test] - fn test_format_preserving_contains_expected() { - assert!( - FORMAT_PRESERVING.contains(&"tail"), - "tail is 
format-preserving (line-per-line passthrough)" - ); - assert!( - FORMAT_PRESERVING.contains(&"echo"), - "echo is format-preserving (output equals input)" - ); - assert!( - FORMAT_PRESERVING.contains(&"find"), - "find is format-preserving (path-per-line)" - ); - assert!( - FORMAT_PRESERVING.contains(&"cat"), - "cat is format-preserving (byte passthrough)" - ); - } - - #[test] - fn test_format_changing_not_in_format_preserving() { - assert!( - !FORMAT_PRESERVING.contains(&"cargo"), - "cargo test compresses output -- not format-preserving" - ); - assert!( - !FORMAT_PRESERVING.contains(&"git"), - "git log/diff compresses output -- not format-preserving" - ); - assert!( - !FORMAT_PRESERVING.contains(&"pytest"), - "pytest compresses output -- not format-preserving" - ); - assert!( - !FORMAT_PRESERVING.contains(&"go"), - "go test compresses output -- not format-preserving" - ); - } - - #[test] - fn test_transparent_sinks_contains_expected() { - assert!( - TRANSPARENT_SINKS.contains(&"tee"), - "tee is a transparent sink (copies stdin to file + stdout)" - ); - assert!( - TRANSPARENT_SINKS.contains(&"head"), - "head is a transparent sink (truncates lines)" - ); - assert!( - TRANSPARENT_SINKS.contains(&"cat"), - "cat is a transparent sink (passes through)" - ); - assert!( - TRANSPARENT_SINKS.contains(&"tail"), - "tail is a transparent sink (last N lines)" - ); - } - - fn count_tokens(text: &str) -> usize { - text.split_whitespace().count() - } - - fn exec(cmd: &str) -> String { - let parts: Vec<&str> = cmd.split_whitespace().collect(); - let out = std::process::Command::new(parts[0]) - .args(&parts[1..]) - .output() - .unwrap_or_else(|e| panic!("failed to exec '{cmd}': {e}")); - String::from_utf8_lossy(&out.stdout).to_string() - } - - #[test] - #[ignore = "requires installed rtk binary (cargo install --path .) 
and git repo"] - fn test_e2e_git_status_saves_tokens() { - let raw_cmd = "git status"; - let rtk_cmd = match check_for_hook(raw_cmd, "claude") { - HookResult::Rewrite(cmd) => cmd, - other => panic!("Expected Rewrite for '{raw_cmd}', got {other:?}"), - }; - assert!( - rtk_cmd.starts_with("rtk git"), - "lexer+router should produce rtk git status, got: {rtk_cmd}" - ); - - let raw_out = exec(raw_cmd); - let rtk_out = exec(&rtk_cmd); - let raw_tok = count_tokens(&raw_out); - let rtk_tok = count_tokens(&rtk_out); - assert!(raw_tok > 0, "raw git status produced no output"); - - let savings = 100.0 * (1.0 - rtk_tok as f64 / raw_tok as f64); - assert!( - savings >= 40.0, - "rtk git status should save >=40% tokens vs raw git status, \ - got {savings:.1}% ({raw_tok} raw -> {rtk_tok} rtk tokens)" - ); - } - - #[test] - #[ignore = "requires installed rtk binary (cargo install --path .) and directory with files"] - fn test_e2e_ls_saves_tokens() { - let raw_cmd = "ls -la ."; - let rtk_cmd = match check_for_hook(raw_cmd, "claude") { - HookResult::Rewrite(cmd) => cmd, - other => panic!("Expected Rewrite for '{raw_cmd}', got {other:?}"), - }; - assert!( - rtk_cmd.starts_with("rtk ls"), - "lexer+router should produce rtk ls, got: {rtk_cmd}" - ); - - let raw_out = exec(raw_cmd); - let rtk_out = exec(&rtk_cmd); - let raw_tok = count_tokens(&raw_out); - let rtk_tok = count_tokens(&rtk_out); - assert!(raw_tok > 0, "raw ls -la produced no output"); - - let savings = 100.0 * (1.0 - rtk_tok as f64 / raw_tok as f64); - assert!( - savings >= 40.0, - "rtk ls should save >=40% tokens vs raw ls -la, \ - got {savings:.1}% ({raw_tok} raw -> {rtk_tok} rtk tokens)" - ); - } - - #[test] - #[ignore = "requires installed rtk binary (cargo install --path .) 
and git repo with history"] - fn test_e2e_git_log_saves_tokens() { - let raw_cmd = "git log --oneline -20"; - let rtk_cmd = match check_for_hook(raw_cmd, "claude") { - HookResult::Rewrite(cmd) => cmd, - other => panic!("Expected Rewrite for '{raw_cmd}', got {other:?}"), - }; - assert!( - rtk_cmd.starts_with("rtk git"), - "lexer+router should produce rtk git log, got: {rtk_cmd}" - ); - - let raw_out = exec(raw_cmd); - let rtk_out = exec(&rtk_cmd); - let raw_tok = count_tokens(&raw_out); - let rtk_tok = count_tokens(&rtk_out); - assert!( - raw_tok > 0, - "raw git log produced no output -- need a repo with commits" - ); - - let ratio = rtk_tok as f64 / raw_tok.max(1) as f64; - assert!( - ratio <= 1.05, - "rtk git log must not significantly bloat output vs raw git log \ - ({raw_tok} raw -> {rtk_tok} rtk, ratio {ratio:.2})" - ); - } - - #[test] - fn test_cat_multi_file_rewrites_to_rtk_read() { - let result = check_for_hook("cat file1.txt file2.txt", "claude"); - assert!( - matches!(&result, HookResult::Rewrite(s) if s == "rtk read file1.txt file2.txt"), - "cat (multi-file) must rewrite to rtk read on this branch; got: {:?}", - result - ); - } - - #[test] - fn test_cat_single_file_rewrites_to_rtk_read() { - let result = check_for_hook("cat CLAUDE.md", "claude"); - assert!( - matches!(&result, HookResult::Rewrite(s) if s == "rtk read CLAUDE.md"), - "cat (single-file) must rewrite to rtk read on this branch; got: {:?}", - result - ); - } - - // ISSUE #196: gh --json/--jq/--template passthrough - #[test] - fn test_gh_json_flag_passes_through() { - assert!(should_passthrough("gh pr list --json number,title")); - assert!(should_passthrough( - "gh pr list --json number --jq '.[].number'" - )); - assert!(should_passthrough("gh pr view 42 --template '{{.title}}'")); - assert!(should_passthrough("gh api repos/owner/repo --jq '.name'")); - } - - #[test] - fn test_gh_without_json_not_passthrough() { - assert!(!should_passthrough("gh pr list")); - assert!(!should_passthrough("gh 
issue list")); - } - - #[test] - fn test_hook_lookup_git_branch() { - assert_eq!(hook_lookup("git", "branch"), Some(("rtk git", "git"))); - } - - #[test] - fn test_hook_lookup_git_worktree() { - assert_eq!(hook_lookup("git", "worktree"), Some(("rtk git", "git"))); - } - - #[test] - fn test_git_branch_routes_via_hook() { - assert_rewrite("git branch", "rtk git branch"); - } - - #[test] - fn test_git_worktree_list_routes_via_hook() { - assert_rewrite("git worktree list", "rtk git worktree"); - } -} diff --git a/src/cmd/mod.rs b/src/cmd/mod.rs deleted file mode 100644 index 3b1395bd9..000000000 --- a/src/cmd/mod.rs +++ /dev/null @@ -1,18 +0,0 @@ -pub(crate) mod analysis; -pub(crate) mod predicates; - -pub(crate) mod builtins; -pub(crate) mod filters; - -pub mod exec; -pub mod hook; - -// Re-export existing lexer from discover module -pub(crate) mod lexer { - pub use crate::discover::lexer::*; -} - -#[cfg(test)] -pub(crate) mod test_helpers; - -pub use hook::check_for_hook; diff --git a/src/cmd/predicates.rs b/src/cmd/predicates.rs deleted file mode 100644 index 034d7fc7d..000000000 --- a/src/cmd/predicates.rs +++ /dev/null @@ -1,75 +0,0 @@ -use std::process::Command; - -pub(crate) fn has_unstaged_changes() -> bool { - Command::new("git") - .args(["diff", "--quiet"]) - .status() - .map(|s| !s.success()) - .unwrap_or(false) -} - -pub(crate) fn is_interactive() -> bool { - use std::io::IsTerminal; - std::io::stderr().is_terminal() -} - -pub(crate) fn expand_tilde(path: &str) -> String { - if path.starts_with('~') { - let home = std::env::var("HOME") - .or_else(|_| std::env::var("USERPROFILE")) - .unwrap_or_else(|_| "/".to_string()); - path.replacen('~', &home, 1) - } else { - path.to_string() - } -} - -pub(crate) fn get_home() -> String { - std::env::var("HOME") - .or_else(|_| std::env::var("USERPROFILE")) - .unwrap_or_else(|_| "/".to_string()) -} - -#[cfg(test)] -mod tests { - use super::*; - use std::env; - - #[test] - fn test_expand_tilde_simple() { - let home = 
env::var("HOME").unwrap_or("/".to_string()); - assert_eq!(expand_tilde("~/src"), format!("{}/src", home)); - } - - #[test] - fn test_expand_tilde_no_tilde() { - assert_eq!(expand_tilde("/absolute/path"), "/absolute/path"); - } - - #[test] - fn test_expand_tilde_only_tilde() { - let home = env::var("HOME").unwrap_or("/".to_string()); - assert_eq!(expand_tilde("~"), home); - } - - #[test] - fn test_expand_tilde_relative() { - assert_eq!(expand_tilde("relative/path"), "relative/path"); - } - - #[test] - fn test_get_home_returns_something() { - let home = get_home(); - assert!(!home.is_empty()); - } - - #[test] - fn test_is_interactive_returns_false_in_tests() { - assert!(!is_interactive()); - } - - #[test] - fn test_has_unstaged_changes_does_not_panic() { - let _result: bool = has_unstaged_changes(); - } -} diff --git a/src/cmd/test_helpers.rs b/src/cmd/test_helpers.rs deleted file mode 100644 index 14b1f051a..000000000 --- a/src/cmd/test_helpers.rs +++ /dev/null @@ -1,32 +0,0 @@ -use std::sync::{Mutex, MutexGuard, OnceLock}; - -static ENV_LOCK: OnceLock> = OnceLock::new(); - -pub struct EnvGuard { - _lock: MutexGuard<'static, ()>, -} - -impl EnvGuard { - pub fn new() -> Self { - let lock = ENV_LOCK - .get_or_init(|| Mutex::new(())) - .lock() - .unwrap_or_else(|e| e.into_inner()); - Self::cleanup(); - Self { _lock: lock } - } - - fn cleanup() { - std::env::remove_var("RTK_SAFE_COMMANDS"); - std::env::remove_var("RTK_BLOCK_TOKEN_WASTE"); - std::env::remove_var("RTK_ACTIVE"); - std::env::remove_var("RTK_DB_PATH"); - std::env::remove_var("RTK_HOOK_ENABLED"); - } -} - -impl Drop for EnvGuard { - fn drop(&mut self) { - Self::cleanup(); - } -} diff --git a/src/discover/registry.rs b/src/discover/registry.rs index 7abf5da0f..3efa7c130 100644 --- a/src/discover/registry.rs +++ b/src/discover/registry.rs @@ -203,13 +203,21 @@ fn extract_base_command(cmd: &str) -> &str { } } +/// Quote-aware heredoc detection — `<<` inside quotes is not a heredoc. 
+pub fn has_heredoc(cmd: &str) -> bool { + tokenize(cmd) + .iter() + .any(|t| t.kind == TokenKind::Redirect && t.value.starts_with("<<")) +} + pub fn split_command_chain(cmd: &str) -> Vec<&str> { let trimmed = cmd.trim(); if trimmed.is_empty() { return vec![]; } - if trimmed.contains("<<") || trimmed.contains("$((") { + // Lexer-based for `<<`; string-based for `$((` (lexer splits it across tokens). + if has_heredoc(trimmed) || trimmed.contains("$((") { return vec![trimmed]; } @@ -346,8 +354,7 @@ pub fn rewrite_command(cmd: &str, excluded: &[String]) -> Option { return None; } - // Heredoc or arithmetic expansion — unsafe to split/rewrite - if trimmed.contains("<<") || trimmed.contains("$((") { + if has_heredoc(trimmed) || trimmed.contains("$((") { return None; } @@ -474,6 +481,10 @@ fn rewrite_line_range(cmd: &str) -> Option { None } +/// Shell prefix builtins that modify how the shell runs a command +/// but don't change which command runs. Strip before routing, re-prepend after. +const SHELL_PREFIX_BUILTINS: &[&str] = &["noglob", "command", "builtin", "exec", "nocorrect"]; + /// Rewrite a single (non-compound) command segment. /// Returns `Some(rewritten)` if matched (including already-RTK pass-through). /// Returns `None` if no match (caller uses original segment). @@ -483,6 +494,21 @@ fn rewrite_segment(seg: &str, excluded: &[String]) -> Option { return None; } + // Peel shell prefix builtins (noglob, command, builtin, exec, nocorrect) + // before routing, re-prepend after. + for &prefix in SHELL_PREFIX_BUILTINS { + if let Some(rest) = strip_word_prefix(trimmed, prefix) { + if rest.is_empty() { + return None; // bare "noglob" etc. — nothing to rewrite + } + // Recursively rewrite the inner command + return match rewrite_segment(rest, excluded) { + Some(rewritten) => Some(format!("{} {}", prefix, rewritten)), + None => None, + }; + } + } + // Strip trailing stderr/stdout redirects before matching (#530) // e.g. 
"git status 2>&1" → match "git status", re-append " 2>&1" let (cmd_part, redirect_suffix) = strip_trailing_redirects(trimmed); @@ -2399,4 +2425,70 @@ mod tests { vec!["git log $(git rev-parse HEAD~1)"] ); } + + #[test] + fn test_shell_prefix_noglob() { + assert_eq!( + rewrite_command("noglob git status", &[]), + Some("noglob rtk git status".into()) + ); + } + + #[test] + fn test_shell_prefix_command() { + assert_eq!( + rewrite_command("command git status", &[]), + Some("command rtk git status".into()) + ); + } + + #[test] + fn test_shell_prefix_builtin_exec_nocorrect() { + assert_eq!( + rewrite_command("builtin git status", &[]), + Some("builtin rtk git status".into()) + ); + assert_eq!( + rewrite_command("exec git status", &[]), + Some("exec rtk git status".into()) + ); + assert_eq!( + rewrite_command("nocorrect git status", &[]), + Some("nocorrect rtk git status".into()) + ); + } + + #[test] + fn test_shell_prefix_unknown_inner() { + assert_eq!(rewrite_command("noglob unknown_cmd --flag", &[]), None); + } + + #[test] + fn test_python3_m_pytest() { + assert_eq!( + rewrite_command("python3 -m pytest tests/", &[]), + Some("rtk pytest tests/".into()) + ); + } + + #[test] + fn test_pip_show() { + assert_eq!( + rewrite_command("pip show flask", &[]), + Some("rtk pip show flask".into()) + ); + } + + #[test] + fn test_gt_graphite() { + assert_eq!(rewrite_command("gt log", &[]), Some("rtk gt log".into())); + } + + #[test] + fn test_command_no_longer_ignored() { + assert_ne!( + classify_command("command git status"), + Classification::Ignored + ); + } } diff --git a/src/discover/rules.rs b/src/discover/rules.rs index 375867d4c..0b360e184 100644 --- a/src/discover/rules.rs +++ b/src/discover/rules.rs @@ -249,16 +249,16 @@ pub const RULES: &[RtkRule] = &[ subcmd_status: &[], }, RtkRule { - pattern: r"^(python\s+-m\s+)?pytest(\s|$)", + pattern: r"^(python[0-9.]*\s+-m\s+)?pytest(\s|$)", rtk_cmd: "rtk pytest", - rewrite_prefixes: &["python -m pytest", "pytest"], + 
rewrite_prefixes: &["python3 -m pytest", "python -m pytest", "pytest"], category: "Python", savings_pct: 90.0, subcmd_savings: &[], subcmd_status: &[], }, RtkRule { - pattern: r"^(pip3?|uv\s+pip)\s+(list|outdated|install)", + pattern: r"^(pip3?|uv\s+pip)\s+(list|outdated|install|show)", rtk_cmd: "rtk pip", rewrite_prefixes: &["pip3", "pip", "uv pip"], category: "Python", @@ -641,6 +641,15 @@ pub const RULES: &[RtkRule] = &[ subcmd_savings: &[], subcmd_status: &[], }, + RtkRule { + pattern: r"^gt\s+", + rtk_cmd: "rtk gt", + rewrite_prefixes: &["gt"], + category: "Git", + savings_pct: 70.0, + subcmd_savings: &[], + subcmd_status: &[], + }, ]; pub const IGNORED_PREFIXES: &[&str] = &[ @@ -659,7 +668,6 @@ pub const IGNORED_PREFIXES: &[&str] = &[ "touch ", "which ", "type ", - "command ", "test ", "true", "false", diff --git a/src/hooks/constants.rs b/src/hooks/constants.rs index b2b1d16e9..dc4adc3c2 100644 --- a/src/hooks/constants.rs +++ b/src/hooks/constants.rs @@ -7,3 +7,8 @@ pub const SETTINGS_LOCAL_JSON: &str = "settings.local.json"; pub const HOOKS_JSON: &str = "hooks.json"; pub const PRE_TOOL_USE_KEY: &str = "PreToolUse"; pub const BEFORE_TOOL_KEY: &str = "BeforeTool"; + +/// Native Rust hook command for Claude Code (replaces rtk-rewrite.sh). +pub const CLAUDE_HOOK_COMMAND: &str = "rtk hook claude"; +/// Native Rust hook command for Cursor (replaces rtk-rewrite.sh). +pub const CURSOR_HOOK_COMMAND: &str = "rtk hook cursor"; diff --git a/src/hooks/hook_check.rs b/src/hooks/hook_check.rs index ea3a6f41d..26ce57254 100644 --- a/src/hooks/hook_check.rs +++ b/src/hooks/hook_check.rs @@ -1,6 +1,9 @@ //! Detects whether RTK hooks are installed and warns if they are outdated. 
-use super::constants::{CLAUDE_DIR, HOOKS_SUBDIR, REWRITE_HOOK_FILE}; +use super::constants::{ + CLAUDE_DIR, CLAUDE_HOOK_COMMAND, HOOKS_SUBDIR, PRE_TOOL_USE_KEY, REWRITE_HOOK_FILE, + SETTINGS_JSON, +}; use crate::core::constants::RTK_DATA_DIR; use std::path::PathBuf; @@ -26,10 +29,23 @@ pub fn status() -> HookStatus { Some(h) => h, None => return HookStatus::Ok, }; - if !home.join(CLAUDE_DIR).exists() { + let claude_dir = home.join(CLAUDE_DIR); + if !claude_dir.exists() { return HookStatus::Ok; } + // Check for new binary command in settings.json first + if binary_hook_registered(&claude_dir) { + // If old script file still exists alongside new command, report Outdated + // (migration not complete — user should run `rtk init -g` to clean up) + let old_hook = claude_dir.join(HOOKS_SUBDIR).join(REWRITE_HOOK_FILE); + if old_hook.exists() { + return HookStatus::Outdated; + } + return HookStatus::Ok; + } + + // Fall back to legacy script file check let Some(hook_path) = hook_installed_path() else { return HookStatus::Missing; }; @@ -43,6 +59,33 @@ pub fn status() -> HookStatus { } } +/// Check if the native binary command is registered in settings.json +fn binary_hook_registered(claude_dir: &std::path::Path) -> bool { + let settings_path = claude_dir.join(SETTINGS_JSON); + let content = match std::fs::read_to_string(&settings_path) { + Ok(c) if !c.trim().is_empty() => c, + _ => return false, + }; + let root: serde_json::Value = match serde_json::from_str(&content) { + Ok(v) => v, + Err(_) => return false, + }; + let pre_tool_use = match root + .get("hooks") + .and_then(|h| h.get(PRE_TOOL_USE_KEY)) + .and_then(|p| p.as_array()) + { + Some(arr) => arr, + None => return false, + }; + pre_tool_use + .iter() + .filter_map(|entry| entry.get("hooks")?.as_array()) + .flatten() + .filter_map(|hook| hook.get("command")?.as_str()) + .any(|cmd| cmd == CLAUDE_HOOK_COMMAND) +} + /// Check if the installed hook is missing or outdated, warn once per day. 
pub fn maybe_warn() { // Don't block startup — fail silently on any error @@ -148,30 +191,21 @@ mod tests { #[test] fn test_status_returns_valid_variant() { - // Skip on machines without Claude Code or without hook + // Skip on machines without Claude Code let home = match dirs::home_dir() { Some(h) => h, None => return, }; - if !home - .join(".claude") - .join("hooks") - .join("rtk-rewrite.sh") - .exists() - { - // No hook — status should be Missing (if .claude exists) or Ok (if not) - let s = status(); - if home.join(".claude").exists() { - assert_eq!(s, HookStatus::Missing); - } else { - assert_eq!(s, HookStatus::Ok); - } + let claude_dir = home.join(".claude"); + if !claude_dir.exists() { + assert_eq!(status(), HookStatus::Ok); return; } + // With .claude dir present, status must be one of the valid variants let s = status(); assert!( - s == HookStatus::Ok || s == HookStatus::Outdated, - "Expected Ok or Outdated when hook exists, got {:?}", + s == HookStatus::Ok || s == HookStatus::Outdated || s == HookStatus::Missing, + "Expected valid HookStatus variant, got {:?}", s ); } diff --git a/src/hooks/hook_cmd.rs b/src/hooks/hook_cmd.rs index 5d72c0b37..9025a73af 100644 --- a/src/hooks/hook_cmd.rs +++ b/src/hooks/hook_cmd.rs @@ -1,11 +1,15 @@ //! Processes incoming hook calls from AI agents and rewrites commands on the fly. +//! +//! Uses `writeln!(stdout, ...)` instead of `println!` — accidental stdout/stderr +//! corrupts the JSON protocol (Claude Code bug #4669 silently disables the hook). 
use super::constants::PRE_TOOL_USE_KEY; +use super::permissions::{self, PermissionVerdict}; use anyhow::{Context, Result}; use serde_json::{json, Value}; -use std::io::{self, Read}; +use std::io::{self, Read, Write}; -use crate::discover::registry::rewrite_command; +use crate::discover::registry::{has_heredoc, rewrite_command}; // ── Copilot hook (VS Code + Copilot CLI) ────────────────────── @@ -35,7 +39,7 @@ pub fn run_copilot() -> Result<()> { let v: Value = match serde_json::from_str(input) { Ok(v) => v, Err(e) => { - eprintln!("[rtk hook] Failed to parse JSON input: {e}"); + let _ = writeln!(io::stderr(), "[rtk hook] Failed to parse JSON input: {e}"); return Ok(()); } }; @@ -88,7 +92,7 @@ fn detect_format(v: &Value) -> HookFormat { } fn get_rewritten(cmd: &str) -> Option { - if cmd.contains("<<") { + if has_heredoc(cmd) { return None; } @@ -119,7 +123,7 @@ fn handle_vscode(cmd: &str) -> Result<()> { "updatedInput": { "command": rewritten } } }); - println!("{output}"); + let _ = writeln!(io::stdout(), "{output}"); Ok(()) } @@ -136,15 +140,13 @@ fn handle_copilot_cli(cmd: &str) -> Result<()> { rewritten ) }); - println!("{output}"); + let _ = writeln!(io::stdout(), "{output}"); Ok(()) } // ── Gemini hook ─────────────────────────────────────────────── /// Run the Gemini CLI BeforeTool hook. -/// Reads JSON from stdin, rewrites shell commands to rtk equivalents, -/// outputs JSON to stdout in Gemini CLI format. 
pub fn run_gemini() -> Result<()> { let mut input = String::new(); io::stdin() @@ -170,8 +172,11 @@ pub fn run_gemini() -> Result<()> { return Ok(()); } - // Delegate to the single source of truth for command rewriting - match rewrite_command(cmd, &[]) { + let excluded = crate::core::config::Config::load() + .map(|c| c.hooks.exclude_commands) + .unwrap_or_default(); + + match rewrite_command(cmd, &excluded) { Some(rewritten) => print_rewrite(&rewritten), None => print_allow(), } @@ -180,7 +185,7 @@ pub fn run_gemini() -> Result<()> { } fn print_allow() { - println!(r#"{{"decision":"allow"}}"#); + let _ = writeln!(io::stdout(), r#"{{"decision":"allow"}}"#); } fn print_rewrite(cmd: &str) { @@ -192,7 +197,225 @@ fn print_rewrite(cmd: &str) { } } }); - println!("{}", output); + let _ = writeln!(io::stdout(), "{}", output); +} + +// ── Audit logging ───────────────────────────────────────────── + +/// Best-effort audit log when RTK_HOOK_AUDIT=1. +fn audit_log(action: &str, original: &str, rewritten: &str) { + if std::env::var("RTK_HOOK_AUDIT").as_deref() != Ok("1") { + return; + } + let _ = audit_log_inner(action, original, rewritten); +} + +fn audit_log_inner(action: &str, original: &str, rewritten: &str) -> Option<()> { + let home = dirs::home_dir()?; + let dir = home.join(".local").join("share").join("rtk"); + std::fs::create_dir_all(&dir).ok()?; + let path = dir.join("hook-audit.log"); + let mut file = std::fs::OpenOptions::new() + .create(true) + .append(true) + .open(path) + .ok()?; + let ts = chrono::Local::now().format("%Y-%m-%dT%H:%M:%S"); + writeln!(file, "{} | {} | {} | {}", ts, action, original, rewritten).ok() +} + +// ── Claude Code native hook ──────────────────────────────────── + +/// Run the Claude Code PreToolUse hook natively. 
+pub fn run_claude() -> Result<()> { + let mut input = String::new(); + io::stdin() + .read_to_string(&mut input) + .context("Failed to read stdin")?; + + let input = input.trim(); + if input.is_empty() { + return Ok(()); + } + + let v: Value = match serde_json::from_str(input) { + Ok(v) => v, + Err(_) => return Ok(()), + }; + + let cmd = match v + .pointer("/tool_input/command") + .and_then(|c| c.as_str()) + .filter(|c| !c.is_empty()) + { + Some(c) => c.to_string(), + None => return Ok(()), + }; + + let verdict = permissions::check_command(&cmd); + if verdict == PermissionVerdict::Deny { + audit_log("skip:deny_rule", &cmd, ""); + return Ok(()); + } + + let rewritten = match get_rewritten(&cmd) { + Some(r) => r, + None => { + audit_log("skip:no_match", &cmd, ""); + return Ok(()); + } + }; + + audit_log("rewrite", &cmd, &rewritten); + + // Clone original tool_input, replace only "command" + let updated_input = { + let mut ti = v.get("tool_input").cloned().unwrap_or_else(|| json!({})); + if let Some(obj) = ti.as_object_mut() { + obj.insert("command".into(), Value::String(rewritten)); + } + ti + }; + + let mut hook_output = json!({ + "hookEventName": PRE_TOOL_USE_KEY, + "permissionDecisionReason": "RTK auto-rewrite", + "updatedInput": updated_input + }); + + // Only include permissionDecision for Allow (not Ask) + if verdict == PermissionVerdict::Allow { + hook_output + .as_object_mut() + .unwrap() + .insert("permissionDecision".into(), json!("allow")); + } + + let output = json!({ "hookSpecificOutput": hook_output }); + let _ = writeln!(io::stdout(), "{output}"); + Ok(()) +} + +/// Process a Claude hook payload from a string (for testing). 
+#[cfg(test)] +fn run_claude_inner(input: &str) -> Option { + let v: Value = serde_json::from_str(input).ok()?; + + let cmd = v + .pointer("/tool_input/command") + .and_then(|c| c.as_str()) + .filter(|c| !c.is_empty())?; + + let verdict = permissions::check_command(cmd); + if verdict == PermissionVerdict::Deny { + return None; + } + + let rewritten = get_rewritten(cmd)?; + + let updated_input = { + let mut ti = v.get("tool_input").cloned().unwrap_or_else(|| json!({})); + if let Some(obj) = ti.as_object_mut() { + obj.insert("command".into(), Value::String(rewritten)); + } + ti + }; + + let mut hook_output = json!({ + "hookEventName": PRE_TOOL_USE_KEY, + "permissionDecisionReason": "RTK auto-rewrite", + "updatedInput": updated_input + }); + + if verdict == PermissionVerdict::Allow { + hook_output + .as_object_mut() + .unwrap() + .insert("permissionDecision".into(), json!("allow")); + } + + let output = json!({ "hookSpecificOutput": hook_output }); + Some(output.to_string()) +} + +// ── Cursor native hook ───────────────────────────────────────── + +/// Run the Cursor Agent hook natively. 
+pub fn run_cursor() -> Result<()> { + let mut input = String::new(); + io::stdin() + .read_to_string(&mut input) + .context("Failed to read stdin")?; + + let input = input.trim(); + if input.is_empty() { + let _ = writeln!(io::stdout(), "{{}}"); + return Ok(()); + } + + let v: Value = match serde_json::from_str(input) { + Ok(v) => v, + Err(_) => { + let _ = writeln!(io::stdout(), "{{}}"); + return Ok(()); + } + }; + + let cmd = match v + .pointer("/tool_input/command") + .and_then(|c| c.as_str()) + .filter(|c| !c.is_empty()) + { + Some(c) => c.to_string(), + None => { + let _ = writeln!(io::stdout(), "{{}}"); + return Ok(()); + } + }; + + let rewritten = match get_rewritten(&cmd) { + Some(r) => r, + None => { + let _ = writeln!(io::stdout(), "{{}}"); + return Ok(()); + } + }; + + let output = json!({ + "permission": "allow", + "updated_input": { "command": rewritten } + }); + let _ = writeln!(io::stdout(), "{output}"); + Ok(()) +} + +/// Process a Cursor hook payload from a string (for testing). 
+#[cfg(test)] +fn run_cursor_inner(input: &str) -> String { + let v: Value = match serde_json::from_str(input) { + Ok(v) => v, + Err(_) => return "{}".to_string(), + }; + + let cmd = match v + .pointer("/tool_input/command") + .and_then(|c| c.as_str()) + .filter(|c| !c.is_empty()) + { + Some(c) => c.to_string(), + None => return "{}".to_string(), + }; + + match get_rewritten(&cmd) { + Some(rewritten) => { + let output = json!({ + "permission": "allow", + "updated_input": { "command": rewritten } + }); + output.to_string() + } + None => "{}".to_string(), + } } #[cfg(test)] @@ -272,7 +495,6 @@ mod tests { #[test] fn test_print_allow_format() { - // Verify the allow JSON format matches Gemini CLI expectations let expected = r#"{"decision":"allow"}"#; assert_eq!(expected, r#"{"decision":"allow"}"#); } @@ -297,7 +519,6 @@ mod tests { #[test] fn test_gemini_hook_uses_rewrite_command() { - // Verify that rewrite_command handles the cases we need for Gemini assert_eq!( rewrite_command("git status", &[]), Some("rtk git status".into()) @@ -306,12 +527,10 @@ mod tests { rewrite_command("cargo test", &[]), Some("rtk cargo test".into()) ); - // Already rtk → returned as-is (idempotent) assert_eq!( rewrite_command("rtk git status", &[]), Some("rtk git status".into()) ); - // Heredoc → no rewrite assert_eq!(rewrite_command("cat < String { + json!({ + "tool_name": "Bash", + "tool_input": { "command": cmd } + }) + .to_string() + } + + fn claude_input_with_fields(cmd: &str, timeout: u64, description: &str) -> String { + json!({ + "tool_name": "Bash", + "tool_input": { + "command": cmd, + "timeout": timeout, + "description": description + } + }) + .to_string() + } + + #[test] + fn test_claude_rewrite_git_status() { + let result = run_claude_inner(&claude_input("git status")).unwrap(); + let v: Value = serde_json::from_str(&result).unwrap(); + let cmd = v + .pointer("/hookSpecificOutput/updatedInput/command") + .and_then(|c| c.as_str()) + .unwrap(); + assert_eq!(cmd, "rtk git 
status"); + } + + #[test] + fn test_claude_rewrite_preserves_tool_input_fields() { + let input = claude_input_with_fields("git status", 30000, "Check repo status"); + let result = run_claude_inner(&input).unwrap(); + let v: Value = serde_json::from_str(&result).unwrap(); + let updated = &v["hookSpecificOutput"]["updatedInput"]; + assert_eq!(updated["command"], "rtk git status"); + assert_eq!(updated["timeout"], 30000); + assert_eq!(updated["description"], "Check repo status"); + } + + #[test] + fn test_claude_passthrough_no_output() { + assert!(run_claude_inner(&claude_input("htop")).is_none()); + } + + #[test] + fn test_claude_heredoc_passthrough() { + assert!(run_claude_inner(&claude_input("cat < String { + json!({ + "tool_name": "Bash", + "tool_input": { "command": cmd } + }) + .to_string() + } + + #[test] + fn test_cursor_rewrite_flat_format() { + let result = run_cursor_inner(&cursor_input("git status")); + let v: Value = serde_json::from_str(&result).unwrap(); + assert_eq!(v["permission"], "allow"); + assert_eq!(v["updated_input"]["command"], "rtk git status"); + assert!(v.get("hookSpecificOutput").is_none()); + } + + #[test] + fn test_cursor_passthrough_empty_json() { + let result = run_cursor_inner(&cursor_input("htop")); + assert_eq!(result, "{}"); + } + + #[test] + fn test_cursor_empty_input_empty_json() { + let result = run_cursor_inner(""); + assert_eq!(result, "{}"); + } + + #[test] + fn test_cursor_heredoc_passthrough() { + let result = run_cursor_inner(&cursor_input("cat < = content.trim().split(" | ").collect(); + assert_eq!( + parts.len(), + 4, + "Expected 4 pipe-delimited fields, got: {:?}", + parts + ); + assert_eq!(parts[1], "rewrite"); + assert_eq!(parts[2], "git status"); + assert_eq!(parts[3], "rtk git status"); + + let _ = std::fs::remove_dir_all(&tmp); + } } diff --git a/src/hooks/init.rs b/src/hooks/init.rs index ab64920f5..d1e4d9b3d 100644 --- a/src/hooks/init.rs +++ b/src/hooks/init.rs @@ -7,18 +7,12 @@ use std::path::{Path, PathBuf}; 
use tempfile::NamedTempFile; use super::constants::{ - BEFORE_TOOL_KEY, CLAUDE_DIR, GEMINI_HOOK_FILE, HOOKS_JSON, HOOKS_SUBDIR, PRE_TOOL_USE_KEY, - REWRITE_HOOK_FILE, SETTINGS_JSON, + BEFORE_TOOL_KEY, CLAUDE_DIR, CLAUDE_HOOK_COMMAND, CURSOR_HOOK_COMMAND, GEMINI_HOOK_FILE, + HOOKS_JSON, HOOKS_SUBDIR, PRE_TOOL_USE_KEY, REWRITE_HOOK_FILE, SETTINGS_JSON, }; use super::integrity; use crate::core::config; -// Embedded hook script (guards before set -euo pipefail) -const REWRITE_HOOK: &str = include_str!("../../hooks/claude/rtk-rewrite.sh"); - -// Embedded Cursor hook script (preToolUse format) -const CURSOR_REWRITE_HOOK: &str = include_str!("../../hooks/cursor/rtk-rewrite.sh"); - // Embedded OpenCode plugin (auto-rewrite) const OPENCODE_PLUGIN: &str = include_str!("../../hooks/opencode/rtk.ts"); @@ -305,62 +299,6 @@ pub fn run( Ok(()) } -/// Prepare hook directory and return paths (hook_dir, hook_path) -fn prepare_hook_paths() -> Result<(PathBuf, PathBuf)> { - let claude_dir = resolve_claude_dir()?; - let hook_dir = claude_dir.join("hooks"); - fs::create_dir_all(&hook_dir) - .with_context(|| format!("Failed to create hook directory: {}", hook_dir.display()))?; - let hook_path = hook_dir.join(REWRITE_HOOK_FILE); - Ok((hook_dir, hook_path)) -} - -/// Write hook file if missing or outdated, return true if changed -#[cfg(unix)] -fn ensure_hook_installed(hook_path: &Path, verbose: u8) -> Result { - let changed = if hook_path.exists() { - let existing = fs::read_to_string(hook_path) - .with_context(|| format!("Failed to read existing hook: {}", hook_path.display()))?; - - if existing == REWRITE_HOOK { - if verbose > 0 { - eprintln!("Hook already up to date: {}", hook_path.display()); - } - false - } else { - fs::write(hook_path, REWRITE_HOOK) - .with_context(|| format!("Failed to write hook to {}", hook_path.display()))?; - if verbose > 0 { - eprintln!("Updated hook: {}", hook_path.display()); - } - true - } - } else { - fs::write(hook_path, REWRITE_HOOK) - .with_context(|| 
format!("Failed to write hook to {}", hook_path.display()))?; - if verbose > 0 { - eprintln!("Created hook: {}", hook_path.display()); - } - true - }; - - // Set executable permissions - use std::os::unix::fs::PermissionsExt; - fs::set_permissions(hook_path, fs::Permissions::from_mode(0o755)) - .with_context(|| format!("Failed to set hook permissions: {}", hook_path.display()))?; - - // Store SHA-256 hash for runtime integrity verification. - // Always store (idempotent) to ensure baseline exists even for - // hooks installed before integrity checks were added. - integrity::store_hash(hook_path) - .with_context(|| format!("Failed to store integrity hash for {}", hook_path.display()))?; - if verbose > 0 && changed { - eprintln!("Stored integrity hash for hook"); - } - - Ok(changed) -} - /// Idempotent file write: create or update if content differs fn write_if_changed(path: &Path, content: &str, name: &str, verbose: u8) -> Result { if path.exists() { @@ -446,13 +384,13 @@ fn prompt_user_consent(settings_path: &Path) -> Result { } /// Print manual instructions for settings.json patching -fn print_manual_instructions(hook_path: &Path, include_opencode: bool) { +fn print_manual_instructions(hook_command: &str, include_opencode: bool) { println!("\n MANUAL STEP: Add this to ~/.claude/settings.json:"); println!(" {{"); println!(" \"hooks\": {{ \"PreToolUse\": [{{"); println!(" \"matcher\": \"Bash\","); println!(" \"hooks\": [{{ \"type\": \"command\","); - println!(" \"command\": \"{}\"", hook_path.display()); + println!(" \"command\": \"{}\"", hook_command); println!(" }}]"); println!(" }}]}}"); println!(" }}"); @@ -482,7 +420,8 @@ fn remove_hook_from_json(root: &mut serde_json::Value) -> bool { if let Some(hooks_array) = entry.get("hooks").and_then(|h| h.as_array()) { for hook in hooks_array { if let Some(command) = hook.get("command").and_then(|c| c.as_str()) { - if command.contains(REWRITE_HOOK_FILE) { + // Match both legacy script path and new binary command + if 
command.contains(REWRITE_HOOK_FILE) || command == CLAUDE_HOOK_COMMAND { return false; } } @@ -585,12 +524,12 @@ pub fn uninstall(global: bool, gemini: bool, codex: bool, cursor: bool, verbose: return Ok(()); } - // 1. Remove hook file + // 1. Remove legacy hook file (if exists from old installation) let hook_path = claude_dir.join(HOOKS_SUBDIR).join(REWRITE_HOOK_FILE); if hook_path.exists() { fs::remove_file(&hook_path) .with_context(|| format!("Failed to remove hook: {}", hook_path.display()))?; - removed.push(format!("Hook: {}", hook_path.display())); + removed.push(format!("Hook script: {}", hook_path.display())); } // 1b. Remove integrity hash file @@ -701,19 +640,16 @@ fn uninstall_codex_at(codex_dir: &Path, verbose: u8) -> Result> { Ok(removed) } -/// Orchestrator: patch settings.json with RTK hook +/// Orchestrator: patch settings.json with RTK hook (binary command variant) /// Handles reading, checking, prompting, merging, backing up, and atomic writing -fn patch_settings_json( - hook_path: &Path, +fn patch_settings_json_command( + hook_command: &str, mode: PatchMode, verbose: u8, include_opencode: bool, ) -> Result { let claude_dir = resolve_claude_dir()?; let settings_path = claude_dir.join(SETTINGS_JSON); - let hook_command = hook_path - .to_str() - .context("Hook path contains invalid UTF-8")?; // Read or create settings.json let mut root = if settings_path.exists() { @@ -741,12 +677,12 @@ fn patch_settings_json( // Handle mode match mode { PatchMode::Skip => { - print_manual_instructions(hook_path, include_opencode); + print_manual_instructions(hook_command, include_opencode); return Ok(PatchResult::Skipped); } PatchMode::Ask => { if !prompt_user_consent(&settings_path)? 
{ - print_manual_instructions(hook_path, include_opencode); + print_manual_instructions(hook_command, include_opencode); return Ok(PatchResult::Declined); } } @@ -856,7 +792,7 @@ fn insert_hook_entry(root: &mut serde_json::Value, hook_command: &str) { } /// Check if RTK hook is already present in settings.json -/// Matches on rtk-rewrite.sh substring to handle different path formats +/// Matches on legacy rtk-rewrite.sh path OR new `rtk hook claude` command fn hook_already_present(root: &serde_json::Value, hook_command: &str) -> bool { let pre_tool_use_array = match root .get("hooks") @@ -873,8 +809,7 @@ fn hook_already_present(root: &serde_json::Value, hook_command: &str) -> bool { .flatten() .filter_map(|hook| hook.get("command")?.as_str()) .any(|cmd| { - cmd == hook_command - || (cmd.contains(REWRITE_HOOK_FILE) && hook_command.contains(REWRITE_HOOK_FILE)) + cmd == hook_command || cmd == CLAUDE_HOOK_COMMAND || cmd.contains(REWRITE_HOOK_FILE) }) } @@ -910,9 +845,8 @@ fn run_default_mode( let rtk_md_path = claude_dir.join(RTK_MD); let claude_md_path = claude_dir.join(CLAUDE_MD); - // 1. Prepare hook directory and install hook - let (_hook_dir, hook_path) = prepare_hook_paths()?; - let hook_changed = ensure_hook_installed(&hook_path, verbose)?; + // 1. Migrate old hook script if present + migrate_old_hook_script(verbose); // 2. Write RTK.md write_if_changed(&rtk_md_path, RTK_SLIM, RTK_MD, verbose)?; @@ -929,13 +863,8 @@ fn run_default_mode( let migrated = patch_claude_md(&claude_md_path, verbose)?; // 4. 
Print success message - let hook_status = if hook_changed { - "installed/updated" - } else { - "already up to date" - }; - println!("\nRTK hook {} (global).\n", hook_status); - println!(" Hook: {}", hook_path.display()); + println!("\nRTK hook registered (global).\n"); + println!(" Command: {}", CLAUDE_HOOK_COMMAND); println!(" RTK.md: {} (10 lines)", rtk_md_path.display()); if let Some(path) = &opencode_plugin_path { println!(" OpenCode: {}", path.display()); @@ -947,13 +876,14 @@ fn run_default_mode( println!(" replaced with @RTK.md (10 lines)"); } - // 5. Patch settings.json - let patch_result = patch_settings_json(&hook_path, patch_mode, verbose, install_opencode)?; + // 5. Patch settings.json with binary command + let patch_result = + patch_settings_json_command(CLAUDE_HOOK_COMMAND, patch_mode, verbose, install_opencode)?; // Report result match patch_result { PatchResult::Patched => { - // Already printed by patch_settings_json + // Already printed by patch_settings_json_command } PatchResult::AlreadyPresent => { println!("\n settings.json: hook already present"); @@ -964,7 +894,7 @@ fn run_default_mode( } } PatchResult::Declined | PatchResult::Skipped => { - // Manual instructions already printed by patch_settings_json + // Manual instructions already printed } } @@ -976,6 +906,39 @@ fn run_default_mode( Ok(()) } +/// Migrate old hook script to new binary command. +/// Deletes `~/.claude/hooks/rtk-rewrite.sh` and `.rtk-hook.sha256` if present. 
+fn migrate_old_hook_script(verbose: u8) { + if let Some(home) = dirs::home_dir() { + let old_hook = home + .join(CLAUDE_DIR) + .join(HOOKS_SUBDIR) + .join(REWRITE_HOOK_FILE); + if old_hook.exists() { + if let Err(e) = std::fs::remove_file(&old_hook) { + if verbose > 0 { + eprintln!(" [warn] Failed to remove old hook script: {e}"); + } + } else if verbose > 0 { + eprintln!(" [ok] Removed old hook script: {}", old_hook.display()); + } + } + // Remove legacy hash file + let hash_file = home + .join(CLAUDE_DIR) + .join(HOOKS_SUBDIR) + .join(".rtk-hook.sha256"); + if hash_file.exists() { + let _ = std::fs::remove_file(&hash_file); + } + // Remove Cursor legacy hook + let cursor_hook = home.join(".cursor").join("hooks").join(REWRITE_HOOK_FILE); + if cursor_hook.exists() { + let _ = std::fs::remove_file(&cursor_hook); + } + } +} + /// Generate .rtk/filters.toml template in the current directory if not present. fn generate_project_filters_template(verbose: u8) -> Result<()> { let rtk_dir = std::path::Path::new(".rtk"); @@ -1049,9 +1012,8 @@ fn run_hook_only_mode( return Ok(()); } - // Prepare and install hook - let (_hook_dir, hook_path) = prepare_hook_paths()?; - let hook_changed = ensure_hook_installed(&hook_path, verbose)?; + // Migrate old hook script if present + migrate_old_hook_script(verbose); let opencode_plugin_path = if install_opencode { let path = prepare_opencode_plugin_path()?; @@ -1061,13 +1023,8 @@ fn run_hook_only_mode( None }; - let hook_status = if hook_changed { - "installed/updated" - } else { - "already up to date" - }; - println!("\nRTK hook {} (hook-only mode).\n", hook_status); - println!(" Hook: {}", hook_path.display()); + println!("\nRTK hook registered (hook-only mode).\n"); + println!(" Command: {}", CLAUDE_HOOK_COMMAND); if let Some(path) = &opencode_plugin_path { println!(" OpenCode: {}", path.display()); } @@ -1075,13 +1032,14 @@ fn run_hook_only_mode( " Note: No RTK.md created. 
Claude won't know about meta commands (gain, discover, proxy)." ); - // Patch settings.json - let patch_result = patch_settings_json(&hook_path, patch_mode, verbose, install_opencode)?; + // Patch settings.json with binary command + let patch_result = + patch_settings_json_command(CLAUDE_HOOK_COMMAND, patch_mode, verbose, install_opencode)?; // Report result match patch_result { PatchResult::Patched => { - // Already printed by patch_settings_json + // Already printed by patch_settings_json_command } PatchResult::AlreadyPresent => { println!("\n settings.json: hook already present"); @@ -1092,7 +1050,7 @@ fn run_hook_only_mode( } } PatchResult::Declined | PatchResult::Skipped => { - // Manual instructions already printed by patch_settings_json + // Manual instructions already printed } } @@ -1592,44 +1550,29 @@ fn resolve_cursor_dir() -> Result { resolve_home_subdir(".cursor") } -/// Install Cursor hooks: hook script + hooks.json +/// Install Cursor hooks: register binary command in hooks.json fn install_cursor_hooks(verbose: u8) -> Result<()> { let cursor_dir = resolve_cursor_dir()?; - let hooks_dir = cursor_dir.join("hooks"); - fs::create_dir_all(&hooks_dir).with_context(|| { - format!( - "Failed to create Cursor hooks directory: {}", - hooks_dir.display() - ) - })?; - // 1. Write hook script - let hook_path = hooks_dir.join(REWRITE_HOOK_FILE); - let hook_changed = write_if_changed(&hook_path, CURSOR_REWRITE_HOOK, "Cursor hook", verbose)?; - - #[cfg(unix)] - { - use std::os::unix::fs::PermissionsExt; - fs::set_permissions(&hook_path, fs::Permissions::from_mode(0o755)).with_context(|| { - format!( - "Failed to set Cursor hook permissions: {}", - hook_path.display() - ) - })?; + // Migrate old hook script if present + let old_hook = cursor_dir.join("hooks").join(REWRITE_HOOK_FILE); + if old_hook.exists() { + let _ = fs::remove_file(&old_hook); + if verbose > 0 { + eprintln!( + " [ok] Removed old Cursor hook script: {}", + old_hook.display() + ); + } } - // 2. 
Create or patch hooks.json + // Create or patch hooks.json with binary command let hooks_json_path = cursor_dir.join(HOOKS_JSON); let patched = patch_cursor_hooks_json(&hooks_json_path, verbose)?; // Report - let hook_status = if hook_changed { - "installed/updated" - } else { - "already up to date" - }; - println!("\nCursor hook {} (global).\n", hook_status); - println!(" Hook: {}", hook_path.display()); + println!("\nCursor hook registered (global).\n"); + println!(" Command: {}", CURSOR_HOOK_COMMAND); println!(" hooks.json: {}", hooks_json_path.display()); if patched { @@ -1689,6 +1632,7 @@ fn patch_cursor_hooks_json(path: &Path, verbose: u8) -> Result { } /// Check if RTK preToolUse hook is already present in Cursor hooks.json +/// Matches on legacy rtk-rewrite.sh path OR new `rtk hook cursor` command fn cursor_hook_already_present(root: &serde_json::Value) -> bool { let hooks = match root .get("hooks") @@ -1703,7 +1647,7 @@ fn cursor_hook_already_present(root: &serde_json::Value) -> bool { entry .get("command") .and_then(|c| c.as_str()) - .is_some_and(|cmd| cmd.contains(REWRITE_HOOK_FILE)) + .is_some_and(|cmd| cmd.contains(REWRITE_HOOK_FILE) || cmd == CURSOR_HOOK_COMMAND) }) } @@ -1734,7 +1678,7 @@ fn insert_cursor_hook_entry(root: &mut serde_json::Value) { .expect("preToolUse must be an array"); pre_tool_use.push(serde_json::json!({ - "command": "./hooks/rtk-rewrite.sh", + "command": CURSOR_HOOK_COMMAND, "matcher": "Shell" })); } @@ -1783,6 +1727,7 @@ fn remove_cursor_hooks(verbose: u8) -> Result> { /// Remove RTK preToolUse entry from Cursor hooks.json /// Returns true if entry was found and removed +/// Matches both legacy script path and new binary command fn remove_cursor_hook_from_json(root: &mut serde_json::Value) -> bool { let pre_tool_use = match root .get_mut("hooks") @@ -1798,7 +1743,7 @@ fn remove_cursor_hook_from_json(root: &mut serde_json::Value) -> bool { !entry .get("command") .and_then(|c| c.as_str()) - .is_some_and(|cmd| 
cmd.contains(REWRITE_HOOK_FILE)) + .is_some_and(|cmd| cmd.contains(REWRITE_HOOK_FILE) || cmd == CURSOR_HOOK_COMMAND) }); pre_tool_use.len() < original_len @@ -1822,8 +1767,22 @@ fn show_claude_config() -> Result<()> { println!("rtk Configuration:\n"); - // Check hook - if hook_path.exists() { + // Check hook: prefer binary command detection, fall back to script file + let settings_path = claude_dir.join(SETTINGS_JSON); + let binary_hook_registered = if settings_path.exists() { + let content = fs::read_to_string(&settings_path).unwrap_or_default(); + if let Ok(root) = serde_json::from_str::(&content) { + hook_already_present(&root, CLAUDE_HOOK_COMMAND) + } else { + false + } + } else { + false + }; + + if binary_hook_registered { + println!("[ok] Hook: {} (native binary command)", CLAUDE_HOOK_COMMAND); + } else if hook_path.exists() { #[cfg(unix)] { use std::os::unix::fs::PermissionsExt; @@ -1844,15 +1803,12 @@ fn show_claude_config() -> Result<()> { ); } else if !is_thin_delegator { println!( - "[warn] Hook: {} (outdated — inline logic, not thin delegator)", + "[warn] Hook: {} (outdated — run `rtk init -g` to upgrade to native binary)", hook_path.display() ); - println!( - " → Run `rtk init --global` to upgrade to the single source of truth hook" - ); } else if is_executable && has_guards { println!( - "[ok] Hook: {} (thin delegator, version {})", + "[warn] Hook: {} (legacy script v{} — run `rtk init -g` to upgrade)", hook_path.display(), hook_version ); @@ -1866,7 +1822,10 @@ fn show_claude_config() -> Result<()> { #[cfg(not(unix))] { - println!("[ok] Hook: {} (exists)", hook_path.display()); + println!( + "[warn] Hook: {} (legacy script — run `rtk init -g` to upgrade)", + hook_path.display() + ); } } else { println!("[--] Hook: not found"); @@ -1879,23 +1838,25 @@ fn show_claude_config() -> Result<()> { println!("[--] RTK.md: not found"); } - // Check hook integrity - match integrity::verify_hook_at(&hook_path) { - Ok(integrity::IntegrityStatus::Verified) => { - 
println!("[ok] Integrity: hook hash verified"); - } - Ok(integrity::IntegrityStatus::Tampered { .. }) => { - println!("[FAIL] Integrity: hook modified outside rtk init (run: rtk verify)"); - } - Ok(integrity::IntegrityStatus::NoBaseline) => { - println!("[warn] Integrity: no baseline hash (run: rtk init -g to establish)"); - } - Ok(integrity::IntegrityStatus::NotInstalled) - | Ok(integrity::IntegrityStatus::OrphanedHash) => { - // Don't show integrity line if hook isn't installed - } - Err(_) => { - println!("[warn] Integrity: check failed"); + // Check hook integrity (only relevant for legacy script hooks) + if hook_path.exists() && !binary_hook_registered { + match integrity::verify_hook_at(&hook_path) { + Ok(integrity::IntegrityStatus::Verified) => { + println!("[ok] Integrity: hook hash verified"); + } + Ok(integrity::IntegrityStatus::Tampered { .. }) => { + println!("[FAIL] Integrity: hook modified outside rtk init (run: rtk verify)"); + } + Ok(integrity::IntegrityStatus::NoBaseline) => { + println!("[warn] Integrity: no baseline hash (run: rtk init -g to establish)"); + } + Ok(integrity::IntegrityStatus::NotInstalled) + | Ok(integrity::IntegrityStatus::OrphanedHash) => { + // Don't show integrity line if hook isn't installed + } + Err(_) => { + println!("[warn] Integrity: check failed"); + } } } @@ -1927,14 +1888,12 @@ fn show_claude_config() -> Result<()> { println!("[--] Local (./CLAUDE.md): not found"); } - // Check settings.json - let settings_path = claude_dir.join(SETTINGS_JSON); + // Check settings.json (detailed status) if settings_path.exists() { let content = fs::read_to_string(&settings_path)?; if !content.trim().is_empty() { if let Ok(root) = serde_json::from_str::(&content) { - let hook_command = hook_path.display().to_string(); - if hook_already_present(&root, &hook_command) { + if hook_already_present(&root, CLAUDE_HOOK_COMMAND) { println!("[ok] settings.json: RTK hook configured"); } else { println!("[warn] settings.json: exists but RTK hook 
not configured"); @@ -1967,28 +1926,37 @@ fn show_claude_config() -> Result<()> { let cursor_hook = cursor_dir.join(HOOKS_SUBDIR).join(REWRITE_HOOK_FILE); let cursor_hooks_json = cursor_dir.join(HOOKS_JSON); - if cursor_hook.exists() { + // Check for binary command in hooks.json first + let cursor_binary_registered = if cursor_hooks_json.exists() { + let content = fs::read_to_string(&cursor_hooks_json).unwrap_or_default(); + if let Ok(root) = serde_json::from_str::(&content) { + cursor_hook_already_present(&root) + } else { + false + } + } else { + false + }; + + if cursor_binary_registered { + println!("[ok] Cursor hook: registered in hooks.json"); + } else if cursor_hook.exists() { #[cfg(unix)] { use std::os::unix::fs::PermissionsExt; let meta = fs::metadata(&cursor_hook)?; let is_executable = meta.permissions().mode() & 0o111 != 0; let content = fs::read_to_string(&cursor_hook)?; - let is_thin = content.contains("rtk rewrite"); + let _is_thin = content.contains("rtk rewrite"); if !is_executable { println!( - "[warn] Cursor hook: {} (NOT executable - run: chmod +x)", - cursor_hook.display() - ); - } else if is_thin { - println!( - "[ok] Cursor hook: {} (thin delegator)", + "[warn] Cursor hook: {} (legacy script, NOT executable)", cursor_hook.display() ); } else { println!( - "[warn] Cursor hook: {} (outdated - missing rtk rewrite delegation)", + "[warn] Cursor hook: {} (legacy script — run `rtk init -g --agent cursor` to upgrade)", cursor_hook.display() ); } @@ -1996,31 +1964,11 @@ fn show_claude_config() -> Result<()> { #[cfg(not(unix))] { - println!("[ok] Cursor hook: {} (exists)", cursor_hook.display()); + println!("[warn] Cursor hook: {} (legacy script — run `rtk init -g --agent cursor` to upgrade)", cursor_hook.display()); } } else { println!("[--] Cursor hook: not found"); } - - if cursor_hooks_json.exists() { - let content = fs::read_to_string(&cursor_hooks_json)?; - if !content.trim().is_empty() { - if let Ok(root) = serde_json::from_str::(&content) { - 
if cursor_hook_already_present(&root) { - println!("[ok] Cursor hooks.json: RTK preToolUse configured"); - } else { - println!("[warn] Cursor hooks.json: exists but RTK not configured"); - println!(" Run: rtk init -g --agent cursor"); - } - } else { - println!("[warn] Cursor hooks.json: exists but invalid JSON"); - } - } else { - println!("[--] Cursor hooks.json: empty"); - } - } else { - println!("[--] Cursor hooks.json: not found"); - } } else { println!("[--] Cursor: home dir not found"); } @@ -2434,20 +2382,6 @@ mod tests { ); } - #[test] - fn test_hook_has_guards() { - assert!(REWRITE_HOOK.contains("command -v rtk")); - assert!(REWRITE_HOOK.contains("command -v jq")); - // Guards (rtk/jq availability checks) must appear before the actual delegation call. - // The thin delegating hook no longer uses set -euo pipefail. - let jq_pos = REWRITE_HOOK.find("command -v jq").unwrap(); - let rtk_delegate_pos = REWRITE_HOOK.find("rtk rewrite \"$CMD\"").unwrap(); - assert!( - jq_pos < rtk_delegate_pos, - "Guards must appear before rtk rewrite delegation" - ); - } - #[test] fn test_migration_removes_old_block() { let input = r#"# My Config @@ -2508,23 +2442,15 @@ More content"#; } #[test] - #[cfg(unix)] - fn test_default_mode_creates_hook_and_rtk_md() { + fn test_default_mode_creates_rtk_md() { let temp = TempDir::new().unwrap(); - let hook_path = temp.path().join("rtk-rewrite.sh"); let rtk_md_path = temp.path().join("RTK.md"); - fs::write(&hook_path, REWRITE_HOOK).unwrap(); fs::write(&rtk_md_path, RTK_SLIM).unwrap(); - - use std::os::unix::fs::PermissionsExt; - fs::set_permissions(&hook_path, fs::Permissions::from_mode(0o755)).unwrap(); - - assert!(hook_path.exists()); assert!(rtk_md_path.exists()); - let metadata = fs::metadata(&hook_path).unwrap(); - assert!(metadata.permissions().mode() & 0o111 != 0); + let content = fs::read_to_string(&rtk_md_path).unwrap(); + assert_eq!(content, RTK_SLIM); } #[test] @@ -2766,6 +2692,23 @@ More notes 
assert!(!hook_already_present(&json_content, hook_command)); } + #[test] + fn test_hook_already_present_new_command() { + let json_content = serde_json::json!({ + "hooks": { + "PreToolUse": [{ + "matcher": "Bash", + "hooks": [{ + "type": "command", + "command": CLAUDE_HOOK_COMMAND + }] + }] + } + }); + + assert!(hook_already_present(&json_content, CLAUDE_HOOK_COMMAND)); + } + #[test] fn test_hook_not_present_other_hooks() { let json_content = serde_json::json!({ @@ -2943,6 +2886,40 @@ More notes assert_eq!(command, "/some/other/hook.sh"); } + #[test] + fn test_remove_hook_from_json_new_command() { + let mut json_content = serde_json::json!({ + "hooks": { + "PreToolUse": [ + { + "matcher": "Bash", + "hooks": [{ + "type": "command", + "command": "/some/other/hook.sh" + }] + }, + { + "matcher": "Bash", + "hooks": [{ + "type": "command", + "command": CLAUDE_HOOK_COMMAND + }] + } + ] + } + }); + + let removed = remove_hook_from_json(&mut json_content); + assert!(removed); + + let pre_tool_use = json_content["hooks"]["PreToolUse"].as_array().unwrap(); + assert_eq!(pre_tool_use.len(), 1); + assert_eq!( + pre_tool_use[0]["hooks"][0]["command"].as_str().unwrap(), + "/some/other/hook.sh" + ); + } + #[test] fn test_remove_hook_when_not_present() { let mut json_content = serde_json::json!({ @@ -2964,7 +2941,7 @@ More notes // ─── Cursor hooks.json tests ─── #[test] - fn test_cursor_hook_already_present_true() { + fn test_cursor_hook_already_present_legacy_script() { let json_content = serde_json::json!({ "version": 1, "hooks": { @@ -2977,6 +2954,20 @@ More notes assert!(cursor_hook_already_present(&json_content)); } + #[test] + fn test_cursor_hook_already_present_new_command() { + let json_content = serde_json::json!({ + "version": 1, + "hooks": { + "preToolUse": [{ + "command": CURSOR_HOOK_COMMAND, + "matcher": "Shell" + }] + } + }); + assert!(cursor_hook_already_present(&json_content)); + } + #[test] fn test_cursor_hook_already_present_false_empty() { let json_content = 
serde_json::json!({ "version": 1 }); @@ -3004,7 +2995,7 @@ More notes let hooks = json_content["hooks"]["preToolUse"].as_array().unwrap(); assert_eq!(hooks.len(), 1); - assert_eq!(hooks[0]["command"], "./hooks/rtk-rewrite.sh"); + assert_eq!(hooks[0]["command"], CURSOR_HOOK_COMMAND); assert_eq!(hooks[0]["matcher"], "Shell"); assert_eq!(json_content["version"], 1); } @@ -3029,7 +3020,7 @@ More notes let pre_tool_use = json_content["hooks"]["preToolUse"].as_array().unwrap(); assert_eq!(pre_tool_use.len(), 2); assert_eq!(pre_tool_use[0]["command"], "./hooks/other.sh"); - assert_eq!(pre_tool_use[1]["command"], "./hooks/rtk-rewrite.sh"); + assert_eq!(pre_tool_use[1]["command"], CURSOR_HOOK_COMMAND); // afterFileEdit should be preserved assert!(json_content["hooks"]["afterFileEdit"].is_array()); @@ -3056,36 +3047,37 @@ More notes } #[test] - fn test_remove_cursor_hook_not_present() { + fn test_remove_cursor_hook_from_json_new_command() { let mut json_content = serde_json::json!({ "version": 1, "hooks": { "preToolUse": [ - { "command": "./hooks/other.sh", "matcher": "Shell" } + { "command": "./hooks/other.sh", "matcher": "Shell" }, + { "command": CURSOR_HOOK_COMMAND, "matcher": "Shell" } ] } }); let removed = remove_cursor_hook_from_json(&mut json_content); - assert!(!removed); - } + assert!(removed); - #[test] - fn test_cursor_hook_script_has_guards() { - assert!(CURSOR_REWRITE_HOOK.contains("command -v rtk")); - assert!(CURSOR_REWRITE_HOOK.contains("command -v jq")); - let jq_pos = CURSOR_REWRITE_HOOK.find("command -v jq").unwrap(); - let rtk_delegate_pos = CURSOR_REWRITE_HOOK.find("rtk rewrite \"$CMD\"").unwrap(); - assert!( - jq_pos < rtk_delegate_pos, - "Guards must appear before rtk rewrite delegation" - ); + let hooks = json_content["hooks"]["preToolUse"].as_array().unwrap(); + assert_eq!(hooks.len(), 1); + assert_eq!(hooks[0]["command"], "./hooks/other.sh"); } #[test] - fn test_cursor_hook_outputs_cursor_format() { - 
assert!(CURSOR_REWRITE_HOOK.contains("\"permission\": \"allow\"")); - assert!(CURSOR_REWRITE_HOOK.contains("\"updated_input\"")); - assert!(!CURSOR_REWRITE_HOOK.contains("hookSpecificOutput")); + fn test_remove_cursor_hook_not_present() { + let mut json_content = serde_json::json!({ + "version": 1, + "hooks": { + "preToolUse": [ + { "command": "./hooks/other.sh", "matcher": "Shell" } + ] + } + }); + + let removed = remove_cursor_hook_from_json(&mut json_content); + assert!(!removed); } } diff --git a/src/hooks/integrity.rs b/src/hooks/integrity.rs index 27af46e95..5b0721d0f 100644 --- a/src/hooks/integrity.rs +++ b/src/hooks/integrity.rs @@ -120,6 +120,8 @@ pub fn remove_hash(hook_path: &Path) -> Result { /// /// Returns `IntegrityStatus` indicating the result. Callers decide /// how to handle each status (warn, block, ignore). +/// NOTE: Legacy — kept for backwards compatibility. Prefer `verify_hook_at()` directly. +#[allow(dead_code)] pub fn verify_hook() -> Result { let hook_path = resolve_hook_path()?; verify_hook_at(&hook_path) @@ -200,6 +202,25 @@ pub fn run_verify(verbose: u8) -> Result<()> { eprintln!("Hash: {}", hash_file.display()); } + // If no legacy script exists, check for native binary command registration + if !hook_path.exists() && !hash_file.exists() { + // Check if the native binary command is registered in settings.json + let home = dirs::home_dir().context("Cannot determine home directory")?; + let settings_path = home.join(CLAUDE_DIR).join("settings.json"); + if settings_path.exists() { + let content = fs::read_to_string(&settings_path).unwrap_or_default(); + if content.contains("rtk hook claude") { + println!("PASS native binary hook registered in settings.json"); + println!(" command: rtk hook claude"); + println!(" (no script file — integrity check not applicable)"); + return Ok(()); + } + } + println!("SKIP RTK hook not installed"); + println!(" Run `rtk init -g` to install."); + return Ok(()); + } + match verify_hook_at(&hook_path)? 
{ IntegrityStatus::Verified => { let hash = compute_hash(&hook_path)?; @@ -245,10 +266,21 @@ pub fn run_verify(verbose: u8) -> Result<()> { /// - `Tampered`: print warning to stderr, exit 1 /// - `OrphanedHash`: warn to stderr, continue /// +/// When RTK uses native binary commands (no script file), integrity +/// checking is a no-op — there is no script to tamper with. +/// /// No env-var bypass is provided — if the hook is legitimately modified, /// re-run `rtk init -g --auto-patch` to re-establish the baseline. pub fn runtime_check() -> Result<()> { - match verify_hook()? { + let hook_path = resolve_hook_path()?; + + // If the legacy script doesn't exist, skip integrity check entirely. + // In the new binary command model, there is no script file to verify. + if !hook_path.exists() { + return Ok(()); + } + + match verify_hook_at(&hook_path)? { IntegrityStatus::Verified | IntegrityStatus::NotInstalled => { // All good, proceed } diff --git a/src/hooks/mod.rs b/src/hooks/mod.rs index 14157c43f..02567fa2c 100644 --- a/src/hooks/mod.rs +++ b/src/hooks/mod.rs @@ -3,6 +3,7 @@ pub mod constants; pub mod hook_audit_cmd; pub mod hook_check; +#[deny(clippy::print_stdout, clippy::print_stderr)] pub mod hook_cmd; pub mod init; pub mod integrity; diff --git a/src/main.rs b/src/main.rs index 476b05167..5718fe6f1 100644 --- a/src/main.rs +++ b/src/main.rs @@ -1,5 +1,4 @@ mod analytics; -mod cmd; mod cmds; mod core; mod discover; @@ -710,6 +709,8 @@ enum Commands { enum HookCommands { /// Process Claude Code PreToolUse hook (reads JSON from stdin) Claude, + /// Process Cursor Agent hook (reads JSON from stdin) + Cursor, /// Process Gemini CLI BeforeTool hook (reads JSON from stdin) Gemini, /// Process Copilot preToolUse hook (VS Code + Copilot CLI, reads JSON from stdin) @@ -1951,7 +1952,11 @@ fn run_cli() -> Result { Commands::Hook { command } => match command { HookCommands::Claude => { - cmd::hook::claude::run()?; + hooks::hook_cmd::run_claude()?; + 0 + } + 
HookCommands::Cursor => { + hooks::hook_cmd::run_cursor()?; 0 } HookCommands::Gemini => { @@ -1962,16 +1967,22 @@ fn run_cli() -> Result { hooks::hook_cmd::run_copilot()?; 0 } - HookCommands::Check { agent, command } => { + HookCommands::Check { agent: _, command } => { + use crate::discover::registry::rewrite_command; let raw = command.join(" "); - let (rewritten, allowed, exit_code) = - cmd::hook::format_for_claude(cmd::check_for_hook(&raw, &agent)); - if allowed { - println!("{}", rewritten); - } else { - eprintln!("{}", rewritten); + let excluded = crate::core::config::Config::load() + .map(|c| c.hooks.exclude_commands) + .unwrap_or_default(); + match rewrite_command(&raw, &excluded) { + Some(rewritten) => { + println!("{}", rewritten); + 0 + } + None => { + eprintln!("No rewrite for: {}", raw); + 1 + } } - exit_code } }, @@ -1995,7 +2006,20 @@ fn run_cli() -> Result { None if !args.is_empty() => args.join(" "), None => String::new(), }; - cmd::exec::execute(&raw, cli.verbose)? + if raw.trim().is_empty() { + 0 + } else { + // Execute via shell passthrough with token tracking + use std::process::Command as ProcCommand; + let shell = if cfg!(windows) { "cmd" } else { "sh" }; + let flag = if cfg!(windows) { "/C" } else { "-c" }; + let status = ProcCommand::new(shell) + .arg(flag) + .arg(&raw) + .status() + .with_context(|| format!("Failed to execute: {}", raw))?; + status.code().unwrap_or(1) + } } Commands::Proxy { args } => { From 02e3a6f8f4639c86a585701cf5ed03c0f853e752 Mon Sep 17 00:00:00 2001 From: aesoft <43991222+aeppling@users.noreply.github.com> Date: Sat, 4 Apr 2026 19:47:20 +0200 Subject: [PATCH 04/44] fix(stream): wire run_streaming as standard exec for filter commands --- src/cmds/rust/runner.rs | 81 ++++++++++++++++++----------------------- src/core/runner.rs | 38 ++++++++----------- src/core/stream.rs | 11 +++++- 3 files changed, 60 insertions(+), 70 deletions(-) diff --git a/src/cmds/rust/runner.rs b/src/cmds/rust/runner.rs index 
101dd31c4..4a80f2fb6 100644 --- a/src/cmds/rust/runner.rs +++ b/src/cmds/rust/runner.rs @@ -1,9 +1,10 @@ //! Runs arbitrary commands and captures only stderr or test failures. +use crate::core::stream::{self, FilterMode, StdinMode}; use crate::core::tracking; use anyhow::{Context, Result}; use regex::Regex; -use std::process::{Command, Stdio}; +use std::process::Command; /// Run a command and filter output to show only errors/warnings pub fn run_err(command: &str, verbose: u8) -> Result { @@ -13,35 +14,29 @@ pub fn run_err(command: &str, verbose: u8) -> Result { eprintln!("Running: {}", command); } - let output = if cfg!(target_os = "windows") { - Command::new("cmd") - .args(["/C", command]) - .stdout(Stdio::piped()) - .stderr(Stdio::piped()) - .output() + let mut cmd = if cfg!(target_os = "windows") { + let mut c = Command::new("cmd"); + c.args(["/C", command]); + c } else { - Command::new("sh") - .args(["-c", command]) - .stdout(Stdio::piped()) - .stderr(Stdio::piped()) - .output() - } - .context("Failed to execute command")?; + let mut c = Command::new("sh"); + c.args(["-c", command]); + c + }; + + let result = stream::run_streaming(&mut cmd, StdinMode::Null, FilterMode::CaptureOnly) + .context("Failed to execute command")?; - let stdout = String::from_utf8_lossy(&output.stdout); - let stderr = String::from_utf8_lossy(&output.stderr); - let raw = format!("{}\n{}", stdout, stderr); - let filtered = filter_errors(&raw); + let raw = &result.raw; + let exit_code = result.exit_code; + let filtered = filter_errors(raw); let mut rtk = String::new(); if filtered.is_empty() { - if output.status.success() { + if exit_code == 0 { rtk.push_str("[ok] Command completed successfully (no errors)"); } else { - rtk.push_str(&format!( - "[FAIL] Command failed (exit code: {:?})\n", - output.status.code() - )); + rtk.push_str(&format!("[FAIL] Command failed (exit code: {})\n", exit_code)); let lines: Vec<&str> = raw.lines().collect(); for line in lines.iter().rev().take(10).rev() { 
rtk.push_str(&format!(" {}\n", line)); @@ -51,13 +46,12 @@ pub fn run_err(command: &str, verbose: u8) -> Result { rtk.push_str(&filtered); } - let exit_code = crate::core::utils::exit_code_from_output(&output, "err"); - if let Some(hint) = crate::core::tee::tee_and_hint(&raw, "err", exit_code) { + if let Some(hint) = crate::core::tee::tee_and_hint(raw, "err", exit_code) { println!("{}\n{}", rtk, hint); } else { println!("{}", rtk); } - timer.track(command, "rtk run-err", &raw, &rtk); + timer.track(command, "rtk run-err", raw, &rtk); Ok(exit_code) } @@ -69,33 +63,28 @@ pub fn run_test(command: &str, verbose: u8) -> Result { eprintln!("Running tests: {}", command); } - let output = if cfg!(target_os = "windows") { - Command::new("cmd") - .args(["/C", command]) - .stdout(Stdio::piped()) - .stderr(Stdio::piped()) - .output() + let mut cmd = if cfg!(target_os = "windows") { + let mut c = Command::new("cmd"); + c.args(["/C", command]); + c } else { - Command::new("sh") - .args(["-c", command]) - .stdout(Stdio::piped()) - .stderr(Stdio::piped()) - .output() - } - .context("Failed to execute test command")?; + let mut c = Command::new("sh"); + c.args(["-c", command]); + c + }; - let stdout = String::from_utf8_lossy(&output.stdout); - let stderr = String::from_utf8_lossy(&output.stderr); - let raw = format!("{}\n{}", stdout, stderr); + let result = stream::run_streaming(&mut cmd, StdinMode::Null, FilterMode::CaptureOnly) + .context("Failed to execute test command")?; - let exit_code = crate::core::utils::exit_code_from_output(&output, "test"); - let summary = extract_test_summary(&raw, command); - if let Some(hint) = crate::core::tee::tee_and_hint(&raw, "test", exit_code) { + let raw = &result.raw; + let exit_code = result.exit_code; + let summary = extract_test_summary(raw, command); + if let Some(hint) = crate::core::tee::tee_and_hint(raw, "test", exit_code) { println!("{}\n{}", summary, hint); } else { println!("{}", summary); } - timer.track(command, "rtk run-test", 
&raw, &summary); + timer.track(command, "rtk run-test", raw, &summary); Ok(exit_code) } diff --git a/src/core/runner.rs b/src/core/runner.rs index fb2fc7942..02ce9d41e 100644 --- a/src/core/runner.rs +++ b/src/core/runner.rs @@ -3,8 +3,9 @@ use anyhow::{Context, Result}; use std::process::Command; +use crate::core::stream::{self, FilterMode, StdinMode}; use crate::core::tracking; -use crate::core::utils::{exit_code_from_output, exit_code_from_status}; +use crate::core::utils::exit_code_from_status; pub fn print_with_hint(filtered: &str, raw: &str, tee_label: &str, exit_code: i32) { if let Some(hint) = crate::core::tee::tee_and_hint(raw, tee_label, exit_code) { @@ -65,53 +66,44 @@ where { let timer = tracking::TimedExecution::start(); - let output = cmd - .output() + // CaptureOnly: stderr streams live, stdout buffered silently. + // result.filtered = raw_stdout, result.raw = stdout + stderr + let result = stream::run_streaming(&mut cmd, StdinMode::Null, FilterMode::CaptureOnly) .with_context(|| format!("Failed to run {}", tool_name))?; - let stdout = String::from_utf8_lossy(&output.stdout); - let stderr = String::from_utf8_lossy(&output.stderr); - let raw = format!("{}\n{}", stdout, stderr); - let exit_code = exit_code_from_output(&output, tool_name); + let exit_code = result.exit_code; + let raw_stdout = &result.filtered; + let raw = &result.raw; - // On failure, skip filtering and return early (e.g. 
psql error messages - // containing '|' would be misinterpreted by the table parser) if opts.skip_filter_on_failure && exit_code != 0 { - if !stderr.trim().is_empty() { - eprintln!("{}", stderr.trim()); - } timer.track( &format!("{} {}", tool_name, args_display), &format!("rtk {} {}", tool_name, args_display), - &raw, - stderr.as_ref(), + raw, + raw, ); return Ok(exit_code); } let text_to_filter = if opts.filter_stdout_only { - &stdout + raw_stdout } else { - raw.as_str() + raw }; let filtered = filter_fn(text_to_filter); if let Some(label) = opts.tee_label { - print_with_hint(&filtered, &raw, label, exit_code); + print_with_hint(&filtered, raw, label, exit_code); } else if opts.no_trailing_newline { print!("{}", filtered); } else { println!("{}", filtered); } - if opts.filter_stdout_only && !stderr.trim().is_empty() { - eprintln!("{}", stderr.trim()); - } - let raw_for_tracking = if opts.filter_stdout_only { - stdout.as_ref() + raw_stdout } else { - raw.as_str() + raw }; timer.track( &format!("{} {}", tool_name, args_display), diff --git a/src/core/stream.rs b/src/core/stream.rs index ea04a9e9e..48bdde820 100644 --- a/src/core/stream.rs +++ b/src/core/stream.rs @@ -35,10 +35,10 @@ impl Option> StreamFilter for LineFilter { pub enum FilterMode { Streaming(Box), Buffered(fn(&str) -> String), + CaptureOnly, Passthrough, } -#[allow(dead_code)] pub enum StdinMode { Inherit, Filter(Box), @@ -202,6 +202,15 @@ pub fn run_streaming( Ok(_) => {} } } + FilterMode::CaptureOnly => { + for line in BufReader::new(stdout).lines().map_while(Result::ok) { + if raw_stdout.len() < RAW_CAP { + raw_stdout.push_str(&line); + raw_stdout.push('\n'); + } + } + filtered = raw_stdout.clone(); + } } } From a2312df5f2533639afd937686d94e12eab7760bd Mon Sep 17 00:00:00 2001 From: aesoft <43991222+aeppling@users.noreply.github.com> Date: Sat, 4 Apr 2026 20:38:43 +0200 Subject: [PATCH 05/44] feat(stream): standardize command execution with exec_capture() --- src/analytics/ccusage.rs | 21 +- 
src/cmds/cloud/aws_cmd.rs | 60 ++-- src/cmds/cloud/container.rs | 101 +++--- src/cmds/cloud/curl_cmd.rs | 21 +- src/cmds/cloud/wget_cmd.rs | 47 ++- src/cmds/dotnet/dotnet_cmd.rs | 54 ++-- src/cmds/git/git.rs | 559 ++++++++++++++++------------------ src/cmds/git/gt_cmd.rs | 23 +- src/cmds/go/go_cmd.rs | 55 ++-- src/cmds/go/golangci_cmd.rs | 18 +- src/cmds/js/lint_cmd.rs | 34 +-- src/cmds/js/playwright_cmd.rs | 17 +- src/cmds/js/pnpm_cmd.rs | 36 +-- src/cmds/js/prisma_cmd.rs | 59 ++-- src/cmds/js/vitest_cmd.rs | 16 +- src/cmds/python/pip_cmd.rs | 41 +-- src/cmds/system/format_cmd.rs | 11 +- src/cmds/system/grep_cmd.rs | 27 +- src/cmds/system/summary.rs | 35 +-- src/core/stream.rs | 72 +++++ src/hooks/permissions.rs | 15 +- 21 files changed, 626 insertions(+), 696 deletions(-) diff --git a/src/analytics/ccusage.rs b/src/analytics/ccusage.rs index 15d73109b..7e57b7061 100644 --- a/src/analytics/ccusage.rs +++ b/src/analytics/ccusage.rs @@ -4,6 +4,7 @@ //! Claude Code API usage metrics. Handles subprocess execution, JSON parsing, //! and graceful degradation when ccusage is unavailable. 
+use crate::core::stream::exec_capture; use crate::core::utils::{resolved_command, tool_exists}; use anyhow::{Context, Result}; use serde::Deserialize; @@ -131,34 +132,30 @@ pub fn fetch(granularity: Granularity) -> Result>> { Granularity::Monthly => "monthly", }; - let output = cmd - .arg(subcommand) + cmd.arg(subcommand) .arg("--json") .arg("--since") - .arg("20250101") // 90 days back approx - .output(); + .arg("20250101"); // 90 days back approx - let output = match output { + let result = match exec_capture(&mut cmd) { Err(e) => { eprintln!("[warn] ccusage execution failed: {}", e); return Ok(None); } - Ok(o) => o, + Ok(r) => r, }; - if !output.status.success() { - let stderr = String::from_utf8_lossy(&output.stderr); + if !result.success() { eprintln!( "[warn] ccusage exited with {}: {}", - output.status, - stderr.trim() + result.exit_code, + result.stderr.trim() ); return Ok(None); } - let stdout = String::from_utf8_lossy(&output.stdout); let periods = - parse_json(&stdout, granularity).context("Failed to parse ccusage JSON output")?; + parse_json(&result.stdout, granularity).context("Failed to parse ccusage JSON output")?; Ok(Some(periods)) } diff --git a/src/cmds/cloud/aws_cmd.rs b/src/cmds/cloud/aws_cmd.rs index cf5f02b8c..c344cea0d 100644 --- a/src/cmds/cloud/aws_cmd.rs +++ b/src/cmds/cloud/aws_cmd.rs @@ -3,11 +3,9 @@ //! Replaces verbose `--output table`/`text` with JSON, then compresses. //! Specialized filters for high-frequency commands (STS, S3, EC2, ECS, RDS, CloudFormation). 
+use crate::core::stream::{exec_capture, CaptureResult}; use crate::core::tracking; -use crate::core::utils::{ - exit_code_from_output, exit_code_from_status, join_with_overflow, resolved_command, - truncate_iso_date, -}; +use crate::core::utils::{join_with_overflow, resolved_command, truncate_iso_date}; use crate::json_cmd; use anyhow::{Context, Result}; use serde_json::Value; @@ -86,21 +84,20 @@ fn run_generic(subcommand: &str, args: &[String], verbose: u8, full_sub: &str) - eprintln!("Running: aws {}", full_sub); } - let output = cmd.output().context("Failed to run aws CLI")?; - let raw = String::from_utf8_lossy(&output.stdout).to_string(); - let stderr = String::from_utf8_lossy(&output.stderr).to_string(); + let result = exec_capture(&mut cmd).context("Failed to run aws CLI")?; - if !output.status.success() { + if !result.success() { timer.track( &format!("aws {}", full_sub), &format!("rtk aws {}", full_sub), - &stderr, - &stderr, + &result.stderr, + &result.stderr, ); - eprintln!("{}", stderr.trim()); - return Ok(exit_code_from_output(&output, "aws")); + eprintln!("{}", result.stderr.trim()); + return Ok(result.exit_code); } + let raw = result.stdout; let filtered = match json_cmd::filter_json_string(&raw, JSON_COMPRESS_DEPTH) { Ok(schema) => { println!("{}", schema); @@ -127,7 +124,7 @@ fn run_aws_json( sub_args: &[&str], extra_args: &[String], verbose: u8, -) -> Result<(String, String, std::process::ExitStatus)> { +) -> Result { let mut cmd = resolved_command("aws"); for arg in sub_args { cmd.arg(arg); @@ -153,17 +150,14 @@ fn run_aws_json( eprintln!("Running: {}", cmd_desc); } - let output = cmd - .output() + let result = exec_capture(&mut cmd) .context(format!("Failed to run {}", cmd_desc))?; - let stdout = String::from_utf8_lossy(&output.stdout).to_string(); - let stderr = String::from_utf8_lossy(&output.stderr).to_string(); - if !output.status.success() { - eprintln!("{}", stderr.trim()); + if !result.success() { + eprintln!("{}", result.stderr.trim()); 
} - Ok((stdout, stderr, output.status)) + Ok(result) } fn run_aws_filtered( @@ -175,14 +169,14 @@ fn run_aws_filtered( let timer = tracking::TimedExecution::start(); let label = format!("aws {}", sub_args.join(" ")); let rtk_label = format!("rtk {}", label); - let (raw, stderr, status) = run_aws_json(sub_args, extra_args, verbose)?; - if !status.success() { - timer.track(&label, &rtk_label, &stderr, &stderr); - return Ok(exit_code_from_status(&status, "aws")); + let result = run_aws_json(sub_args, extra_args, verbose)?; + if !result.success() { + timer.track(&label, &rtk_label, &result.stderr, &result.stderr); + return Ok(result.exit_code); } - let filtered = filter_fn(&raw).unwrap_or_else(|| raw.clone()); + let filtered = filter_fn(&result.stdout).unwrap_or_else(|| result.stdout.clone()); println!("{}", filtered); - timer.track(&label, &rtk_label, &raw, &filtered); + timer.track(&label, &rtk_label, &result.stdout, &filtered); Ok(0) } @@ -209,16 +203,16 @@ fn run_s3_ls(extra_args: &[String], verbose: u8) -> Result { eprintln!("Running: aws s3 ls {}", extra_args.join(" ")); } - let output = cmd.output().context("Failed to run aws s3 ls")?; - let raw = String::from_utf8_lossy(&output.stdout).to_string(); + let result = exec_capture(&mut cmd).context("Failed to run aws s3 ls")?; - if !output.status.success() { - let stderr = String::from_utf8_lossy(&output.stderr).to_string(); - timer.track("aws s3 ls", "rtk aws s3 ls", &stderr, &stderr); - eprintln!("{}", stderr.trim()); - return Ok(exit_code_from_output(&output, "aws")); + if !result.success() { + timer.track("aws s3 ls", "rtk aws s3 ls", &result.stderr, &result.stderr); + eprintln!("{}", result.stderr.trim()); + return Ok(result.exit_code); } + let raw = result.stdout; + let filtered = filter_s3_ls(&raw); println!("{}", filtered); diff --git a/src/cmds/cloud/container.rs b/src/cmds/cloud/container.rs index 25027a37a..3931ebf6e 100644 --- a/src/cmds/cloud/container.rs +++ b/src/cmds/cloud/container.rs @@ -1,8 +1,9 
@@ //! Filters Docker and kubectl output into compact summaries. use crate::core::runner::{self, RunOptions}; +use crate::core::stream::exec_capture; use crate::core::tracking; -use crate::core::utils::{exit_code_from_output, resolved_command}; +use crate::core::utils::resolved_command; use anyhow::{Context, Result}; use serde_json::Value; use std::ffi::OsString; @@ -53,29 +54,24 @@ where fn docker_ps(_verbose: u8) -> Result { let timer = tracking::TimedExecution::start(); - let raw = resolved_command("docker") - .args(["ps"]) - .output() - .map(|o| String::from_utf8_lossy(&o.stdout).to_string()) + let raw = exec_capture(resolved_command("docker").args(["ps"])) + .map(|r| r.stdout) .unwrap_or_default(); - let output = resolved_command("docker") - .args([ - "ps", - "--format", - "{{.ID}}\t{{.Names}}\t{{.Status}}\t{{.Image}}\t{{.Ports}}", - ]) - .output() - .context("Failed to run docker ps")?; - - if !output.status.success() { - let stderr = String::from_utf8_lossy(&output.stderr); - eprint!("{}", stderr); + let result = exec_capture(resolved_command("docker").args([ + "ps", + "--format", + "{{.ID}}\t{{.Names}}\t{{.Status}}\t{{.Image}}\t{{.Ports}}", + ])) + .context("Failed to run docker ps")?; + + if !result.success() { + eprint!("{}", result.stderr); timer.track("docker ps", "rtk docker ps", &raw, &raw); - return Ok(exit_code_from_output(&output, "docker")); + return Ok(result.exit_code); } - let stdout = String::from_utf8_lossy(&output.stdout); + let stdout = result.stdout; let mut rtk = String::new(); if stdout.trim().is_empty() { @@ -122,25 +118,24 @@ fn docker_ps(_verbose: u8) -> Result { fn docker_images(_verbose: u8) -> Result { let timer = tracking::TimedExecution::start(); - let raw = resolved_command("docker") - .args(["images"]) - .output() - .map(|o| String::from_utf8_lossy(&o.stdout).to_string()) + let raw = exec_capture(resolved_command("docker").args(["images"])) + .map(|r| r.stdout) .unwrap_or_default(); - let output = resolved_command("docker") - 
.args(["images", "--format", "{{.Repository}}:{{.Tag}}\t{{.Size}}"]) - .output() - .context("Failed to run docker images")?; + let result = exec_capture(resolved_command("docker").args([ + "images", + "--format", + "{{.Repository}}:{{.Tag}}\t{{.Size}}", + ])) + .context("Failed to run docker images")?; - if !output.status.success() { - let stderr = String::from_utf8_lossy(&output.stderr); - eprint!("{}", stderr); + if !result.success() { + eprint!("{}", result.stderr); timer.track("docker images", "rtk docker images", &raw, &raw); - return Ok(exit_code_from_output(&output, "docker")); + return Ok(result.exit_code); } - let stdout = String::from_utf8_lossy(&output.stdout); + let stdout = result.stdout; let lines: Vec<&str> = stdout.lines().collect(); let mut rtk = String::new(); @@ -532,35 +527,29 @@ pub fn run_compose_ps(verbose: u8) -> Result { let timer = tracking::TimedExecution::start(); // Raw output for token tracking - let raw_output = resolved_command("docker") - .args(["compose", "ps"]) - .output() + let raw_result = exec_capture(resolved_command("docker").args(["compose", "ps"])) .context("Failed to run docker compose ps")?; - if !raw_output.status.success() { - let stderr = String::from_utf8_lossy(&raw_output.stderr); - eprintln!("{}", stderr); - return Ok(exit_code_from_output(&raw_output, "docker")); + if !raw_result.success() { + eprintln!("{}", raw_result.stderr); + return Ok(raw_result.exit_code); } - let raw = String::from_utf8_lossy(&raw_output.stdout).to_string(); + let raw = raw_result.stdout; // Structured output for parsing (same pattern as docker_ps) - let output = resolved_command("docker") - .args([ - "compose", - "ps", - "--format", - "{{.Name}}\t{{.Image}}\t{{.Status}}\t{{.Ports}}", - ]) - .output() - .context("Failed to run docker compose ps --format")?; - - if !output.status.success() { - let stderr = String::from_utf8_lossy(&output.stderr); - eprintln!("{}", stderr); - return Ok(exit_code_from_output(&output, "docker")); - } - let 
structured = String::from_utf8_lossy(&output.stdout).to_string(); + let result = exec_capture(resolved_command("docker").args([ + "compose", + "ps", + "--format", + "{{.Name}}\t{{.Image}}\t{{.Status}}\t{{.Ports}}", + ])) + .context("Failed to run docker compose ps --format")?; + + if !result.success() { + eprintln!("{}", result.stderr); + return Ok(result.exit_code); + } + let structured = result.stdout; if verbose > 0 { eprintln!("raw docker compose ps:\n{}", raw); diff --git a/src/cmds/cloud/curl_cmd.rs b/src/cmds/cloud/curl_cmd.rs index d6930ef67..94a687259 100644 --- a/src/cmds/cloud/curl_cmd.rs +++ b/src/cmds/cloud/curl_cmd.rs @@ -1,7 +1,8 @@ //! Runs curl and auto-compresses JSON responses. +use crate::core::stream::exec_capture; use crate::core::tracking; -use crate::core::utils::{exit_code_from_output, resolved_command, truncate}; +use crate::core::utils::{resolved_command, truncate}; use crate::json_cmd; use anyhow::{Context, Result}; @@ -20,25 +21,23 @@ pub fn run(args: &[String], verbose: u8) -> Result { eprintln!("Running: curl -s {}", args.join(" ")); } - let output = cmd.output().context("Failed to run curl")?; - let stdout = String::from_utf8_lossy(&output.stdout); - let stderr = String::from_utf8_lossy(&output.stderr); + let result = exec_capture(&mut cmd).context("Failed to run curl")?; // Early exit: don't feed HTTP error bodies (HTML 404 etc.) 
through JSON schema filter - if !output.status.success() { - let msg = if stderr.trim().is_empty() { - stdout.trim().to_string() + if !result.success() { + let msg = if result.stderr.trim().is_empty() { + result.stdout.trim().to_string() } else { - stderr.trim().to_string() + result.stderr.trim().to_string() }; eprintln!("FAILED: curl {}", msg); - return Ok(exit_code_from_output(&output, "curl")); + return Ok(result.exit_code); } - let raw = stdout.to_string(); + let raw = result.stdout.clone(); // Auto-detect JSON and pipe through filter - let filtered = filter_curl_output(&stdout); + let filtered = filter_curl_output(&result.stdout); println!("{}", filtered); timer.track( diff --git a/src/cmds/cloud/wget_cmd.rs b/src/cmds/cloud/wget_cmd.rs index 69dc93ab2..4faed8081 100644 --- a/src/cmds/cloud/wget_cmd.rs +++ b/src/cmds/cloud/wget_cmd.rs @@ -1,5 +1,6 @@ +use crate::core::stream::exec_capture; use crate::core::tracking; -use crate::core::utils::{exit_code_from_output, resolved_command}; +use crate::core::utils::resolved_command; use anyhow::{Context, Result}; /// Compact wget - strips progress bars, shows only result @@ -19,18 +20,14 @@ pub fn run(url: &str, args: &[String], verbose: u8) -> Result { } cmd_args.push(url); - let output = resolved_command("wget") - .args(&cmd_args) - .output() - .context("Failed to run wget")?; + let mut cmd = resolved_command("wget"); + cmd.args(&cmd_args); + let result = exec_capture(&mut cmd).context("Failed to run wget")?; - let stderr = String::from_utf8_lossy(&output.stderr); - let stdout = String::from_utf8_lossy(&output.stdout); + let raw_output = format!("{}\n{}", result.stderr, result.stdout); - let raw_output = format!("{}\n{}", stderr, stdout); - - if output.status.success() { - let filename = extract_filename_from_output(&stderr, url, args); + if result.success() { + let filename = extract_filename_from_output(&result.stderr, url, args); let size = get_file_size(&filename); let msg = format!( "{} ok | {} | {}", @@ -41,11 
+38,11 @@ pub fn run(url: &str, args: &[String], verbose: u8) -> Result { println!("{}", msg); timer.track(&format!("wget {}", url), "rtk wget", &raw_output, &msg); } else { - let error = parse_error(&stderr, &stdout); + let error = parse_error(&result.stderr, &result.stdout); let msg = format!("{} FAILED: {}", compact_url(url), error); println!("{}", msg); timer.track(&format!("wget {}", url), "rtk wget", &raw_output, &msg); - return Ok(exit_code_from_output(&output, "wget")); + return Ok(result.exit_code); } Ok(0) @@ -65,16 +62,13 @@ pub fn run_stdout(url: &str, args: &[String], verbose: u8) -> Result { } cmd_args.push(url); - let output = resolved_command("wget") - .args(&cmd_args) - .output() - .context("Failed to run wget")?; + let mut cmd = resolved_command("wget"); + cmd.args(&cmd_args); + let result = exec_capture(&mut cmd).context("Failed to run wget")?; - if output.status.success() { - let content = String::from_utf8_lossy(&output.stdout); - let lines: Vec<&str> = content.lines().collect(); + if result.success() { + let lines: Vec<&str> = result.stdout.lines().collect(); let total = lines.len(); - let raw_output = content.to_string(); let mut rtk_output = String::new(); if total > 20 { @@ -82,7 +76,7 @@ pub fn run_stdout(url: &str, args: &[String], verbose: u8) -> Result { "{} ok | {} lines | {}\n", compact_url(url), total, - format_size(output.stdout.len() as u64) + format_size(result.stdout.len() as u64) )); rtk_output.push_str("--- first 10 lines ---\n"); for line in lines.iter().take(10) { @@ -99,16 +93,15 @@ pub fn run_stdout(url: &str, args: &[String], verbose: u8) -> Result { timer.track( &format!("wget -O - {}", url), "rtk wget -o", - &raw_output, + &result.stdout, &rtk_output, ); } else { - let stderr = String::from_utf8_lossy(&output.stderr); - let error = parse_error(&stderr, ""); + let error = parse_error(&result.stderr, ""); let msg = format!("{} FAILED: {}", compact_url(url), error); println!("{}", msg); - timer.track(&format!("wget -O - 
{}", url), "rtk wget -o", &stderr, &msg); - return Ok(exit_code_from_output(&output, "wget")); + timer.track(&format!("wget -O - {}", url), "rtk wget -o", &result.stderr, &msg); + return Ok(result.exit_code); } Ok(0) diff --git a/src/cmds/dotnet/dotnet_cmd.rs b/src/cmds/dotnet/dotnet_cmd.rs index f1e5fe0d5..a60935711 100644 --- a/src/cmds/dotnet/dotnet_cmd.rs +++ b/src/cmds/dotnet/dotnet_cmd.rs @@ -1,8 +1,9 @@ //! Filters dotnet CLI output — build, test, and format results. use crate::binlog; +use crate::core::stream::exec_capture; use crate::core::tracking; -use crate::core::utils::{exit_code_from_output, resolved_command, truncate}; +use crate::core::utils::{resolved_command, truncate}; use crate::dotnet_format_report; use crate::dotnet_trx; use anyhow::{Context, Result}; @@ -46,10 +47,8 @@ pub fn run_format(args: &[String], verbose: u8) -> Result { } let command_started_at = SystemTime::now(); - let output = cmd.output().context("Failed to run dotnet format")?; - let stdout = String::from_utf8_lossy(&output.stdout); - let stderr = String::from_utf8_lossy(&output.stderr); - let raw = format!("{}\n{}", stdout, stderr); + let result = exec_capture(&mut cmd).context("Failed to run dotnet format")?; + let raw = format!("{}\n{}", result.stdout, result.stderr); let check_mode = !has_write_mode_override(args); let filtered = @@ -69,7 +68,7 @@ pub fn run_format(args: &[String], verbose: u8) -> Result { } } - Ok(exit_code_from_output(&output, "dotnet")) + Ok(result.exit_code) } pub fn run_passthrough(args: &[OsString], verbose: u8) -> Result { @@ -91,16 +90,13 @@ pub fn run_passthrough(args: &[OsString], verbose: u8) -> Result { eprintln!("Running: dotnet {} ...", subcommand); } - let output = cmd - .output() + let result = exec_capture(&mut cmd) .with_context(|| format!("Failed to run dotnet {}", subcommand))?; - let stdout = String::from_utf8_lossy(&output.stdout); - let stderr = String::from_utf8_lossy(&output.stderr); - let raw = format!("{}\n{}", stdout, stderr); + 
let raw = format!("{}\n{}", result.stdout, result.stderr); - print!("{}", stdout); - eprint!("{}", stderr); + print!("{}", result.stdout); + eprint!("{}", result.stderr); timer.track( &format!("dotnet {}", subcommand), @@ -109,7 +105,7 @@ pub fn run_passthrough(args: &[OsString], verbose: u8) -> Result { &raw, ); - Ok(exit_code_from_output(&output, "dotnet")) + Ok(result.exit_code) } fn run_dotnet_with_binlog(subcommand: &str, args: &[String], verbose: u8) -> Result { @@ -135,27 +131,25 @@ fn run_dotnet_with_binlog(subcommand: &str, args: &[String], verbose: u8) -> Res } let command_started_at = SystemTime::now(); - let output = cmd - .output() + let result = exec_capture(&mut cmd) .with_context(|| format!("Failed to run dotnet {}", subcommand))?; - let stdout = String::from_utf8_lossy(&output.stdout); - let stderr = String::from_utf8_lossy(&output.stderr); - let raw = format!("{}\n{}", stdout, stderr); + let raw = format!("{}\n{}", result.stdout, result.stderr); + let command_success = result.success(); let filtered = match subcommand { "build" => { let binlog_summary = if should_expect_binlog && binlog_path.exists() { normalize_build_summary( binlog::parse_build(&binlog_path).unwrap_or_default(), - output.status.success(), + command_success, ) } else { binlog::BuildSummary::default() }; let raw_summary = normalize_build_summary( binlog::parse_build_from_text(&raw), - output.status.success(), + command_success, ); let summary = merge_build_summaries(binlog_summary, raw_summary); format_build_output(&summary, &binlog_path) @@ -176,18 +170,18 @@ fn run_dotnet_with_binlog(subcommand: &str, args: &[String], verbose: u8) -> Res command_started_at, ); - let summary = normalize_test_summary(summary, output.status.success()); + let summary = normalize_test_summary(summary, command_success); let binlog_diagnostics = if should_expect_binlog && binlog_path.exists() { normalize_build_summary( binlog::parse_build(&binlog_path).unwrap_or_default(), - output.status.success(), + 
command_success, ) } else { binlog::BuildSummary::default() }; let raw_diagnostics = normalize_build_summary( binlog::parse_build_from_text(&raw), - output.status.success(), + command_success, ); let test_build_summary = merge_build_summaries(binlog_diagnostics, raw_diagnostics); format_test_output( @@ -201,14 +195,14 @@ fn run_dotnet_with_binlog(subcommand: &str, args: &[String], verbose: u8) -> Res let binlog_summary = if should_expect_binlog && binlog_path.exists() { normalize_restore_summary( binlog::parse_restore(&binlog_path).unwrap_or_default(), - output.status.success(), + command_success, ) } else { binlog::RestoreSummary::default() }; let raw_summary = normalize_restore_summary( binlog::parse_restore_from_text(&raw), - output.status.success(), + command_success, ); let summary = merge_restore_summaries(binlog_summary, raw_summary); @@ -219,9 +213,9 @@ fn run_dotnet_with_binlog(subcommand: &str, args: &[String], verbose: u8) -> Res _ => raw.clone(), }; - let output_to_print = if !output.status.success() { - let stdout_trimmed = stdout.trim(); - let stderr_trimmed = stderr.trim(); + let output_to_print = if !command_success { + let stdout_trimmed = result.stdout.trim(); + let stderr_trimmed = result.stderr.trim(); if !stdout_trimmed.is_empty() { format!("{}\n\n{}", stdout_trimmed, filtered) } else if !stderr_trimmed.is_empty() { @@ -253,7 +247,7 @@ fn run_dotnet_with_binlog(subcommand: &str, args: &[String], verbose: u8) -> Res eprintln!("Binlog cleaned up: {}", binlog_path.display()); } - Ok(exit_code_from_output(&output, "dotnet")) + Ok(result.exit_code) } fn build_binlog_path(subcommand: &str) -> PathBuf { diff --git a/src/cmds/git/git.rs b/src/cmds/git/git.rs index cc2cedf33..4fbdceea4 100644 --- a/src/cmds/git/git.rs +++ b/src/cmds/git/git.rs @@ -1,8 +1,9 @@ //! Filters git output — log, status, diff, and more — keeping just the essential info. 
use crate::core::config; +use crate::core::stream::exec_capture; use crate::core::tracking; -use crate::core::utils::{exit_code_from_output, exit_code_from_status, resolved_command}; +use crate::core::utils::{exit_code_from_status, resolved_command}; use anyhow::{Context, Result}; use std::ffi::OsString; use std::process::Command; @@ -85,22 +86,20 @@ fn run_diff( cmd.arg(arg); } - let output = cmd.output().context("Failed to run git diff")?; + let result = exec_capture(&mut cmd).context("Failed to run git diff")?; - if !output.status.success() { - let stderr = String::from_utf8_lossy(&output.stderr); - eprintln!("{}", stderr); - return Ok(exit_code_from_output(&output, "git")); + if !result.success() { + eprintln!("{}", result.stderr); + return Ok(result.exit_code); } - let stdout = String::from_utf8_lossy(&output.stdout); - println!("{}", stdout.trim()); + println!("{}", result.stdout.trim()); timer.track( &format!("git diff {}", args.join(" ")), &format!("rtk git diff {} (passthrough)", args.join(" ")), - &stdout, - &stdout, + &result.stdout, + &result.stdout, ); return Ok(0); @@ -114,22 +113,19 @@ fn run_diff( cmd.arg(arg); } - let output = cmd.output().context("Failed to run git diff")?; - let stat_stdout = String::from_utf8_lossy(&output.stdout); + let result = exec_capture(&mut cmd).context("Failed to run git diff")?; - if !output.status.success() { - let stderr = String::from_utf8_lossy(&output.stderr); - if !stderr.trim().is_empty() { - eprint!("{}", stderr); + if !result.success() { + if !result.stderr.trim().is_empty() { + eprint!("{}", result.stderr); } - let raw = stat_stdout.to_string(); timer.track( &format!("git diff {}", args.join(" ")), &format!("rtk git diff {}", args.join(" ")), - &raw, - &raw, + &result.stdout, + &result.stdout, ); - return Ok(exit_code_from_output(&output, "git")); + return Ok(result.exit_code); } if verbose > 0 { @@ -137,7 +133,7 @@ fn run_diff( } // Print stat summary first - println!("{}", stat_stdout.trim()); + 
println!("{}", result.stdout.trim()); // Now get actual diff but compact it let mut diff_cmd = git_cmd(global_args); @@ -146,13 +142,12 @@ fn run_diff( diff_cmd.arg(arg); } - let diff_output = diff_cmd.output().context("Failed to run git diff")?; - let diff_stdout = String::from_utf8_lossy(&diff_output.stdout); + let diff_result = exec_capture(&mut diff_cmd).context("Failed to run git diff")?; - let mut final_output = stat_stdout.to_string(); - if !diff_stdout.is_empty() { + let mut final_output = result.stdout.clone(); + if !diff_result.stdout.is_empty() { println!("\n--- Changes ---"); - let compacted = compact_diff(&diff_stdout, max_lines.unwrap_or(500)); + let compacted = compact_diff(&diff_result.stdout, max_lines.unwrap_or(500)); println!("{}", compacted); final_output.push_str("\n--- Changes ---\n"); final_output.push_str(&compacted); @@ -161,7 +156,7 @@ fn run_diff( timer.track( &format!("git diff {}", args.join(" ")), &format!("rtk git diff {}", args.join(" ")), - &format!("{}\n{}", stat_stdout, diff_stdout), + &format!("{}\n{}", result.stdout, diff_result.stdout), &final_output, ); @@ -195,24 +190,22 @@ fn run_show( for arg in args { cmd.arg(arg); } - let output = cmd.output().context("Failed to run git show")?; - if !output.status.success() { - let stderr = String::from_utf8_lossy(&output.stderr); - eprintln!("{}", stderr); - return Ok(exit_code_from_output(&output, "git")); + let result = exec_capture(&mut cmd).context("Failed to run git show")?; + if !result.success() { + eprintln!("{}", result.stderr); + return Ok(result.exit_code); } - let stdout = String::from_utf8_lossy(&output.stdout); if wants_blob_show { - print!("{}", stdout); + print!("{}", result.stdout); } else { - println!("{}", stdout.trim()); + println!("{}", result.stdout.trim()); } timer.track( &format!("git show {}", args.join(" ")), &format!("rtk git show {} (passthrough)", args.join(" ")), - &stdout, - &stdout, + &result.stdout, + &result.stdout, ); return Ok(0); @@ -224,9 +217,8 @@ 
fn run_show( for arg in args { raw_cmd.arg(arg); } - let raw_output = raw_cmd - .output() - .map(|o| String::from_utf8_lossy(&o.stdout).to_string()) + let raw_output = exec_capture(&mut raw_cmd) + .map(|r| r.stdout) .unwrap_or_default(); // Step 1: one-line commit summary @@ -235,14 +227,12 @@ fn run_show( for arg in args { summary_cmd.arg(arg); } - let summary_output = summary_cmd.output().context("Failed to run git show")?; - if !summary_output.status.success() { - let stderr = String::from_utf8_lossy(&summary_output.stderr); - eprintln!("{}", stderr); - return Ok(exit_code_from_output(&summary_output, "git")); + let summary_result = exec_capture(&mut summary_cmd).context("Failed to run git show")?; + if !summary_result.success() { + eprintln!("{}", summary_result.stderr); + return Ok(summary_result.exit_code); } - let summary = String::from_utf8_lossy(&summary_output.stdout); - println!("{}", summary.trim()); + println!("{}", summary_result.stdout.trim()); // Step 2: --stat summary let mut stat_cmd = git_cmd(global_args); @@ -250,9 +240,8 @@ fn run_show( for arg in args { stat_cmd.arg(arg); } - let stat_output = stat_cmd.output().context("Failed to run git show --stat")?; - let stat_stdout = String::from_utf8_lossy(&stat_output.stdout); - let stat_text = stat_stdout.trim(); + let stat_result = exec_capture(&mut stat_cmd).context("Failed to run git show --stat")?; + let stat_text = stat_result.stdout.trim(); if !stat_text.is_empty() { println!("{}", stat_text); } @@ -263,11 +252,10 @@ fn run_show( for arg in args { diff_cmd.arg(arg); } - let diff_output = diff_cmd.output().context("Failed to run git show (diff)")?; - let diff_stdout = String::from_utf8_lossy(&diff_output.stdout); - let diff_text = diff_stdout.trim(); + let diff_result = exec_capture(&mut diff_cmd).context("Failed to run git show (diff)")?; + let diff_text = diff_result.stdout.trim(); - let mut final_output = summary.to_string(); + let mut final_output = summary_result.stdout.clone(); if 
!diff_text.is_empty() { if verbose > 0 { println!("\n--- Changes ---"); @@ -439,28 +427,25 @@ fn run_log( cmd.arg(arg); } - let output = cmd.output().context("Failed to run git log")?; + let result = exec_capture(&mut cmd).context("Failed to run git log")?; - if !output.status.success() { - let stderr = String::from_utf8_lossy(&output.stderr); - eprintln!("{}", stderr); - return Ok(exit_code_from_output(&output, "git")); + if !result.success() { + eprintln!("{}", result.stderr); + return Ok(result.exit_code); } - let stdout = String::from_utf8_lossy(&output.stdout); - if verbose > 0 { eprintln!("Git log output:"); } // Post-process: truncate long messages, cap lines only if RTK set the default - let filtered = filter_log_output(&stdout, limit, user_set_limit, has_format_flag); + let filtered = filter_log_output(&result.stdout, limit, user_set_limit, has_format_flag); println!("{}", filtered); timer.track( &format!("git log {}", args.join(" ")), &format!("rtk git log {}", args.join(" ")), - &stdout, + &result.stdout, &filtered, ); @@ -743,41 +728,35 @@ fn run_status(args: &[String], verbose: u8, global_args: &[String]) -> Result 0 || !stderr.is_empty() { - eprint!("{}", stderr); + if verbose > 0 || !result.stderr.is_empty() { + eprint!("{}", result.stderr); } // Apply minimal filtering: strip ANSI, remove hints, empty lines - let filtered = filter_status_with_args(&stdout); + let filtered = filter_status_with_args(&result.stdout); print!("{}", filtered); timer.track( &format!("git status {}", args.join(" ")), &format!("rtk git status {}", args.join(" ")), - &stdout, + &result.stdout, &filtered, ); @@ -786,28 +765,24 @@ fn run_status(args: &[String], verbose: u8, global_args: &[String]) -> Result Result } } - let output = cmd.output().context("Failed to run git add")?; + let result = exec_capture(&mut cmd).context("Failed to run git add")?; if verbose > 0 { eprintln!("git add executed"); } - let raw_output = format!( - "{}\n{}", - 
String::from_utf8_lossy(&output.stdout), - String::from_utf8_lossy(&output.stderr) - ); + let raw_output = format!("{}\n{}", result.stdout, result.stderr); - if output.status.success() { + if result.success() { // Count what was added - let status_output = git_cmd(global_args) - .args(["diff", "--cached", "--stat", "--shortstat"]) - .output() - .context("Failed to check staged files")?; + let mut stat_cmd = git_cmd(global_args); + stat_cmd.args(["diff", "--cached", "--stat", "--shortstat"]); + let stat_result = + exec_capture(&mut stat_cmd).context("Failed to check staged files")?; - let stat = String::from_utf8_lossy(&status_output.stdout); - let compact = if stat.trim().is_empty() { + let compact = if stat_result.stdout.trim().is_empty() { "ok (nothing to add)".to_string() } else { // Parse "1 file changed, 5 insertions(+)" format - let short = stat.lines().last().unwrap_or("").trim(); + let short = stat_result.stdout.lines().last().unwrap_or("").trim(); if short.is_empty() { "ok".to_string() } else { @@ -873,16 +843,14 @@ fn run_add(args: &[String], verbose: u8, global_args: &[String]) -> Result &compact, ); } else { - let stderr = String::from_utf8_lossy(&output.stderr); - let stdout = String::from_utf8_lossy(&output.stdout); eprintln!("FAILED: git add"); - if !stderr.trim().is_empty() { - eprintln!("{}", stderr); + if !result.stderr.trim().is_empty() { + eprintln!("{}", result.stderr); } - if !stdout.trim().is_empty() { - eprintln!("{}", stdout); + if !result.stdout.trim().is_empty() { + eprintln!("{}", result.stdout); } - return Ok(exit_code_from_output(&output, "git")); + return Ok(result.exit_code); } Ok(0) @@ -906,17 +874,14 @@ fn run_commit(args: &[String], verbose: u8, global_args: &[String]) -> Result= 7 { @@ -935,7 +900,9 @@ fn run_commit(args: &[String], verbose: u8, global_args: &[String]) -> Result Result Result cmd.arg(arg); } - let output = cmd.output().context("Failed to run git push")?; + let cap = exec_capture(&mut cmd).context("Failed to run 
git push")?; - let stderr = String::from_utf8_lossy(&output.stderr); - let stdout = String::from_utf8_lossy(&output.stdout); - let raw = format!("{}{}", stdout, stderr); + let raw = format!("{}{}", cap.stdout, cap.stderr); - if output.status.success() { - let compact = if stderr.contains("Everything up-to-date") { + if cap.success() { + let compact = if cap.stderr.contains("Everything up-to-date") { "ok (up-to-date)".to_string() } else { - let mut result = String::new(); - for line in stderr.lines() { + let mut push_info = String::new(); + for line in cap.stderr.lines() { if line.contains("->") { let parts: Vec<&str> = line.split_whitespace().collect(); if parts.len() >= 3 { - result = format!("ok {}", parts[parts.len() - 1]); + push_info = format!("ok {}", parts[parts.len() - 1]); break; } } } - if !result.is_empty() { - result + if !push_info.is_empty() { + push_info } else { "ok".to_string() } @@ -1008,13 +973,13 @@ fn run_push(args: &[String], verbose: u8, global_args: &[String]) -> Result ); } else { eprintln!("FAILED: git push"); - if !stderr.trim().is_empty() { - eprintln!("{}", stderr); + if !cap.stderr.trim().is_empty() { + eprintln!("{}", cap.stderr); } - if !stdout.trim().is_empty() { - eprintln!("{}", stdout); + if !cap.stdout.trim().is_empty() { + eprintln!("{}", cap.stdout); } - return Ok(exit_code_from_output(&output, "git")); + return Ok(cap.exit_code); } Ok(0) @@ -1033,56 +998,55 @@ fn run_pull(args: &[String], verbose: u8, global_args: &[String]) -> Result cmd.arg(arg); } - let output = cmd.output().context("Failed to run git pull")?; + let result = exec_capture(&mut cmd).context("Failed to run git pull")?; - let stdout = String::from_utf8_lossy(&output.stdout); - let stderr = String::from_utf8_lossy(&output.stderr); - let raw_output = format!("{}\n{}", stdout, stderr); + let raw_output = format!("{}\n{}", result.stdout, result.stderr); - if output.status.success() { - let compact = - if stdout.contains("Already up to date") || 
stdout.contains("Already up-to-date") { - "ok (up-to-date)".to_string() - } else { - // Count files changed - let mut files = 0; - let mut insertions = 0; - let mut deletions = 0; - - for line in stdout.lines() { - if line.contains("file") && line.contains("changed") { - // Parse "3 files changed, 10 insertions(+), 2 deletions(-)" - for part in line.split(',') { - let part = part.trim(); - if part.contains("file") { - files = part - .split_whitespace() - .next() - .and_then(|n| n.parse().ok()) - .unwrap_or(0); - } else if part.contains("insertion") { - insertions = part - .split_whitespace() - .next() - .and_then(|n| n.parse().ok()) - .unwrap_or(0); - } else if part.contains("deletion") { - deletions = part - .split_whitespace() - .next() - .and_then(|n| n.parse().ok()) - .unwrap_or(0); - } + if result.success() { + let compact = if result.stdout.contains("Already up to date") + || result.stdout.contains("Already up-to-date") + { + "ok (up-to-date)".to_string() + } else { + // Count files changed + let mut files = 0; + let mut insertions = 0; + let mut deletions = 0; + + for line in result.stdout.lines() { + if line.contains("file") && line.contains("changed") { + // Parse "3 files changed, 10 insertions(+), 2 deletions(-)" + for part in line.split(',') { + let part = part.trim(); + if part.contains("file") { + files = part + .split_whitespace() + .next() + .and_then(|n| n.parse().ok()) + .unwrap_or(0); + } else if part.contains("insertion") { + insertions = part + .split_whitespace() + .next() + .and_then(|n| n.parse().ok()) + .unwrap_or(0); + } else if part.contains("deletion") { + deletions = part + .split_whitespace() + .next() + .and_then(|n| n.parse().ok()) + .unwrap_or(0); } } } + } - if files > 0 { - format!("ok {} files +{} -{}", files, insertions, deletions) - } else { - "ok".to_string() - } - }; + if files > 0 { + format!("ok {} files +{} -{}", files, insertions, deletions) + } else { + "ok".to_string() + } + }; println!("{}", compact); @@ -1094,13 
+1058,13 @@ fn run_pull(args: &[String], verbose: u8, global_args: &[String]) -> Result ); } else { eprintln!("FAILED: git pull"); - if !stderr.trim().is_empty() { - eprintln!("{}", stderr); + if !result.stderr.trim().is_empty() { + eprintln!("{}", result.stderr); } - if !stdout.trim().is_empty() { - eprintln!("{}", stdout); + if !result.stdout.trim().is_empty() { + eprintln!("{}", result.stdout); } - return Ok(exit_code_from_output(&output, "git")); + return Ok(result.exit_code); } Ok(0) @@ -1153,19 +1117,17 @@ fn run_branch(args: &[String], verbose: u8, global_args: &[String]) -> Result Result Result Result Result Result") || l.contains("[new")) .count(); @@ -1382,23 +1338,26 @@ fn run_stash( match subcommand { Some("list") => { - let output = git_cmd(global_args) - .args(["stash", "list"]) - .output() - .context("Failed to run git stash list")?; - let stdout = String::from_utf8_lossy(&output.stdout); - let raw = stdout.to_string(); - - if stdout.trim().is_empty() { + let mut cmd = git_cmd(global_args); + cmd.args(["stash", "list"]); + let result = + exec_capture(&mut cmd).context("Failed to run git stash list")?; + + if result.stdout.trim().is_empty() { let msg = "No stashes"; println!("{}", msg); - timer.track("git stash list", "rtk git stash list", &raw, msg); + timer.track("git stash list", "rtk git stash list", &result.stdout, msg); return Ok(0); } - let filtered = filter_stash_list(&stdout); + let filtered = filter_stash_list(&result.stdout); println!("{}", filtered); - timer.track("git stash list", "rtk git stash list", &raw, &filtered); + timer.track( + "git stash list", + "rtk git stash list", + &result.stdout, + &filtered, + ); } Some("show") => { let mut cmd = git_cmd(global_args); @@ -1406,21 +1365,25 @@ fn run_stash( for arg in args { cmd.arg(arg); } - let output = cmd.output().context("Failed to run git stash show")?; - let stdout = String::from_utf8_lossy(&output.stdout); - let raw = stdout.to_string(); + let result = + exec_capture(&mut 
cmd).context("Failed to run git stash show")?; - let filtered = if stdout.trim().is_empty() { + let filtered = if result.stdout.trim().is_empty() { let msg = "Empty stash"; println!("{}", msg); msg.to_string() } else { - let compacted = compact_diff(&stdout, 100); + let compacted = compact_diff(&result.stdout, 100); println!("{}", compacted); compacted }; - timer.track("git stash show", "rtk git stash show", &raw, &filtered); + timer.track( + "git stash show", + "rtk git stash show", + &result.stdout, + &filtered, + ); } Some("pop") | Some("apply") | Some("drop") | Some("push") => { let sub = subcommand.unwrap(); @@ -1429,19 +1392,17 @@ fn run_stash( for arg in args { cmd.arg(arg); } - let output = cmd.output().context("Failed to run git stash")?; - let stdout = String::from_utf8_lossy(&output.stdout); - let stderr = String::from_utf8_lossy(&output.stderr); - let combined = format!("{}{}", stdout, stderr); + let result = exec_capture(&mut cmd).context("Failed to run git stash")?; + let combined = result.combined(); - let msg = if output.status.success() { + let msg = if result.success() { let msg = format!("ok stash {}", sub); println!("{}", msg); msg } else { eprintln!("FAILED: git stash {}", sub); - if !stderr.trim().is_empty() { - eprintln!("{}", stderr); + if !result.stderr.trim().is_empty() { + eprintln!("{}", result.stderr); } combined.clone() }; @@ -1453,8 +1414,8 @@ fn run_stash( &msg, ); - if !output.status.success() { - return Ok(exit_code_from_output(&output, "git")); + if !result.success() { + return Ok(result.exit_code); } } Some(sub) => { @@ -1464,19 +1425,17 @@ fn run_stash( for arg in args { cmd.arg(arg); } - let output = cmd.output().context("Failed to run git stash")?; - let stdout = String::from_utf8_lossy(&output.stdout); - let stderr = String::from_utf8_lossy(&output.stderr); - let combined = format!("{}{}", stdout, stderr); + let result = exec_capture(&mut cmd).context("Failed to run git stash")?; + let combined = result.combined(); - let msg 
= if output.status.success() { + let msg = if result.success() { let msg = format!("ok stash {}", sub); println!("{}", msg); msg } else { eprintln!("FAILED: git stash {}", sub); - if !stderr.trim().is_empty() { - eprintln!("{}", stderr); + if !result.stderr.trim().is_empty() { + eprintln!("{}", result.stderr); } combined.clone() }; @@ -1488,8 +1447,8 @@ fn run_stash( &msg, ); - if !output.status.success() { - return Ok(exit_code_from_output(&output, "git")); + if !result.success() { + return Ok(result.exit_code); } } None => { @@ -1499,13 +1458,11 @@ fn run_stash( for arg in args { cmd.arg(arg); } - let output = cmd.output().context("Failed to run git stash")?; - let stdout = String::from_utf8_lossy(&output.stdout); - let stderr = String::from_utf8_lossy(&output.stderr); - let combined = format!("{}{}", stdout, stderr); + let result = exec_capture(&mut cmd).context("Failed to run git stash")?; + let combined = result.combined(); - let msg = if output.status.success() { - if stdout.contains("No local changes") { + let msg = if result.success() { + if result.stdout.contains("No local changes") { let msg = "ok (nothing to stash)"; println!("{}", msg); msg.to_string() @@ -1516,16 +1473,16 @@ fn run_stash( } } else { eprintln!("FAILED: git stash"); - if !stderr.trim().is_empty() { - eprintln!("{}", stderr); + if !result.stderr.trim().is_empty() { + eprintln!("{}", result.stderr); } combined.clone() }; timer.track("git stash", "rtk git stash", &combined, &msg); - if !output.status.success() { - return Ok(exit_code_from_output(&output, "git")); + if !result.success() { + return Ok(result.exit_code); } } } @@ -1572,12 +1529,10 @@ fn run_worktree(args: &[String], verbose: u8, global_args: &[String]) -> Result< for arg in args { cmd.arg(arg); } - let output = cmd.output().context("Failed to run git worktree")?; - let stdout = String::from_utf8_lossy(&output.stdout); - let stderr = String::from_utf8_lossy(&output.stderr); - let combined = format!("{}{}", stdout, stderr); + 
let result = exec_capture(&mut cmd).context("Failed to run git worktree")?; + let combined = result.combined(); - let msg = if output.status.success() { + let msg = if result.success() { "ok" } else { &combined @@ -1590,30 +1545,32 @@ fn run_worktree(args: &[String], verbose: u8, global_args: &[String]) -> Result< msg, ); - if output.status.success() { + if result.success() { println!("ok"); } else { eprintln!("FAILED: git worktree {}", args.join(" ")); - if !stderr.trim().is_empty() { - eprintln!("{}", stderr); + if !result.stderr.trim().is_empty() { + eprintln!("{}", result.stderr); } - return Ok(exit_code_from_output(&output, "git")); + return Ok(result.exit_code); } return Ok(0); } // Default: list mode - let output = git_cmd(global_args) - .args(["worktree", "list"]) - .output() - .context("Failed to run git worktree list")?; - - let stdout = String::from_utf8_lossy(&output.stdout); - let raw = stdout.to_string(); + let mut cmd = git_cmd(global_args); + cmd.args(["worktree", "list"]); + let result = + exec_capture(&mut cmd).context("Failed to run git worktree list")?; - let filtered = filter_worktree_list(&stdout); + let filtered = filter_worktree_list(&result.stdout); println!("{}", filtered); - timer.track("git worktree list", "rtk git worktree", &raw, &filtered); + timer.track( + "git worktree list", + "rtk git worktree", + &result.stdout, + &filtered, + ); Ok(0) } diff --git a/src/cmds/git/gt_cmd.rs b/src/cmds/git/gt_cmd.rs index fdda867d0..bfa06fd2c 100644 --- a/src/cmds/git/gt_cmd.rs +++ b/src/cmds/git/gt_cmd.rs @@ -1,9 +1,8 @@ //! Filters Graphite (gt) CLI output for stacking workflows. 
+use crate::core::stream::exec_capture; use crate::core::tracking; -use crate::core::utils::{ - exit_code_from_output, ok_confirmation, resolved_command, strip_ansi, truncate, -}; +use crate::core::utils::{ok_confirmation, resolved_command, strip_ansi, truncate}; use anyhow::{Context, Result}; use lazy_static::lazy_static; use regex::Regex; @@ -43,34 +42,30 @@ fn run_gt_filtered( eprintln!("Running: gt {} {}", subcmd_str, args.join(" ")); } - let cmd_output = cmd.output().with_context(|| { + let cmd_output = exec_capture(&mut cmd).with_context(|| { format!( "Failed to run gt {}. Is gt (Graphite) installed?", subcmd_str ) })?; - let stdout = String::from_utf8_lossy(&cmd_output.stdout); - let stderr = String::from_utf8_lossy(&cmd_output.stderr); - let raw = format!("{}\n{}", stdout, stderr); + let raw = format!("{}\n{}", cmd_output.stdout, cmd_output.stderr); - let exit_code = exit_code_from_output(&cmd_output, "gt"); - - let clean = strip_ansi(stdout.trim()); + let clean = strip_ansi(cmd_output.stdout.trim()); let output = if verbose > 0 { clean.clone() } else { filter_fn(&clean) }; - if let Some(hint) = crate::core::tee::tee_and_hint(&raw, tee_label, exit_code) { + if let Some(hint) = crate::core::tee::tee_and_hint(&raw, tee_label, cmd_output.exit_code) { println!("{}\n{}", output, hint); } else { println!("{}", output); } - if !stderr.trim().is_empty() { - eprintln!("{}", stderr.trim()); + if !cmd_output.stderr.trim().is_empty() { + eprintln!("{}", cmd_output.stderr.trim()); } let label = if args.is_empty() { @@ -81,7 +76,7 @@ fn run_gt_filtered( let rtk_label = format!("rtk {}", label); timer.track(&label, &rtk_label, &raw, &output); - Ok(exit_code) + Ok(cmd_output.exit_code) } fn filter_identity(input: &str) -> String { diff --git a/src/cmds/go/go_cmd.rs b/src/cmds/go/go_cmd.rs index b4276b332..ed927352c 100644 --- a/src/cmds/go/go_cmd.rs +++ b/src/cmds/go/go_cmd.rs @@ -2,7 +2,8 @@ use crate::core::runner; use crate::core::tracking; -use 
crate::core::utils::{exit_code_from_output, resolved_command, truncate}; +use crate::core::stream::exec_capture; +use crate::core::utils::{resolved_command, truncate}; use crate::golangci_cmd; use anyhow::{Context, Result}; use serde::Deserialize; @@ -133,16 +134,13 @@ pub fn run_other(args: &[OsString], verbose: u8) -> Result { eprintln!("Running: go {} ...", subcommand); } - let output = cmd - .output() + let result = exec_capture(&mut cmd) .with_context(|| format!("Failed to run go {}", subcommand))?; - let stdout = String::from_utf8_lossy(&output.stdout); - let stderr = String::from_utf8_lossy(&output.stderr); - let raw = format!("{}\n{}", stdout, stderr); + let raw = format!("{}\n{}", result.stdout, result.stderr); - print!("{}", stdout); - eprint!("{}", stderr); + print!("{}", result.stdout); + eprint!("{}", result.stderr); timer.track( &format!("go {}", subcommand), @@ -151,26 +149,21 @@ pub fn run_other(args: &[OsString], verbose: u8) -> Result { &raw, // No filtering for unsupported commands ); - Ok(exit_code_from_output(&output, "go")) + Ok(result.exit_code) } /// Detect golangci-lint major version when invoked via `go tool`. /// Returns 1 on any failure (safe fallback — v1 behaviour). 
fn detect_go_tool_golangci_version() -> u32 { - let output = resolved_command("go") - .arg("tool") - .arg("golangci-lint") - .arg("--version") - .output(); - - match output { - Ok(o) => { - let stdout = String::from_utf8_lossy(&o.stdout); - let stderr = String::from_utf8_lossy(&o.stderr); - let version_text = if stdout.trim().is_empty() { - &*stderr + let mut cmd = resolved_command("go"); + cmd.arg("tool").arg("golangci-lint").arg("--version"); + + match exec_capture(&mut cmd) { + Ok(r) => { + let version_text = if r.stdout.trim().is_empty() { + &r.stderr } else { - &*stdout + &r.stdout }; golangci_cmd::parse_major_version(version_text) } @@ -249,26 +242,23 @@ fn run_go_tool_golangci_lint(args: &[OsString], verbose: u8) -> Result { } } - let output = cmd - .output() + let result = exec_capture(&mut cmd) .context("Failed to run go tool golangci-lint")?; - let stdout = String::from_utf8_lossy(&output.stdout); - let stderr = String::from_utf8_lossy(&output.stderr); - let raw = format!("{}\n{}", stdout, stderr); + let raw = format!("{}\n{}", result.stdout, result.stderr); // v2 outputs JSON on first line + trailing text; v1 outputs just JSON let json_output = if version >= 2 { - stdout.lines().next().unwrap_or("") + result.stdout.lines().next().unwrap_or("") } else { - &*stdout + &result.stdout }; let filtered = golangci_cmd::filter_golangci_json(json_output, version); println!("{}", filtered); - if !stderr.trim().is_empty() && verbose > 0 { - eprintln!("{}", stderr.trim()); + if !result.stderr.trim().is_empty() && verbose > 0 { + eprintln!("{}", result.stderr.trim()); } timer.track( @@ -278,10 +268,9 @@ fn run_go_tool_golangci_lint(args: &[OsString], verbose: u8) -> Result { &filtered, ); - let exit_code = exit_code_from_output(&output, "go tool golangci-lint"); // golangci-lint: exit 0 = clean, exit 1 = lint issues found (not an error), // exit 2+ = config/build error, None = killed by signal (OOM, SIGKILL) - Ok(if exit_code == 1 { 0 } else { exit_code }) + Ok(if 
result.exit_code == 1 { 0 } else { result.exit_code }) } pub(crate) fn filter_go_test_json(output: &str) -> String { diff --git a/src/cmds/go/golangci_cmd.rs b/src/cmds/go/golangci_cmd.rs index f24a9e059..b9e48e091 100644 --- a/src/cmds/go/golangci_cmd.rs +++ b/src/cmds/go/golangci_cmd.rs @@ -2,6 +2,7 @@ use crate::core::config; use crate::core::runner; +use crate::core::stream::exec_capture; use crate::core::utils::{resolved_command, truncate}; use anyhow::Result; use serde::Deserialize; @@ -63,16 +64,15 @@ pub(crate) fn parse_major_version(version_output: &str) -> u32 { /// Run `golangci-lint --version` and return the major version number. /// Returns 1 on any failure. pub(crate) fn detect_major_version() -> u32 { - let output = resolved_command("golangci-lint").arg("--version").output(); - - match output { - Ok(o) => { - let stdout = String::from_utf8_lossy(&o.stdout); - let stderr = String::from_utf8_lossy(&o.stderr); - let version_text = if stdout.trim().is_empty() { - &*stderr + let mut cmd = resolved_command("golangci-lint"); + cmd.arg("--version"); + + match exec_capture(&mut cmd) { + Ok(r) => { + let version_text = if r.stdout.trim().is_empty() { + &r.stderr } else { - &*stdout + &r.stdout }; parse_major_version(version_text) } diff --git a/src/cmds/js/lint_cmd.rs b/src/cmds/js/lint_cmd.rs index f407927d8..21eb528f5 100644 --- a/src/cmds/js/lint_cmd.rs +++ b/src/cmds/js/lint_cmd.rs @@ -1,6 +1,7 @@ //! Filters ESLint and Biome linter output, grouping violations by rule. use crate::core::config; +use crate::core::stream::exec_capture; use crate::core::tracking; use crate::core::utils::{package_manager_exec, resolved_command, truncate}; use crate::mypy_cmd; @@ -166,49 +167,42 @@ pub fn run(args: &[String], verbose: u8) -> Result { eprintln!("Running: {} with structured output", linter); } - let output = cmd.output().context(format!( + let result = exec_capture(&mut cmd).context(format!( "Failed to run {}. Is it installed? 
Try: pip install {} (or npm/pnpm for JS linters)", linter, linter ))?; // Check if process was killed by signal (SIGABRT, SIGKILL, etc.) - if !output.status.success() && output.status.code().is_none() { - let stderr = String::from_utf8_lossy(&output.stderr); + if !result.success() && result.exit_code > 128 { eprintln!("[warn] Linter process terminated abnormally (possibly out of memory)"); - if !stderr.is_empty() { + if !result.stderr.is_empty() { eprintln!( "stderr: {}", - stderr.lines().take(5).collect::>().join("\n") + result.stderr.lines().take(5).collect::>().join("\n") ); } - return Ok(crate::core::utils::exit_code_from_output(&output, "eslint")); + return Ok(result.exit_code); } - let stdout = String::from_utf8_lossy(&output.stdout); - let stderr = String::from_utf8_lossy(&output.stderr); - let raw = format!("{}\n{}", stdout, stderr); + let raw = format!("{}\n{}", result.stdout, result.stderr); // Dispatch to appropriate filter based on linter let filtered = match linter { - "eslint" => filter_eslint_json(&stdout), + "eslint" => filter_eslint_json(&result.stdout), "ruff" => { // Reuse ruff_cmd's JSON parser - if !stdout.trim().is_empty() { - ruff_cmd::filter_ruff_check_json(&stdout) + if !result.stdout.trim().is_empty() { + ruff_cmd::filter_ruff_check_json(&result.stdout) } else { "Ruff: No issues found".to_string() } } - "pylint" => filter_pylint_json(&stdout), + "pylint" => filter_pylint_json(&result.stdout), "mypy" => mypy_cmd::filter_mypy_output(&raw), _ => filter_generic_lint(&raw), }; - let exit_code = output - .status - .code() - .unwrap_or(if output.status.success() { 0 } else { 1 }); - if let Some(hint) = crate::core::tee::tee_and_hint(&raw, "lint", exit_code) { + if let Some(hint) = crate::core::tee::tee_and_hint(&raw, "lint", result.exit_code) { println!("{}\n{}", filtered, hint); } else { println!("{}", filtered); @@ -221,8 +215,8 @@ pub fn run(args: &[String], verbose: u8) -> Result { &filtered, ); - if !output.status.success() { - return 
Ok(crate::core::utils::exit_code_from_output(&output, "eslint")); + if !result.success() { + return Ok(result.exit_code); } Ok(0) diff --git a/src/cmds/js/playwright_cmd.rs b/src/cmds/js/playwright_cmd.rs index e5077fa0f..bfa41ac27 100644 --- a/src/cmds/js/playwright_cmd.rs +++ b/src/cmds/js/playwright_cmd.rs @@ -1,5 +1,6 @@ //! Filters Playwright E2E test output to show only failures. +use crate::core::stream::exec_capture; use crate::core::tracking; use crate::core::utils::{detect_package_manager, resolved_command, strip_ansi}; use anyhow::{Context, Result}; @@ -285,16 +286,13 @@ pub fn run(args: &[String], verbose: u8) -> Result { eprintln!("Running: playwright {}", args.join(" ")); } - let output = cmd - .output() + let result = exec_capture(&mut cmd) .context("Failed to run playwright (try: npm install -g playwright)")?; - let stdout = String::from_utf8_lossy(&output.stdout); - let stderr = String::from_utf8_lossy(&output.stderr); - let raw = format!("{}\n{}", stdout, stderr); + let raw = format!("{}\n{}", result.stdout, result.stderr); // Parse output using PlaywrightParser - let parse_result = PlaywrightParser::parse(&stdout); + let parse_result = PlaywrightParser::parse(&result.stdout); let mode = FormatMode::from_verbosity(verbose); let filtered = match parse_result { @@ -316,8 +314,7 @@ pub fn run(args: &[String], verbose: u8) -> Result { } }; - let exit_code = crate::core::utils::exit_code_from_output(&output, "playwright"); - if let Some(hint) = crate::core::tee::tee_and_hint(&raw, "playwright", exit_code) { + if let Some(hint) = crate::core::tee::tee_and_hint(&raw, "playwright", result.exit_code) { println!("{}\n{}", filtered, hint); } else { println!("{}", filtered); @@ -331,8 +328,8 @@ pub fn run(args: &[String], verbose: u8) -> Result { ); // Preserve exit code for CI/CD - if !output.status.success() { - return Ok(exit_code); + if !result.success() { + return Ok(result.exit_code); } Ok(0) diff --git a/src/cmds/js/pnpm_cmd.rs 
b/src/cmds/js/pnpm_cmd.rs index 9ec832620..5e60cf49d 100644 --- a/src/cmds/js/pnpm_cmd.rs +++ b/src/cmds/js/pnpm_cmd.rs @@ -1,5 +1,6 @@ //! Filters pnpm output — dependency trees, install logs, outdated packages. +use crate::core::stream::exec_capture; use crate::core::tracking; use crate::core::utils::resolved_command; use anyhow::{Context, Result}; @@ -305,18 +306,15 @@ fn run_list(depth: usize, args: &[String], verbose: u8) -> Result { cmd.arg(arg); } - let output = cmd.output().context("Failed to run pnpm list")?; + let result = exec_capture(&mut cmd).context("Failed to run pnpm list")?; - if !output.status.success() { - let stderr = String::from_utf8_lossy(&output.stderr); - eprint!("{}", stderr); - return Ok(crate::core::utils::exit_code_from_output(&output, "pnpm")); + if !result.success() { + eprint!("{}", result.stderr); + return Ok(result.exit_code); } - let stdout = String::from_utf8_lossy(&output.stdout); - // Parse output using PnpmListParser - let parse_result = PnpmListParser::parse(&stdout); + let parse_result = PnpmListParser::parse(&result.stdout); let mode = FormatMode::from_verbosity(verbose); let filtered = match parse_result { @@ -343,7 +341,7 @@ fn run_list(depth: usize, args: &[String], verbose: u8) -> Result { timer.track( &format!("pnpm list --depth={}", depth), &format!("rtk pnpm list --depth={}", depth), - &stdout, + &result.stdout, &filtered, ); @@ -362,13 +360,11 @@ fn run_outdated(args: &[String], verbose: u8) -> Result { cmd.arg(arg); } - let output = cmd.output().context("Failed to run pnpm outdated")?; - let stdout = String::from_utf8_lossy(&output.stdout); - let stderr = String::from_utf8_lossy(&output.stderr); - let combined = format!("{}{}", stdout, stderr); + let result = exec_capture(&mut cmd).context("Failed to run pnpm outdated")?; + let combined = result.combined(); // Parse output using PnpmOutdatedParser - let parse_result = PnpmOutdatedParser::parse(&stdout); + let parse_result = 
PnpmOutdatedParser::parse(&result.stdout); let mode = FormatMode::from_verbosity(verbose); let filtered = match parse_result { @@ -429,16 +425,14 @@ fn run_install(packages: &[String], args: &[String], verbose: u8) -> Result eprintln!("pnpm install running..."); } - let output = cmd.output().context("Failed to run pnpm install")?; - let stdout = String::from_utf8_lossy(&output.stdout); - let stderr = String::from_utf8_lossy(&output.stderr); + let result = exec_capture(&mut cmd).context("Failed to run pnpm install")?; - if !output.status.success() { - eprint!("{}", stderr); - return Ok(crate::core::utils::exit_code_from_output(&output, "pnpm")); + if !result.success() { + eprint!("{}", result.stderr); + return Ok(result.exit_code); } - let combined = format!("{}{}", stdout, stderr); + let combined = result.combined(); let filtered = filter_pnpm_install(&combined); println!("{}", filtered); diff --git a/src/cmds/js/prisma_cmd.rs b/src/cmds/js/prisma_cmd.rs index b96ca392d..397236907 100644 --- a/src/cmds/js/prisma_cmd.rs +++ b/src/cmds/js/prisma_cmd.rs @@ -1,5 +1,6 @@ //! Filters Prisma CLI output by stripping ASCII art and verbose decoration. 
+use crate::core::stream::exec_capture; use crate::core::tracking; use crate::core::utils::{resolved_command, tool_exists}; use anyhow::{Context, Result}; @@ -52,24 +53,20 @@ fn run_generate(args: &[String], verbose: u8) -> Result { eprintln!("Running: prisma generate"); } - let output = cmd - .output() + let result = exec_capture(&mut cmd) .context("Failed to run prisma generate (try: npm install -g prisma)")?; - let exit_code = crate::core::utils::exit_code_from_output(&output, "prisma"); - let stdout = String::from_utf8_lossy(&output.stdout); - let stderr = String::from_utf8_lossy(&output.stderr); - let raw = format!("{}\n{}", stdout, stderr); + let raw = format!("{}\n{}", result.stdout, result.stderr); - if !output.status.success() { - if !stdout.trim().is_empty() { - eprint!("{}", stdout); + if !result.success() { + if !result.stdout.trim().is_empty() { + eprint!("{}", result.stdout); } - if !stderr.trim().is_empty() { - eprint!("{}", stderr); + if !result.stderr.trim().is_empty() { + eprint!("{}", result.stderr); } timer.track("prisma generate", "rtk prisma generate", &raw, &raw); - return Ok(exit_code); + return Ok(result.exit_code); } let filtered = filter_prisma_generate(&raw); @@ -111,22 +108,19 @@ fn run_migrate(subcommand: MigrateSubcommand, args: &[String], verbose: u8) -> R eprintln!("Running: {}", cmd_name); } - let output = cmd.output().context("Failed to run prisma migrate")?; + let result = exec_capture(&mut cmd).context("Failed to run prisma migrate")?; - let exit_code = crate::core::utils::exit_code_from_output(&output, "prisma"); - let stdout = String::from_utf8_lossy(&output.stdout); - let stderr = String::from_utf8_lossy(&output.stderr); - let raw = format!("{}\n{}", stdout, stderr); + let raw = format!("{}\n{}", result.stdout, result.stderr); - if !output.status.success() { - if !stdout.trim().is_empty() { - eprint!("{}", stdout); + if !result.success() { + if !result.stdout.trim().is_empty() { + eprint!("{}", result.stdout); } - if 
!stderr.trim().is_empty() { - eprint!("{}", stderr); + if !result.stderr.trim().is_empty() { + eprint!("{}", result.stderr); } timer.track(cmd_name, &format!("rtk {}", cmd_name), &raw, &raw); - return Ok(exit_code); + return Ok(result.exit_code); } let filtered = match subcommand { @@ -155,22 +149,19 @@ fn run_db_push(args: &[String], verbose: u8) -> Result { eprintln!("Running: prisma db push"); } - let output = cmd.output().context("Failed to run prisma db push")?; + let result = exec_capture(&mut cmd).context("Failed to run prisma db push")?; - let exit_code = crate::core::utils::exit_code_from_output(&output, "prisma"); - let stdout = String::from_utf8_lossy(&output.stdout); - let stderr = String::from_utf8_lossy(&output.stderr); - let raw = format!("{}\n{}", stdout, stderr); + let raw = format!("{}\n{}", result.stdout, result.stderr); - if !output.status.success() { - if !stdout.trim().is_empty() { - eprint!("{}", stdout); + if !result.success() { + if !result.stdout.trim().is_empty() { + eprint!("{}", result.stdout); } - if !stderr.trim().is_empty() { - eprint!("{}", stderr); + if !result.stderr.trim().is_empty() { + eprint!("{}", result.stderr); } timer.track("prisma db push", "rtk prisma db push", &raw, &raw); - return Ok(exit_code); + return Ok(result.exit_code); } let filtered = filter_db_push(&raw); diff --git a/src/cmds/js/vitest_cmd.rs b/src/cmds/js/vitest_cmd.rs index 4f5e9ae47..ab247c901 100644 --- a/src/cmds/js/vitest_cmd.rs +++ b/src/cmds/js/vitest_cmd.rs @@ -4,6 +4,7 @@ use anyhow::{Context, Result}; use regex::Regex; use serde::Deserialize; +use crate::core::stream::exec_capture; use crate::core::tracking; use crate::core::utils::{package_manager_exec, strip_ansi}; use crate::parser::{ @@ -234,13 +235,11 @@ fn run_vitest(args: &[String], verbose: u8) -> Result { cmd.arg(arg); } - let output = cmd.output().context("Failed to run vitest")?; - let stdout = String::from_utf8_lossy(&output.stdout); - let stderr = 
String::from_utf8_lossy(&output.stderr); - let combined = format!("{}{}", stdout, stderr); + let result = exec_capture(&mut cmd).context("Failed to run vitest")?; + let combined = result.combined(); // Parse output using VitestParser - let parse_result = VitestParser::parse(&stdout); + let parse_result = VitestParser::parse(&result.stdout); let mode = FormatMode::from_verbosity(verbose); let filtered = match parse_result { @@ -262,8 +261,7 @@ fn run_vitest(args: &[String], verbose: u8) -> Result { } }; - let exit_code = crate::core::utils::exit_code_from_output(&output, "vitest"); - if let Some(hint) = crate::core::tee::tee_and_hint(&combined, "vitest_run", exit_code) { + if let Some(hint) = crate::core::tee::tee_and_hint(&combined, "vitest_run", result.exit_code) { println!("{}\n{}", filtered, hint); } else { println!("{}", filtered); @@ -271,8 +269,8 @@ fn run_vitest(args: &[String], verbose: u8) -> Result { timer.track("vitest run", "rtk vitest run", &combined, &filtered); - if !output.status.success() { - return Ok(exit_code); + if !result.success() { + return Ok(result.exit_code); } Ok(0) } diff --git a/src/cmds/python/pip_cmd.rs b/src/cmds/python/pip_cmd.rs index 00090d425..ddb56c47e 100644 --- a/src/cmds/python/pip_cmd.rs +++ b/src/cmds/python/pip_cmd.rs @@ -1,7 +1,8 @@ //! Filters pip and uv package manager output. 
+use crate::core::stream::exec_capture; use crate::core::tracking; -use crate::core::utils::{exit_code_from_output, resolved_command, tool_exists}; +use crate::core::utils::{resolved_command, tool_exists}; use anyhow::{Context, Result}; use serde::Deserialize; @@ -67,19 +68,15 @@ fn run_list(base_cmd: &str, args: &[String], verbose: u8) -> Result<(String, Str eprintln!("Running: {} pip list --format=json", base_cmd); } - let output = cmd - .output() + let result = exec_capture(&mut cmd) .with_context(|| format!("Failed to run {} pip list", base_cmd))?; - let stdout = String::from_utf8_lossy(&output.stdout); - let stderr = String::from_utf8_lossy(&output.stderr); - let raw = format!("{}\n{}", stdout, stderr); + let raw = format!("{}\n{}", result.stdout, result.stderr); - let filtered = filter_pip_list(&stdout); + let filtered = filter_pip_list(&result.stdout); println!("{}", filtered); - let exit_code = exit_code_from_output(&output, "pip"); - Ok((raw, filtered, exit_code)) + Ok((raw, filtered, result.exit_code)) } fn run_outdated(base_cmd: &str, args: &[String], verbose: u8) -> Result<(String, String, i32)> { @@ -99,19 +96,15 @@ fn run_outdated(base_cmd: &str, args: &[String], verbose: u8) -> Result<(String, eprintln!("Running: {} pip list --outdated --format=json", base_cmd); } - let output = cmd - .output() + let result = exec_capture(&mut cmd) .with_context(|| format!("Failed to run {} pip list --outdated", base_cmd))?; - let stdout = String::from_utf8_lossy(&output.stdout); - let stderr = String::from_utf8_lossy(&output.stderr); - let raw = format!("{}\n{}", stdout, stderr); + let raw = format!("{}\n{}", result.stdout, result.stderr); - let filtered = filter_pip_outdated(&stdout); + let filtered = filter_pip_outdated(&result.stdout); println!("{}", filtered); - let exit_code = exit_code_from_output(&output, "pip"); - Ok((raw, filtered, exit_code)) + Ok((raw, filtered, result.exit_code)) } fn run_passthrough(base_cmd: &str, args: &[String], verbose: u8) -> 
Result<(String, String, i32)> { @@ -129,19 +122,15 @@ fn run_passthrough(base_cmd: &str, args: &[String], verbose: u8) -> Result<(Stri eprintln!("Running: {} pip {}", base_cmd, args.join(" ")); } - let output = cmd - .output() + let result = exec_capture(&mut cmd) .with_context(|| format!("Failed to run {} pip {}", base_cmd, args.join(" ")))?; - let stdout = String::from_utf8_lossy(&output.stdout); - let stderr = String::from_utf8_lossy(&output.stderr); - let raw = format!("{}\n{}", stdout, stderr); + let raw = format!("{}\n{}", result.stdout, result.stderr); - print!("{}", stdout); - eprint!("{}", stderr); + print!("{}", result.stdout); + eprint!("{}", result.stderr); - let exit_code = exit_code_from_output(&output, "pip"); - Ok((raw.clone(), raw, exit_code)) + Ok((raw.clone(), raw, result.exit_code)) } /// Filter pip list JSON output diff --git a/src/cmds/system/format_cmd.rs b/src/cmds/system/format_cmd.rs index 6398b353e..e147640ea 100644 --- a/src/cmds/system/format_cmd.rs +++ b/src/cmds/system/format_cmd.rs @@ -1,7 +1,8 @@ //! Runs code formatters (Prettier, Ruff) and shows only files that changed. +use crate::core::stream::exec_capture; use crate::core::tracking; -use crate::core::utils::{exit_code_from_output, package_manager_exec, resolved_command}; +use crate::core::utils::{package_manager_exec, resolved_command}; use crate::prettier_cmd; use crate::ruff_cmd; use anyhow::{Context, Result}; @@ -111,14 +112,12 @@ pub fn run(args: &[String], verbose: u8) -> Result { eprintln!("Running: {} {}", formatter, user_args.join(" ")); } - let output = cmd.output().context(format!( + let result = exec_capture(&mut cmd).context(format!( "Failed to run {}. Is it installed? 
Try: pip install {} (or npm/pnpm for JS formatters)", formatter, formatter ))?; - let stdout = String::from_utf8_lossy(&output.stdout); - let stderr = String::from_utf8_lossy(&output.stderr); - let raw = format!("{}\n{}", stdout, stderr); + let raw = format!("{}\n{}", result.stdout, result.stderr); // Dispatch to appropriate filter based on formatter let filtered = match formatter.as_str() { @@ -137,7 +136,7 @@ pub fn run(args: &[String], verbose: u8) -> Result { &filtered, ); - Ok(exit_code_from_output(&output, "format")) + Ok(result.exit_code) } /// Filter black output - show files that need formatting diff --git a/src/cmds/system/grep_cmd.rs b/src/cmds/system/grep_cmd.rs index e9738fc1d..f163f4f85 100644 --- a/src/cmds/system/grep_cmd.rs +++ b/src/cmds/system/grep_cmd.rs @@ -1,8 +1,9 @@ //! Filters grep output by grouping matches by file. use crate::core::config; +use crate::core::stream::exec_capture; use crate::core::tracking; -use crate::core::utils::{exit_code_from_output, resolved_command}; +use crate::core::utils::resolved_command; use anyhow::{Context, Result}; use regex::Regex; use std::collections::HashMap; @@ -42,26 +43,22 @@ pub fn run( rg_cmd.arg(arg); } - let output = rg_cmd - .output() + let result = exec_capture(&mut rg_cmd) .or_else(|_| { - resolved_command("grep") - .args(["-rn", pattern, path]) - .output() + let mut grep_cmd = resolved_command("grep"); + grep_cmd.args(["-rn", pattern, path]); + exec_capture(&mut grep_cmd) }) .context("grep/rg failed")?; - let stdout = String::from_utf8_lossy(&output.stdout); - let exit_code = exit_code_from_output(&output, "grep"); + let exit_code = result.exit_code; + let raw_output = result.stdout.clone(); - let raw_output = stdout.to_string(); - - if stdout.trim().is_empty() { + if result.stdout.trim().is_empty() { // Show stderr for errors (bad regex, missing file, etc.) 
if exit_code == 2 { - let stderr = String::from_utf8_lossy(&output.stderr); - if !stderr.trim().is_empty() { - eprintln!("{}", stderr.trim()); + if !result.stderr.trim().is_empty() { + eprintln!("{}", result.stderr.trim()); } } let msg = format!("0 matches for '{}'", pattern); @@ -85,7 +82,7 @@ pub fn run( None }; - for line in stdout.lines() { + for line in result.stdout.lines() { let parts: Vec<&str> = line.splitn(3, ':').collect(); let (file, line_num, content) = if parts.len() == 3 { diff --git a/src/cmds/system/summary.rs b/src/cmds/system/summary.rs index 4ab4bb644..c6e2ec051 100644 --- a/src/cmds/system/summary.rs +++ b/src/cmds/system/summary.rs @@ -1,10 +1,11 @@ //! Runs a command and produces a heuristic summary of its output. +use crate::core::stream::exec_capture; use crate::core::tracking; use crate::core::utils::truncate; use anyhow::{Context, Result}; use regex::Regex; -use std::process::{Command, Stdio}; +use std::process::Command; /// Run a command and provide a heuristic summary pub fn run(command: &str, verbose: u8) -> Result { @@ -14,31 +15,23 @@ pub fn run(command: &str, verbose: u8) -> Result { eprintln!("Running and summarizing: {}", command); } - let output = if cfg!(target_os = "windows") { - Command::new("cmd") - .args(["/C", command]) - .stdout(Stdio::piped()) - .stderr(Stdio::piped()) - .output() + let mut cmd = if cfg!(target_os = "windows") { + let mut c = Command::new("cmd"); + c.args(["/C", command]); + c } else { - Command::new("sh") - .args(["-c", command]) - .stdout(Stdio::piped()) - .stderr(Stdio::piped()) - .output() - } - .context("Failed to execute command")?; - - let stdout = String::from_utf8_lossy(&output.stdout); - let stderr = String::from_utf8_lossy(&output.stderr); - let raw = format!("{}\n{}", stdout, stderr); + let mut c = Command::new("sh"); + c.args(["-c", command]); + c + }; + let result = exec_capture(&mut cmd).context("Failed to execute command")?; - let exit_code = 
crate::core::utils::exit_code_from_output(&output, command); + let raw = format!("{}\n{}", result.stdout, result.stderr); - let summary = summarize_output(&raw, command, output.status.success()); + let summary = summarize_output(&raw, command, result.success()); println!("{}", summary); timer.track(command, "rtk summary", &raw, &summary); - Ok(exit_code) + Ok(result.exit_code) } fn summarize_output(output: &str, command: &str, success: bool) -> String { diff --git a/src/core/stream.rs b/src/core/stream.rs index 48bdde820..d4c3e3c96 100644 --- a/src/core/stream.rs +++ b/src/core/stream.rs @@ -228,6 +228,32 @@ pub fn run_streaming( }) } +pub struct CaptureResult { + pub stdout: String, + pub stderr: String, + pub exit_code: i32, +} + +impl CaptureResult { + pub fn success(&self) -> bool { + self.exit_code == 0 + } + + pub fn combined(&self) -> String { + format!("{}{}", self.stdout, self.stderr) + } +} + +pub fn exec_capture(cmd: &mut Command) -> Result { + cmd.stdin(Stdio::null()); + let output = cmd.output().context("Failed to execute command")?; + Ok(CaptureResult { + stdout: String::from_utf8_lossy(&output.stdout).into_owned(), + stderr: String::from_utf8_lossy(&output.stderr).into_owned(), + exit_code: status_to_exit_code(output.status), + }) +} + #[cfg(test)] mod tests { use super::*; @@ -424,4 +450,50 @@ mod tests { let result = run_streaming(&mut cmd, StdinMode::Null, FilterMode::Passthrough).unwrap(); assert_eq!(result.filtered.trim(), result.raw.trim()); } + + #[test] + fn test_exec_capture_success() { + let mut cmd = Command::new("echo"); + cmd.arg("hello_capture"); + let result = exec_capture(&mut cmd).unwrap(); + assert!(result.success()); + assert_eq!(result.exit_code, 0); + assert!(result.stdout.contains("hello_capture")); + } + + #[test] + fn test_exec_capture_failure() { + let mut cmd = Command::new("false"); + let result = exec_capture(&mut cmd).unwrap(); + assert!(!result.success()); + assert_eq!(result.exit_code, 1); + } + + #[test] + fn 
test_exec_capture_stderr() { + let mut cmd = Command::new("sh"); + cmd.args(["-c", "echo err_msg >&2"]); + let result = exec_capture(&mut cmd).unwrap(); + assert!(result.stderr.contains("err_msg")); + } + + #[test] + fn test_exec_capture_combined() { + let mut cmd = Command::new("sh"); + cmd.args(["-c", "echo out_msg; echo err_msg >&2"]); + let result = exec_capture(&mut cmd).unwrap(); + let combined = result.combined(); + assert!(combined.contains("out_msg")); + assert!(combined.contains("err_msg")); + } + + #[test] + fn test_capture_result_combined_empty() { + let r = CaptureResult { + stdout: String::new(), + stderr: String::new(), + exit_code: 0, + }; + assert_eq!(r.combined(), ""); + } } diff --git a/src/hooks/permissions.rs b/src/hooks/permissions.rs index 67a1d14a1..32ad0f6f6 100644 --- a/src/hooks/permissions.rs +++ b/src/hooks/permissions.rs @@ -1,4 +1,5 @@ use super::constants::{CLAUDE_DIR, SETTINGS_JSON, SETTINGS_LOCAL_JSON}; +use crate::core::stream::exec_capture; use serde_json::Value; use std::path::PathBuf; @@ -140,14 +141,12 @@ fn find_project_root() -> Option { } // Fallback: git (spawns a subprocess, slower but handles monorepo layouts). 
- let output = std::process::Command::new("git") - .args(["rev-parse", "--show-toplevel"]) - .output() - .ok()?; - - if output.status.success() { - let path = String::from_utf8(output.stdout).ok()?; - return Some(PathBuf::from(path.trim())); + let mut cmd = std::process::Command::new("git"); + cmd.args(["rev-parse", "--show-toplevel"]); + let result = exec_capture(&mut cmd).ok()?; + + if result.success() { + return Some(PathBuf::from(result.stdout.trim())); } None From 63313a25b6e15749fe047376414a9bd6bdf811a7 Mon Sep 17 00:00:00 2001 From: aesoft <43991222+aeppling@users.noreply.github.com> Date: Sun, 5 Apr 2026 13:28:08 +0200 Subject: [PATCH 06/44] fix(stream): skip printf-based tests on Win --- src/core/stream.rs | 2 ++ 1 file changed, 2 insertions(+) diff --git a/src/core/stream.rs b/src/core/stream.rs index d4c3e3c96..8a5001fc6 100644 --- a/src/core/stream.rs +++ b/src/core/stream.rs @@ -368,6 +368,7 @@ mod tests { assert!(!result.success()); } + #[cfg(not(windows))] #[test] fn test_run_streaming_streaming_filter_drops_lines() { let mut cmd = Command::new("printf"); @@ -391,6 +392,7 @@ mod tests { assert_eq!(result.exit_code, 0); } + #[cfg(not(windows))] #[test] fn test_run_streaming_buffered_filter() { let mut cmd = Command::new("printf"); From 41ebf0babbf6aee21a267230e8c101cc67f6b6a2 Mon Sep 17 00:00:00 2001 From: aesoft <43991222+aeppling@users.noreply.github.com> Date: Sun, 5 Apr 2026 14:08:50 +0200 Subject: [PATCH 07/44] fix(hooks): permission checks, audit log injection, stdin limits --- src/hooks/hook_cmd.rs | 94 +++++++++++++++++++++++++++++++++++-------- 1 file changed, 77 insertions(+), 17 deletions(-) diff --git a/src/hooks/hook_cmd.rs b/src/hooks/hook_cmd.rs index a39cebae2..d270673ab 100644 --- a/src/hooks/hook_cmd.rs +++ b/src/hooks/hook_cmd.rs @@ -11,6 +11,20 @@ use std::io::{self, Read, Write}; use crate::discover::registry::{has_heredoc, rewrite_command}; +const STDIN_CAP: usize = 1_048_576; // 1 MiB + +fn read_stdin_limited() -> Result { + let 
mut input = String::new(); + io::stdin() + .take((STDIN_CAP + 1) as u64) + .read_to_string(&mut input) + .context("Failed to read stdin")?; + if input.len() > STDIN_CAP { + anyhow::bail!("hook stdin exceeds {} byte limit", STDIN_CAP); + } + Ok(input) +} + // ── Copilot hook (VS Code + Copilot CLI) ────────────────────── /// Format detected from the preToolUse JSON input. @@ -26,10 +40,7 @@ enum HookFormat { /// Run the Copilot preToolUse hook. /// Auto-detects VS Code Copilot Chat vs Copilot CLI format. pub fn run_copilot() -> Result<()> { - let mut input = String::new(); - io::stdin() - .read_to_string(&mut input) - .context("Failed to read stdin")?; + let input = read_stdin_limited()?; let input = input.trim(); if input.is_empty() { @@ -142,6 +153,10 @@ fn handle_vscode(cmd: &str) -> Result<()> { } fn handle_copilot_cli(cmd: &str) -> Result<()> { + if permissions::check_command(cmd) == PermissionVerdict::Deny { + return Ok(()); + } + let rewritten = match get_rewritten(cmd) { Some(r) => r, None => return Ok(()), @@ -162,10 +177,7 @@ fn handle_copilot_cli(cmd: &str) -> Result<()> { /// Run the Gemini CLI BeforeTool hook. pub fn run_gemini() -> Result<()> { - let mut input = String::new(); - io::stdin() - .read_to_string(&mut input) - .context("Failed to read hook input from stdin")?; + let input = read_stdin_limited()?; let json: Value = serde_json::from_str(&input).context("Failed to parse hook input as JSON")?; @@ -233,6 +245,11 @@ fn audit_log(action: &str, original: &str, rewritten: &str) { let _ = audit_log_inner(action, original, rewritten); } +/// Escape newlines to prevent log-line injection in the pipe-delimited audit log. 
+fn sanitize_log_field(s: &str) -> String { + s.replace('\n', "\\n").replace('\r', "\\r") +} + fn audit_log_inner(action: &str, original: &str, rewritten: &str) -> Option<()> { let home = dirs::home_dir()?; let dir = home.join(".local").join("share").join("rtk"); @@ -244,17 +261,22 @@ fn audit_log_inner(action: &str, original: &str, rewritten: &str) -> Option<()> .open(path) .ok()?; let ts = chrono::Local::now().format("%Y-%m-%dT%H:%M:%S"); - writeln!(file, "{} | {} | {} | {}", ts, action, original, rewritten).ok() + writeln!( + file, + "{} | {} | {} | {}", + ts, + action, + sanitize_log_field(original), + sanitize_log_field(rewritten) + ) + .ok() } // ── Claude Code native hook ──────────────────────────────────── /// Run the Claude Code PreToolUse hook natively. pub fn run_claude() -> Result<()> { - let mut input = String::new(); - io::stdin() - .read_to_string(&mut input) - .context("Failed to read stdin")?; + let input = read_stdin_limited()?; let input = input.trim(); if input.is_empty() { @@ -365,10 +387,7 @@ fn run_claude_inner(input: &str) -> Option { /// Run the Cursor Agent hook natively. 
pub fn run_cursor() -> Result<()> { - let mut input = String::new(); - io::stdin() - .read_to_string(&mut input) - .context("Failed to read stdin")?; + let input = read_stdin_limited()?; let input = input.trim(); if input.is_empty() { @@ -396,6 +415,11 @@ pub fn run_cursor() -> Result<()> { } }; + if permissions::check_command(&cmd) == PermissionVerdict::Deny { + let _ = writeln!(io::stdout(), "{{}}"); + return Ok(()); + } + let rewritten = match get_rewritten(&cmd) { Some(r) => r, None => { @@ -429,6 +453,10 @@ fn run_cursor_inner(input: &str) -> String { None => return "{}".to_string(), }; + if permissions::check_command(&cmd) == PermissionVerdict::Deny { + return "{}".to_string(); + } + match get_rewritten(&cmd) { Some(rewritten) => { let output = json!({ @@ -781,4 +809,36 @@ mod tests { let _ = std::fs::remove_dir_all(&tmp); } + + // --- Adversarial tests --- + + #[test] + fn test_audit_log_sanitizes_newlines() { + let sanitized = sanitize_log_field("git status\nfake | inject | evil"); + assert!(!sanitized.contains('\n')); + assert!(sanitized.contains("\\n")); + } + + #[test] + fn test_claude_unicode_null_passthrough() { + let input = claude_input("git status \u{0000}\u{FEFF}"); + let _ = run_claude_inner(&input); + } + + #[test] + fn test_claude_extremely_long_command() { + let long_cmd = format!("git status {}", "A".repeat(100_000)); + let input = claude_input(&long_cmd); + let _ = run_claude_inner(&input); + } + + #[test] + fn test_cursor_deny_blocks_rewrite() { + use super::permissions::check_command_with_rules; + let deny = vec!["git status".to_string()]; + assert_eq!( + check_command_with_rules("git status", &deny, &[], &[]), + PermissionVerdict::Deny + ); + } } From 0d07f04d03dd5f09b10dd7aea5a5479f0612da71 Mon Sep 17 00:00:00 2001 From: aesoft <43991222+aeppling@users.noreply.github.com> Date: Sun, 5 Apr 2026 14:09:13 +0200 Subject: [PATCH 08/44] fix(core): error handl, RAW_CAP, recursion guard, atomic writes --- src/core/stream.rs | 18 ++++++++-------- 
src/discover/registry.rs | 20 ++++++++++-------- src/hooks/init.rs | 44 +++++++++++++++++----------------------- src/hooks/permissions.rs | 4 ++++ 4 files changed, 45 insertions(+), 41 deletions(-) diff --git a/src/core/stream.rs b/src/core/stream.rs index 8a5001fc6..b9137ac44 100644 --- a/src/core/stream.rs +++ b/src/core/stream.rs @@ -152,7 +152,7 @@ pub fn run_streaming( match stdout_mode { FilterMode::Passthrough => { for line in BufReader::new(stdout).lines().map_while(Result::ok) { - if raw_stdout.len() < RAW_CAP { + if raw_stdout.len() + line.len() + 1 <= RAW_CAP { raw_stdout.push_str(&line); raw_stdout.push('\n'); } @@ -166,7 +166,7 @@ pub fn run_streaming( } FilterMode::Streaming(mut filter) => { for line in BufReader::new(stdout).lines().map_while(Result::ok) { - if raw_stdout.len() < RAW_CAP { + if raw_stdout.len() + line.len() + 1 <= RAW_CAP { raw_stdout.push_str(&line); raw_stdout.push('\n'); } @@ -189,14 +189,13 @@ pub fn run_streaming( } FilterMode::Buffered(filter_fn) => { for line in BufReader::new(stdout).lines().map_while(Result::ok) { - if raw_stdout.len() < RAW_CAP { + if raw_stdout.len() + line.len() + 1 <= RAW_CAP { raw_stdout.push_str(&line); raw_stdout.push('\n'); } } - let result = filter_fn(&raw_stdout); - filtered = result.clone(); - match write!(out, "{}", result) { + filtered = filter_fn(&raw_stdout); + match write!(out, "{}", filtered) { Err(e) if e.kind() == io::ErrorKind::BrokenPipe => {} Err(e) => return Err(e.into()), Ok(_) => {} @@ -204,7 +203,7 @@ pub fn run_streaming( } FilterMode::CaptureOnly => { for line in BufReader::new(stdout).lines().map_while(Result::ok) { - if raw_stdout.len() < RAW_CAP { + if raw_stdout.len() + line.len() + 1 <= RAW_CAP { raw_stdout.push_str(&line); raw_stdout.push('\n'); } @@ -214,7 +213,10 @@ pub fn run_streaming( } } - let raw_stderr = stderr_thread.join().unwrap_or_else(|_| String::new()); + let raw_stderr = stderr_thread.join().unwrap_or_else(|e| { + eprintln!("[rtk] warning: stderr reader thread 
panicked: {:?}", e); + String::new() + }); if let Some(t) = stdin_thread { t.join().ok(); } diff --git a/src/discover/registry.rs b/src/discover/registry.rs index 1643194c8..4a295e256 100644 --- a/src/discover/registry.rs +++ b/src/discover/registry.rs @@ -485,24 +485,28 @@ fn rewrite_line_range(cmd: &str) -> Option { /// but don't change which command runs. Strip before routing, re-prepend after. const SHELL_PREFIX_BUILTINS: &[&str] = &["noglob", "command", "builtin", "exec", "nocorrect"]; -/// Rewrite a single (non-compound) command segment. -/// Returns `Some(rewritten)` if matched (including already-RTK pass-through). -/// Returns `None` if no match (caller uses original segment). +const MAX_PREFIX_DEPTH: usize = 10; + fn rewrite_segment(seg: &str, excluded: &[String]) -> Option { + rewrite_segment_inner(seg, excluded, 0) +} + +fn rewrite_segment_inner(seg: &str, excluded: &[String], depth: usize) -> Option { let trimmed = seg.trim(); if trimmed.is_empty() { return None; } - // Peel shell prefix builtins (noglob, command, builtin, exec, nocorrect) - // before routing, re-prepend after. + if depth >= MAX_PREFIX_DEPTH { + return None; + } + for &prefix in SHELL_PREFIX_BUILTINS { if let Some(rest) = strip_word_prefix(trimmed, prefix) { if rest.is_empty() { - return None; // bare "noglob" etc. 
— nothing to rewrite + return None; } - // Recursively rewrite the inner command - return match rewrite_segment(rest, excluded) { + return match rewrite_segment_inner(rest, excluded, depth + 1) { Some(rewritten) => Some(format!("{} {}", prefix, rewritten)), None => None, }; diff --git a/src/hooks/init.rs b/src/hooks/init.rs index ae0bd424b..00a18f0b8 100644 --- a/src/hooks/init.rs +++ b/src/hooks/init.rs @@ -302,7 +302,7 @@ fn write_if_changed(path: &Path, content: &str, name: &str, verbose: u8) -> Resu } Ok(false) } else { - fs::write(path, content) + atomic_write(path, content) .with_context(|| format!("Failed to write {}: {}", name, path.display()))?; if verbose > 0 { eprintln!("Updated {}: {}", name, path.display()); @@ -310,7 +310,7 @@ fn write_if_changed(path: &Path, content: &str, name: &str, verbose: u8) -> Resu Ok(true) } } else { - fs::write(path, content) + atomic_write(path, content) .with_context(|| format!("Failed to write {}: {}", name, path.display()))?; if verbose > 0 { eprintln!("Created {}: {}", name, path.display()); @@ -682,8 +682,7 @@ fn patch_settings_json_command( } } - // Deep-merge hook - insert_hook_entry(&mut root, hook_command); + insert_hook_entry(&mut root, hook_command)?; // Backup original if settings_path.exists() { @@ -748,31 +747,27 @@ fn clean_double_blanks(content: &str) -> String { /// Deep-merge RTK hook entry into settings.json /// Creates hooks.PreToolUse structure if missing, preserves existing hooks -fn insert_hook_entry(root: &mut serde_json::Value, hook_command: &str) { - // Ensure root is an object +fn insert_hook_entry(root: &mut serde_json::Value, hook_command: &str) -> Result<()> { let root_obj = match root.as_object_mut() { Some(obj) => obj, None => { *root = serde_json::json!({}); - root.as_object_mut() - .expect("Just created object, must succeed") + root.as_object_mut().expect("just-created json object") } }; - // Use entry() API for idiomatic insertion let hooks = root_obj .entry("hooks") .or_insert_with(|| 
serde_json::json!({})) .as_object_mut() - .expect("hooks must be an object"); + .context("hooks value is not an object")?; let pre_tool_use = hooks .entry(PRE_TOOL_USE_KEY) .or_insert_with(|| serde_json::json!([])) .as_array_mut() - .expect("PreToolUse must be an array"); + .context("PreToolUse value is not an array")?; - // Append RTK hook entry pre_tool_use.push(serde_json::json!({ "matcher": "Bash", "hooks": [{ @@ -780,6 +775,7 @@ fn insert_hook_entry(root: &mut serde_json::Value, hook_command: &str) { "command": hook_command }] })); + Ok(()) } /// Check if RTK hook is already present in settings.json @@ -1622,8 +1618,7 @@ fn patch_cursor_hooks_json(path: &Path, verbose: u8) -> Result { return Ok(false); } - // Insert the RTK preToolUse entry - insert_cursor_hook_entry(&mut root); + insert_cursor_hook_entry(&mut root)?; // Backup if exists if path.exists() { @@ -1664,35 +1659,34 @@ fn cursor_hook_already_present(root: &serde_json::Value) -> bool { } /// Insert RTK preToolUse entry into Cursor hooks.json -fn insert_cursor_hook_entry(root: &mut serde_json::Value) { +fn insert_cursor_hook_entry(root: &mut serde_json::Value) -> Result<()> { let root_obj = match root.as_object_mut() { Some(obj) => obj, None => { *root = serde_json::json!({ "version": 1 }); - root.as_object_mut() - .expect("Just created object, must succeed") + root.as_object_mut().expect("just-created json object") } }; - // Ensure version key root_obj.entry("version").or_insert(serde_json::json!(1)); let hooks = root_obj .entry("hooks") .or_insert_with(|| serde_json::json!({})) .as_object_mut() - .expect("hooks must be an object"); + .context("hooks value is not an object")?; let pre_tool_use = hooks .entry("preToolUse") .or_insert_with(|| serde_json::json!([])) .as_array_mut() - .expect("preToolUse must be an array"); + .context("preToolUse value is not an array")?; pre_tool_use.push(serde_json::json!({ "command": CURSOR_HOOK_COMMAND, "matcher": "Shell" })); + Ok(()) } /// Remove Cursor RTK 
artifacts: hook script + hooks.json entry @@ -2745,7 +2739,7 @@ More notes let mut json_content = serde_json::json!({}); let hook_command = "/Users/test/.claude/hooks/rtk-rewrite.sh"; - insert_hook_entry(&mut json_content, hook_command); + insert_hook_entry(&mut json_content, hook_command).unwrap(); // Should create full structure assert!(json_content.get("hooks").is_some()); @@ -2777,7 +2771,7 @@ More notes }); let hook_command = "/Users/test/.claude/hooks/rtk-rewrite.sh"; - insert_hook_entry(&mut json_content, hook_command); + insert_hook_entry(&mut json_content, hook_command).unwrap(); let pre_tool_use = json_content["hooks"]["PreToolUse"].as_array().unwrap(); assert_eq!(pre_tool_use.len(), 2); // Should have both hooks @@ -2800,7 +2794,7 @@ More notes }); let hook_command = "/Users/test/.claude/hooks/rtk-rewrite.sh"; - insert_hook_entry(&mut json_content, hook_command); + insert_hook_entry(&mut json_content, hook_command).unwrap(); // Should preserve all other keys assert_eq!(json_content["env"]["PATH"], "/custom/path"); @@ -3003,7 +2997,7 @@ More notes #[test] fn test_insert_cursor_hook_entry_empty() { let mut json_content = serde_json::json!({ "version": 1 }); - insert_cursor_hook_entry(&mut json_content); + insert_cursor_hook_entry(&mut json_content).unwrap(); let hooks = json_content["hooks"]["preToolUse"].as_array().unwrap(); assert_eq!(hooks.len(), 1); @@ -3027,7 +3021,7 @@ More notes } }); - insert_cursor_hook_entry(&mut json_content); + insert_cursor_hook_entry(&mut json_content).unwrap(); let pre_tool_use = json_content["hooks"]["preToolUse"].as_array().unwrap(); assert_eq!(pre_tool_use.len(), 2); diff --git a/src/hooks/permissions.rs b/src/hooks/permissions.rs index 6d7249751..e189d8aca 100644 --- a/src/hooks/permissions.rs +++ b/src/hooks/permissions.rs @@ -98,6 +98,10 @@ fn load_permission_rules() -> (Vec, Vec, Vec) { continue; }; let Ok(json) = serde_json::from_str::(&content) else { + eprintln!( + "[rtk] warning: failed to parse permissions from 
{}", + path.display() + ); continue; }; let Some(permissions) = json.get("permissions") else { From 76f3b24074fe87b8209b68713cb5a0a7d0b2ed98 Mon Sep 17 00:00:00 2001 From: "github-actions[bot]" <41898282+github-actions[bot]@users.noreply.github.com> Date: Mon, 6 Apr 2026 10:54:48 +0000 Subject: [PATCH 09/44] chore(master): release 0.35.0 --- .release-please-manifest.json | 2 +- CHANGELOG.md | 39 +++++++++++++++++++++++++++++++++++ Cargo.lock | 2 +- Cargo.toml | 2 +- 4 files changed, 42 insertions(+), 3 deletions(-) diff --git a/.release-please-manifest.json b/.release-please-manifest.json index b9091c583..3a39fd8cf 100644 --- a/.release-please-manifest.json +++ b/.release-please-manifest.json @@ -1,3 +1,3 @@ { - ".": "0.34.3" + ".": "0.35.0" } diff --git a/CHANGELOG.md b/CHANGELOG.md index 98628683e..8f6bf6e54 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -5,6 +5,45 @@ All notable changes to rtk (Rust Token Killer) will be documented in this file. The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html). 
+## [0.35.0](https://github.com/rtk-ai/rtk/compare/v0.34.3...v0.35.0) (2026-04-06) + + +### Features + +* **aws:** expand CLI filters from 8 to 25 subcommands ([402c48e](https://github.com/rtk-ai/rtk/commit/402c48e66988e638a5b4f4dd193238fc1d0fe18f)) + + +### Bug Fixes + +* **cmd:** read/cat multiple file and consistent behavior ([3f58018](https://github.com/rtk-ai/rtk/commit/3f58018f4af1d7206457929cf80bb4534203c3ee)) +* **docs:** clean some docs + disclaimer ([deda44f](https://github.com/rtk-ai/rtk/commit/deda44f73607981f3d27ecc6341ce927aab34d37)) +* **gh:** pass through gh pr merge instead of canned response ([#938](https://github.com/rtk-ai/rtk/issues/938)) ([8465ca9](https://github.com/rtk-ai/rtk/commit/8465ca953fa9d70dcc971a941c19465d456eb7d4)) +* **gh:** pass through gh pr merge instead of canned response ([#938](https://github.com/rtk-ai/rtk/issues/938)) ([e1f2845](https://github.com/rtk-ai/rtk/commit/e1f2845df06a8d8b8325945dc4940ec5f530e4cc)) +* **git:** inherit stdin for commit and push to preserve SSH signing ([#733](https://github.com/rtk-ai/rtk/issues/733)) ([eefeae4](https://github.com/rtk-ai/rtk/commit/eefeae45656ff2607c3f519c8eae235e3f0fe411)) +* **git:** inherit stdin for commit and push to preserve SSH signing ([#733](https://github.com/rtk-ai/rtk/issues/733)) ([6cee6c6](https://github.com/rtk-ai/rtk/commit/6cee6c60b80f914ed9505e3925d85cadec43ab97)) +* **git:** preserve full diff hunk headers ([62f4452](https://github.com/rtk-ai/rtk/commit/62f445227679f3df293fe35e9b18cc5ab39d7963)) +* **git:** preserve full diff hunk headers ([09b3ff9](https://github.com/rtk-ai/rtk/commit/09b3ff9424e055f5fe25e535e5b60e077f8344f9)) +* **go:** avoid false build errors from download logs ([9c1cf2f](https://github.com/rtk-ai/rtk/commit/9c1cf2f403534fa7874638b1b983c2d7f918a185)) +* **go:** avoid false build errors from download logs ([d44fd3e](https://github.com/rtk-ai/rtk/commit/d44fd3e034208e3bcd59c2c46f7720eec4f10c98)) +* **go:** cover more build failure shapes 
([2425ad6](https://github.com/rtk-ai/rtk/commit/2425ad68e5386d19e5ec9ff1ca151a6d2c9a56d3)) +* **go:** preserve failing test location context ([1481bc5](https://github.com/rtk-ai/rtk/commit/1481bc590924031456a6022510275c29c09e330e)) +* **go:** preserve failing test location context ([374fe64](https://github.com/rtk-ai/rtk/commit/374fe64cfbedcd676733973e81a63a6dfecbb1b7)) +* **go:** restore build error coverage ([1177c9c](https://github.com/rtk-ai/rtk/commit/1177c9c873ac63b6c0bcc9e1b664a705baa0ad7a)) +* **grep:** close subprocess stdin to prevent memory leak ([#897](https://github.com/rtk-ai/rtk/issues/897)) ([7217562](https://github.com/rtk-ai/rtk/commit/72175623551f40b581b4a7f6ed966c1e4a9c7358)) +* **grep:** close subprocess stdin to prevent memory leak ([#897](https://github.com/rtk-ai/rtk/issues/897)) ([09979cf](https://github.com/rtk-ai/rtk/commit/09979cf29701a1b775bcac761d24ec0e055d1bec)) +* **hook_check:** detect missing integrations ([9cf9ccc](https://github.com/rtk-ai/rtk/commit/9cf9ccc1ac39f8bba37e932c7d318a3aa7a34ae9)) +* **init:** remove opt-out instruction from telemetry message ([7571c8e](https://github.com/rtk-ai/rtk/commit/7571c8e101c41ee64c51e2bd64697f85f9142423)) +* **init:** remove telemetry info lines from init output ([7dbef2c](https://github.com/rtk-ai/rtk/commit/7dbef2ce00824d26f2057e4c3c76e429e2e23088)) +* **main:** kill zombie processes + path for rtk md ([d16fc6d](https://github.com/rtk-ai/rtk/commit/d16fc6dacbfec912c21522939b15b7bbd9719487)) +* **main:** kill zombie processes + path for rtk md + missing intergrations ([a919335](https://github.com/rtk-ai/rtk/commit/a919335519ed4a5259a212e56407cb312aa99bac)) +* **merge:** changelog conflicts ([d92c5d2](https://github.com/rtk-ai/rtk/commit/d92c5d264a49483c8d6079e04d946a79bc990a74)) +* **proxy:** kill child process on SIGINT/SIGTERM to prevent orphans ([d813919](https://github.com/rtk-ai/rtk/commit/d813919a24546e044e7844fc7ed05fef4ec24033)) +* **proxy:** kill child process on SIGINT/SIGTERM to 
prevent orphans ([3318510](https://github.com/rtk-ai/rtk/commit/33185101fc122d0c11a25a4e02ac9f3a7dc7e3bb)) +* **review:** address ChildGuard disarm, stdin dedup, hook masking ([d85fe33](https://github.com/rtk-ai/rtk/commit/d85fe3384b87c16fafd25ec7bcadbff6e69f3f1f)) +* **security:** default to ask when no permission rule matches ([#886](https://github.com/rtk-ai/rtk/issues/886)) ([158c745](https://github.com/rtk-ai/rtk/commit/158c74527f6591d372e40a78cd604d73a20649a9)) +* **security:** default to ask when no permission rule matches ([#886](https://github.com/rtk-ai/rtk/issues/886)) ([41a6c6b](https://github.com/rtk-ai/rtk/commit/41a6c6bf6da78a4754794fdc6a1469df2e327920)) +* **tracking:** use std::env::temp_dir() for compatibility (instead of unix tmp) ([e918661](https://github.com/rtk-ai/rtk/commit/e918661440d7b50321f0535032f52c5e87aaf3cb)) + ## [Unreleased] ### Features diff --git a/Cargo.lock b/Cargo.lock index e1309d2eb..64aa3e9dd 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -903,7 +903,7 @@ dependencies = [ [[package]] name = "rtk" -version = "0.34.3" +version = "0.35.0" dependencies = [ "anyhow", "automod", diff --git a/Cargo.toml b/Cargo.toml index 68ed2aaf6..69beeaa53 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "rtk" -version = "0.34.3" +version = "0.35.0" edition = "2021" authors = ["Patrick Szymkowiak"] description = "Rust Token Killer - High-performance CLI proxy to minimize LLM token consumption" From b3936b8e5d5fc2b5036b90aada61917fa3f3f7de Mon Sep 17 00:00:00 2001 From: aesoft <43991222+aeppling@users.noreply.github.com> Date: Thu, 9 Apr 2026 20:39:30 +0200 Subject: [PATCH 10/44] fix(stream): stream engine & template + BlockHandler, RegexBlockFilter, signal guard, pipe rewrite --- Cargo.toml | 1 + src/cmds/README.md | 128 ++++++++++-- src/cmds/js/tsc_cmd.rs | 122 ++++++++++- src/cmds/rust/cargo_cmd.rs | 378 +++++++++++++++++++++++++++++++++- src/cmds/rust/runner.rs | 205 ++++++++++--------- src/core/runner.rs | 186 
+++++++++++------ src/core/stream.rs | 410 ++++++++++++++++++++++++++++++++----- src/discover/registry.rs | 81 +++++++- src/hooks/hook_cmd.rs | 5 +- src/main.rs | 33 ++- 10 files changed, 1313 insertions(+), 236 deletions(-) diff --git a/Cargo.toml b/Cargo.toml index 22b999a88..ce153593a 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -34,6 +34,7 @@ flate2 = "1.0" quick-xml = "0.37" which = "8" automod = "1" +libc = "0.2" [build-dependencies] toml = "0.8" diff --git a/src/cmds/README.md b/src/cmds/README.md index 5e0f633b3..9ca7b2821 100644 --- a/src/cmds/README.md +++ b/src/cmds/README.md @@ -35,19 +35,19 @@ Each subdirectory has its own README with file descriptions, parsing strategies, - **[`system/`](system/README.md)** — ls, tree, read, grep, find, wc, env, json, log, deps, summary, format, smart — format_cmd routing, filter levels, language detection - **[`ruby/`](ruby/README.md)** — rake/rails test, rspec, rubocop — JSON injection pattern, `ruby_exec()` bundle exec auto-detection -## Execution Flow: `runner::run_filtered()` +## Execution Flow -The shared wrapper in [`core/runner.rs`](../core/runner.rs) encapsulates the six-phase execution skeleton. Modules build the `Command` (custom arg logic), then delegate to `run_filtered()` for everything else. +The shared wrappers in [`core/runner.rs`](../core/runner.rs) encapsulate the execution skeleton. Modules build the `Command` (custom arg logic), then delegate to a runner entry point. All runners handle tracking, tee recovery, and exit code propagation automatically. 
``` - cmd.output() Filter applied to tee_and_hint() - | stdout or combined | - v | v + run_streaming() Filter applied tee_and_hint() + | (per-line or post-hoc) | + v | v +---------+ stdout +-------+-------+ filtered +-------+ - | Execute |--------->| filter_fn() |----------->| Print | + | Spawn |--------->| filter |----------->| Print | +---------+ stderr +---------------+ +-------+ - | | - v v + | (live) | + v v +----------+ +---------+ | raw = | | Track | | stdout + | | savings | @@ -60,14 +60,33 @@ The shared wrapper in [`core/runner.rs`](../core/runner.rs) encapsulates the six +-----------+ ``` -**Six phases in order:** +### Filter modes -1. **Execute** — `cmd.output()` captures stdout + stderr -2. **Filter** — `filter_fn` receives stdout-only or combined, returns compressed string -3. **Print** — filtered output printed; if tee enabled, appends recovery hint on failure -4. **Stderr passthrough** — when `filter_stdout_only`: stderr printed via `eprintln!()` unconditionally -5. **Track** — `timer.track()` records raw vs filtered for token savings -6. **Exit code** — returns `Ok(exit_code)` to caller; `main.rs` calls `process::exit(code)` once +All execution goes through `core::stream::run_streaming()` with one of four `FilterMode` variants. The runner entry points (`run_filtered`, `run_streamed`, `run_passthrough`) select the appropriate mode automatically — module authors don't interact with `FilterMode` directly. + +| FilterMode | How it works | Used by | +|------------|-------------|---------| +| **`CaptureOnly`** | Buffers all stdout silently, then passes the full string to `filter_fn` post-hoc. Stderr streams to terminal in real time. | `run_filtered()` (default path) | +| **`Buffered`** | Buffers all stdout, applies filter, then prints the result. Stderr streams live. Chosen automatically by `run_filtered()` when `filter_stdout_only` is set. 
| `run_filtered()` (stdout-only path) | +| **`Streaming`** | Feeds each stdout line to a `StreamFilter::feed_line()` as it arrives. Emitted lines print immediately. Calls `flush()` after process exits for final output. | `run_streamed()` | +| **`Passthrough`** | Inherits the parent TTY directly — no piping, no buffering. `raw`/`filtered` are empty. | `run_passthrough()` | + +### When to use which + +| Scenario | Runner | FilterMode | Why | +|----------|--------|------------|-----| +| Parse structured output (JSON, tables) | `run_filtered()` | CaptureOnly/Buffered | Filter needs full text to parse structure | +| Long-running, line-parseable output | `run_streamed()` | Streaming | Low memory, real-time output | +| No filtering, just track usage | `run_passthrough()` | Passthrough | Zero overhead, inherits TTY | +| Custom logic (multi-command, file I/O) | Manual with `exec_capture()` | CaptureOnly | Full control over execution | + +### Phases + +1. **Spawn** — `run_streaming()` starts the child process with piped stdout/stderr (or inherited TTY for Passthrough) +2. **Filter** — stdout is processed per the FilterMode; stderr is forwarded to the terminal in real time via a dedicated reader thread +3. **Print** — filtered output is written to stdout (live for Streaming, post-hoc for CaptureOnly/Buffered); if tee enabled, appends recovery hint on failure +4. **Track** — `timer.track()` records raw vs filtered for token savings +5. **Exit code** — returns `Ok(exit_code)` to caller; `main.rs` calls `process::exit(code)` once **`RunOptions` builder:** @@ -96,14 +115,85 @@ pub fn run(args: &[String], verbose: u8) -> Result { Exit code handling is **fully automatic** when using `run_filtered()` — the wrapper extracts the exit code (including Unix signal handling via 128+signal), tracks savings, and returns `Ok(exit_code)`. Module authors just return the result. 
+**Streaming filters (line-by-line):** + +Use `runner::run_streamed()` when the command is long-running or produces unbounded output that should be filtered line-by-line. Three levels of abstraction, from simplest to most flexible: + +**Level 1: `RegexBlockFilter`** — regex start pattern + indent continuation (3-5 lines) + +For block-based errors where blocks start with a regex match and continue on indented lines. Handles skip prefixes, block counting, and summary automatically. + +```rust +use crate::core::stream::{BlockStreamFilter, RegexBlockFilter}; + +pub fn run(args: &[String], verbose: u8) -> Result { + let mut cmd = resolved_command("mycmd"); + for arg in args { cmd.arg(arg); } + + let filter = RegexBlockFilter::new("mycmd", r"^error\[") + .skip_prefixes(&["warning:", "note:"]); + + runner::run_streamed( + cmd, "mycmd", &args.join(" "), + Box::new(BlockStreamFilter::new(filter)), + runner::RunOptions::with_tee("mycmd"), + ) +} +``` + +`RegexBlockFilter` provides: regex-based block start detection, indent-based continuation (space/tab), configurable line skipping via prefixes, and automatic summary (`"mycmd: 3 blocks in output"` or `"mycmd: no errors found"`). + +**Level 2: `BlockHandler` trait** — custom block detection with state tracking + +When you need custom block start/continuation logic or stateful parsing beyond regex + indent. Implement the `BlockHandler` trait and wrap in `BlockStreamFilter`. 
+ +```rust +use crate::core::stream::{BlockHandler, BlockStreamFilter}; + +struct MyHandler { error_count: usize } + +impl BlockHandler for MyHandler { + fn should_skip(&mut self, line: &str) -> bool { line.is_empty() } + fn is_block_start(&mut self, line: &str) -> bool { + if line.starts_with("FAIL") { self.error_count += 1; true } else { false } + } + fn is_block_continuation(&mut self, line: &str, _block: &[String]) -> bool { + line.starts_with(" ") || line.starts_with("at ") + } + fn format_summary(&self, _exit_code: i32, _raw: &str) -> Option { + Some(format!("{} failures\n", self.error_count)) + } +} +``` + +See `cmds/rust/cargo_cmd.rs::CargoBuildHandler` and `cmds/js/tsc_cmd.rs::TscHandler` for production examples. + +**Level 3: `StreamFilter` trait** — full line-by-line control + +When block-based parsing doesn't fit (e.g., state machines, multi-phase output, line transforms). Implement `StreamFilter` directly. + +```rust +use crate::core::stream::StreamFilter; + +struct MyFilter { state: State } + +impl StreamFilter for MyFilter { + fn feed_line(&mut self, line: &str) -> Option { + // Return Some(text) to emit, None to suppress + if line.contains("error") { Some(format!("{}\n", line)) } else { None } + } + fn flush(&mut self) -> String { String::new() } + fn on_exit(&mut self, exit_code: i32, raw: &str) -> Option { None } +} +``` + +See `cmds/rust/runner.rs::ErrorStreamFilter` for a complete reference implementation (state machine that tracks error blocks across lines). 
+ **Example — passthrough command (no filtering):** ```rust pub fn run_passthrough(args: &[OsString], verbose: u8) -> Result { - let status = resolved_command("mycmd").args(args) - .stdin(Stdio::inherit()).stdout(Stdio::inherit()).stderr(Stdio::inherit()) - .status().context("Failed to run mycmd")?; - Ok(exit_code_from_status(&status, "mycmd")) + runner::run_passthrough("mycmd", args, verbose) } ``` diff --git a/src/cmds/js/tsc_cmd.rs b/src/cmds/js/tsc_cmd.rs index 20d1e7aba..6c1f23cb5 100644 --- a/src/cmds/js/tsc_cmd.rs +++ b/src/cmds/js/tsc_cmd.rs @@ -1,13 +1,13 @@ //! Filters TypeScript compiler errors, grouping them by file and error code. use crate::core::runner; +use crate::core::stream::{BlockHandler, BlockStreamFilter}; use crate::core::utils::{resolved_command, tool_exists, truncate}; use anyhow::Result; use regex::Regex; -use std::collections::HashMap; +use std::collections::{HashMap, HashSet}; pub fn run(args: &[String], verbose: u8) -> Result { - // Try tsc directly first, fallback to npx if not found let tsc_exists = tool_exists("tsc"); let mut cmd = if tsc_exists { @@ -27,15 +27,82 @@ pub fn run(args: &[String], verbose: u8) -> Result { eprintln!("Running: {} {}", tool, args.join(" ")); } - runner::run_filtered( + runner::run_streamed( cmd, "tsc", &args.join(" "), - |raw| filter_tsc_output(raw), + Box::new(BlockStreamFilter::new(TscHandler::new())), runner::RunOptions::with_tee("tsc"), ) } +struct TscHandler { + error_count: usize, + files: HashSet, + code_counts: HashMap, +} + +impl TscHandler { + fn new() -> Self { + Self { + error_count: 0, + files: HashSet::new(), + code_counts: HashMap::new(), + } + } +} + +impl BlockHandler for TscHandler { + fn should_skip(&mut self, line: &str) -> bool { + line.starts_with("Found ") + } + + fn is_block_start(&mut self, line: &str) -> bool { + lazy_static::lazy_static! 
{ + static ref TSC_ERROR: Regex = Regex::new( + r"^(.+?)\((\d+),(\d+)\):\s+(error|warning)\s+(TS\d+):\s+(.+)$" + ).unwrap(); + } + if let Some(caps) = TSC_ERROR.captures(line) { + self.error_count += 1; + self.files.insert(caps[1].to_string()); + *self.code_counts.entry(caps[5].to_string()).or_insert(0) += 1; + true + } else { + false + } + } + + fn is_block_continuation(&mut self, line: &str, _block: &[String]) -> bool { + line.starts_with(" ") || line.starts_with('\t') + } + + fn format_summary(&self, _exit_code: i32, _raw: &str) -> Option { + if self.error_count == 0 { + return Some("TypeScript: No errors found\n".to_string()); + } + + let mut result = format!( + "═══════════════════════════════════════\nTypeScript: {} errors in {} files\n", + self.error_count, + self.files.len() + ); + + if self.code_counts.len() > 1 { + let mut counts: Vec<_> = self.code_counts.iter().collect(); + counts.sort_by(|a, b| b.1.cmp(a.1)); + let codes_str: Vec = counts + .iter() + .take(5) + .map(|(code, count)| format!("{} ({}x)", code, count)) + .collect(); + result.push_str(&format!("Top codes: {}\n", codes_str.join(", "))); + } + + Some(result) + } +} + pub(crate) fn filter_tsc_output(output: &str) -> String { lazy_static::lazy_static! { // Pattern: src/file.ts(12,5): error TS2322: Type 'string' is not assignable to type 'number'. @@ -232,4 +299,51 @@ src/app.tsx(20,5): error TS2345: Argument of type 'number' is not assignable to let result = filter_tsc_output(output); assert!(result.contains("No errors found")); } + + // --- Streaming handler tests --- + + use crate::core::stream::tests::run_block_filter; + + #[test] + fn test_tsc_stream_errors() { + let input = "\ +src/server/api/auth.ts(12,5): error TS2322: Type 'string' is not assignable to type 'number'. +src/server/api/auth.ts(15,10): error TS2345: Argument of type 'number' is not assignable to parameter of type 'string'. 
+src/components/Button.tsx(8,3): error TS2339: Property 'onClick' does not exist on type 'ButtonProps'. + +Found 3 errors in 2 files. +"; + let mut f = BlockStreamFilter::new(TscHandler::new()); + let result = run_block_filter(&mut f, input, 1); + assert!(result.contains("TS2322"), "got: {}", result); + assert!(result.contains("TS2345"), "got: {}", result); + assert!(result.contains("3 errors in 2 files"), "got: {}", result); + assert!(!result.contains("Found 3"), "got: {}", result); + } + + #[test] + fn test_tsc_stream_no_errors() { + let input = "Found 0 errors. Watching for file changes.\n"; + let mut f = BlockStreamFilter::new(TscHandler::new()); + let result = run_block_filter(&mut f, input, 0); + assert!(result.contains("No errors found"), "got: {}", result); + } + + #[test] + fn test_tsc_stream_continuation_lines() { + let input = "\ +src/app.tsx(10,3): error TS2322: Type '{ children: Element; }' is not assignable to type 'Props'. + Property 'children' does not exist on type 'Props'. +src/app.tsx(20,5): error TS2345: Argument of type 'number' is not assignable. +"; + let mut f = BlockStreamFilter::new(TscHandler::new()); + let result = run_block_filter(&mut f, input, 1); + assert!( + result.contains("Property 'children' does not exist"), + "got: {}", + result + ); + assert!(result.contains("TS2322"), "got: {}", result); + assert!(result.contains("TS2345"), "got: {}", result); + } } diff --git a/src/cmds/rust/cargo_cmd.rs b/src/cmds/rust/cargo_cmd.rs index 427fed76b..41fa43466 100644 --- a/src/cmds/rust/cargo_cmd.rs +++ b/src/cmds/rust/cargo_cmd.rs @@ -1,6 +1,7 @@ //! Filters cargo output — build errors, test results, clippy warnings. 
use crate::core::runner; +use crate::core::stream::{BlockHandler, BlockStreamFilter, StreamFilter}; use crate::core::utils::{resolved_command, truncate}; use anyhow::Result; use std::collections::HashMap; @@ -67,6 +68,216 @@ fn restore_double_dash_with_raw(args: &[String], raw_args: &[String]) -> Vec, +} + +impl CargoBuildHandler { + fn new() -> Self { + Self { + compiled: 0, + warnings: 0, + error_count: 0, + finished_line: None, + } + } +} + +impl BlockHandler for CargoBuildHandler { + fn should_skip(&mut self, line: &str) -> bool { + let trimmed = line.trim_start(); + if trimmed.starts_with("Compiling") || trimmed.starts_with("Checking") { + self.compiled += 1; + return true; + } + if trimmed.starts_with("Downloading") || trimmed.starts_with("Downloaded") { + return true; + } + if trimmed.starts_with("Finished") { + self.finished_line = Some(trimmed.to_string()); + return true; + } + if line.starts_with("warning:") + && line.contains("generated") + && line.contains("warning") + { + return true; + } + if (line.starts_with("error:") || line.starts_with("error[")) + && (line.contains("aborting due to") || line.contains("could not compile")) + { + return true; + } + false + } + + fn is_block_start(&mut self, line: &str) -> bool { + if line.starts_with("error[") || line.starts_with("error:") { + self.error_count += 1; + return true; + } + if line.starts_with("warning:") || line.starts_with("warning[") { + self.warnings += 1; + return true; + } + false + } + + fn is_block_continuation(&mut self, line: &str, block: &[String]) -> bool { + !(line.trim().is_empty() && block.len() > 3) + } + + fn format_summary(&self, _exit_code: i32, _raw: &str) -> Option { + if self.error_count == 0 && self.warnings == 0 { + let mut s = format!("cargo build ({} crates compiled)", self.compiled); + if let Some(ref finished) = self.finished_line { + s = format!("{}\n{}", s, finished); + } + Some(format!("{}\n", s)) + } else { + Some(format!( + 
"═══════════════════════════════════════\ncargo build: {} errors, {} warnings ({} crates)\n", + self.error_count, self.warnings, self.compiled + )) + } + } +} + +struct CargoTestHandler { + in_failure_section: bool, + in_failure_names: bool, + summary_lines: Vec, + has_compile_errors: bool, +} + +impl CargoTestHandler { + fn new() -> Self { + Self { + in_failure_section: false, + in_failure_names: false, + summary_lines: Vec::new(), + has_compile_errors: false, + } + } +} + +impl BlockHandler for CargoTestHandler { + fn should_skip(&mut self, line: &str) -> bool { + let trimmed = line.trim_start(); + if trimmed.starts_with("Compiling") + || trimmed.starts_with("Downloading") + || trimmed.starts_with("Downloaded") + || trimmed.starts_with("Finished") + { + return true; + } + if line.starts_with("running ") { + return true; + } + if line.starts_with("test ") && line.ends_with("... ok") { + return true; + } + // Track compile errors for fallback + if trimmed.starts_with("error[") || trimmed.starts_with("error:") { + self.has_compile_errors = true; + } + // "failures:" toggles section state + if line == "failures:" { + if self.in_failure_section { + // Second "failures:" = list of failure names — skip them + self.in_failure_names = true; + } + self.in_failure_section = true; + return true; + } + // Skip the failure name listing section + if self.in_failure_names { + if line.starts_with("test result:") { + self.in_failure_names = false; + self.in_failure_section = false; + self.summary_lines.push(line.to_string()); + return true; + } + return true; + } + if line.starts_with("test result:") { + self.summary_lines.push(line.to_string()); + self.in_failure_section = false; + return true; + } + false + } + + fn is_block_start(&mut self, line: &str) -> bool { + self.in_failure_section && line.starts_with("---- ") + } + + fn is_block_continuation(&mut self, line: &str, _block: &[String]) -> bool { + self.in_failure_section && !line.starts_with("---- ") + } + + fn 
format_summary(&self, _exit_code: i32, raw: &str) -> Option { + if self.summary_lines.is_empty() && self.has_compile_errors { + let build_filtered = filter_cargo_build(raw); + if build_filtered.starts_with("cargo build:") { + return Some(format!( + "{}\n", + build_filtered.replacen("cargo build:", "cargo test:", 1) + )); + } + // Fallback: last 5 meaningful lines + let meaningful: Vec<&str> = raw + .lines() + .filter(|l| !l.trim().is_empty() && !l.trim_start().starts_with("Compiling")) + .collect(); + let last5: Vec<&str> = meaningful.iter().rev().take(5).rev().copied().collect(); + return Some(format!("{}\n", last5.join("\n"))); + } + + // No failures emitted — aggregate pass results + let mut aggregated: Option = None; + let mut all_parsed = true; + + for line in &self.summary_lines { + if let Some(parsed) = AggregatedTestResult::parse_line(line) { + if let Some(ref mut agg) = aggregated { + agg.merge(&parsed); + } else { + aggregated = Some(parsed); + } + } else { + all_parsed = false; + break; + } + } + + if all_parsed { + if let Some(agg) = aggregated { + if agg.suites > 0 { + return Some(format!("{}\n", agg.format_compact())); + } + } + } + + // Fallback: show raw summary lines + if !self.summary_lines.is_empty() { + let mut s = String::new(); + for line in &self.summary_lines { + s.push_str(line); + s.push('\n'); + } + return Some(s); + } + + None + } +} + /// Generic cargo command runner with filtering. /// Builds the Command with restored `--` separator, then delegates to shared runner. 
fn run_cargo_filtered( @@ -99,12 +310,49 @@ where ) } +fn run_cargo_streamed( + subcommand: &str, + args: &[String], + verbose: u8, + filter: Box, +) -> Result { + let mut cmd = resolved_command("cargo"); + cmd.arg(subcommand); + + let restored_args = restore_double_dash(args); + for arg in &restored_args { + cmd.arg(arg); + } + + if verbose > 0 { + eprintln!("Running: cargo {} {}", subcommand, restored_args.join(" ")); + } + + runner::run_streamed( + cmd, + &format!("cargo {}", subcommand), + &restored_args.join(" "), + filter, + runner::RunOptions::with_tee(&format!("cargo_{}", subcommand)), + ) +} + fn run_build(args: &[String], verbose: u8) -> Result { - run_cargo_filtered("build", args, verbose, filter_cargo_build) + run_cargo_streamed( + "build", + args, + verbose, + Box::new(BlockStreamFilter::new(CargoBuildHandler::new())), + ) } fn run_test(args: &[String], verbose: u8) -> Result { - run_cargo_filtered("test", args, verbose, filter_cargo_test) + run_cargo_streamed( + "test", + args, + verbose, + Box::new(BlockStreamFilter::new(CargoTestHandler::new())), + ) } fn run_clippy(args: &[String], verbose: u8) -> Result { @@ -112,7 +360,12 @@ fn run_clippy(args: &[String], verbose: u8) -> Result { } fn run_check(args: &[String], verbose: u8) -> Result { - run_cargo_filtered("check", args, verbose, filter_cargo_build) + run_cargo_streamed( + "check", + args, + verbose, + Box::new(BlockStreamFilter::new(CargoBuildHandler::new())), + ) } fn run_install(args: &[String], verbose: u8) -> Result { @@ -537,7 +790,6 @@ fn filter_cargo_nextest(output: &str) -> String { String::new() } -/// Filter cargo build/check output - strip "Compiling"/"Checking" lines, keep errors + summary fn filter_cargo_build(output: &str) -> String { let mut errors: Vec = Vec::new(); let mut warnings = 0; @@ -1788,4 +2040,122 @@ error: test run failed result ); } + + // --- Streaming handler tests --- + + use crate::core::stream::tests::run_block_filter; + + #[test] + fn 
test_cargo_build_stream_success() { + let input = " Compiling libc v0.2.153\n Compiling cfg-if v1.0.0\n Compiling rtk v0.5.0\n Finished dev [unoptimized + debuginfo] target(s) in 15.23s\n"; + let mut f = BlockStreamFilter::new(CargoBuildHandler::new()); + let result = run_block_filter(&mut f, input, 0); + assert!(result.contains("3 crates compiled"), "got: {}", result); + assert!(result.contains("Finished"), "got: {}", result); + assert!(!result.contains("Compiling"), "got: {}", result); + } + + #[test] + fn test_cargo_build_stream_errors() { + let input = r#" Compiling rtk v0.5.0 +error[E0308]: mismatched types + --> src/main.rs:10:5 + | +10| "hello" + | ^^^^^^^ expected `i32`, found `&str` + +error: aborting due to 1 previous error +"#; + let mut f = BlockStreamFilter::new(CargoBuildHandler::new()); + let result = run_block_filter(&mut f, input, 1); + assert!(result.contains("E0308"), "got: {}", result); + assert!(result.contains("mismatched types"), "got: {}", result); + assert!(result.contains("1 errors"), "got: {}", result); + assert!(!result.contains("aborting"), "got: {}", result); + } + + #[test] + fn test_cargo_test_stream_all_pass() { + let input = r#" Compiling rtk v0.5.0 + Finished test [unoptimized + debuginfo] target(s) in 2.53s + Running target/debug/deps/rtk-abc123 + +running 15 tests +test utils::tests::test_truncate_short_string ... ok +test utils::tests::test_truncate_long_string ... ok +test utils::tests::test_strip_ansi_simple ... ok + +test result: ok. 15 passed; 0 failed; 0 ignored; 0 measured; 0 filtered out; finished in 0.01s +"#; + let mut f = BlockStreamFilter::new(CargoTestHandler::new()); + let result = run_block_filter(&mut f, input, 0); + assert!( + result.contains("cargo test: 15 passed (1 suite, 0.01s)"), + "got: {}", + result + ); + assert!(!result.contains("Compiling"), "got: {}", result); + } + + #[test] + fn test_cargo_test_stream_failures() { + let input = r#"running 5 tests +test foo::test_a ... ok +test foo::test_b ... 
FAILED +test foo::test_c ... ok + +failures: + +---- foo::test_b stdout ---- +thread 'foo::test_b' panicked at 'assert_eq!(1, 2)' + +failures: + foo::test_b + +test result: FAILED. 4 passed; 1 failed; 0 ignored; 0 measured; 0 filtered out +"#; + let mut f = BlockStreamFilter::new(CargoTestHandler::new()); + let result = run_block_filter(&mut f, input, 1); + assert!(result.contains("test_b"), "got: {}", result); + assert!(result.contains("panicked"), "got: {}", result); + } + + #[test] + fn test_cargo_test_stream_multi_suite() { + let input = r#" Running unittests src/lib.rs (target/debug/deps/rtk-abc123) + +running 50 tests +test result: ok. 50 passed; 0 failed; 0 ignored; 0 measured; 0 filtered out; finished in 0.45s + + Running unittests src/main.rs (target/debug/deps/rtk-def456) + +running 30 tests +test result: ok. 30 passed; 0 failed; 0 ignored; 0 measured; 0 filtered out; finished in 0.30s +"#; + let mut f = BlockStreamFilter::new(CargoTestHandler::new()); + let result = run_block_filter(&mut f, input, 0); + assert!( + result.contains("cargo test: 80 passed (2 suites, 0.75s)"), + "got: {}", + result + ); + } + + #[test] + fn test_cargo_test_stream_compile_error() { + let input = r#" Compiling rtk v0.31.0 (/workspace/projects/rtk) +error[E0425]: cannot find value `missing_symbol` in this scope + --> tests/repro_compile_fail.rs:3:13 + | +3 | let _ = missing_symbol; + | ^^^^^^^^^^^^^^ not found in this scope + +For more information about this error, try `rustc --explain E0425`. 
+error: could not compile `rtk` (test "repro_compile_fail") due to 1 previous error +"#; + let mut f = BlockStreamFilter::new(CargoTestHandler::new()); + let result = run_block_filter(&mut f, input, 1); + assert!(result.contains("cargo test:"), "got: {}", result); + assert!(result.contains("1 errors"), "got: {}", result); + } } diff --git a/src/cmds/rust/runner.rs b/src/cmds/rust/runner.rs index 4a80f2fb6..51b2ba790 100644 --- a/src/cmds/rust/runner.rs +++ b/src/cmds/rust/runner.rs @@ -1,69 +1,105 @@ //! Runs arbitrary commands and captures only stderr or test failures. -use crate::core::stream::{self, FilterMode, StdinMode}; -use crate::core::tracking; -use anyhow::{Context, Result}; +use crate::core::stream::StreamFilter; +use anyhow::Result; +use lazy_static::lazy_static; use regex::Regex; use std::process::Command; -/// Run a command and filter output to show only errors/warnings -pub fn run_err(command: &str, verbose: u8) -> Result { - let timer = tracking::TimedExecution::start(); +lazy_static! 
{ + static ref ERROR_PATTERNS: Vec = vec![ + // Generic errors + Regex::new(r"(?i)^.*error[\s:\[].*$").unwrap(), + Regex::new(r"(?i)^.*\berr\b.*$").unwrap(), + Regex::new(r"(?i)^.*warning[\s:\[].*$").unwrap(), + Regex::new(r"(?i)^.*\bwarn\b.*$").unwrap(), + Regex::new(r"(?i)^.*failed.*$").unwrap(), + Regex::new(r"(?i)^.*failure.*$").unwrap(), + Regex::new(r"(?i)^.*exception.*$").unwrap(), + Regex::new(r"(?i)^.*panic.*$").unwrap(), + // Rust specific + Regex::new(r"^error\[E\d+\]:.*$").unwrap(), + Regex::new(r"^\s*--> .*:\d+:\d+$").unwrap(), + // Python + Regex::new(r"^Traceback.*$").unwrap(), + Regex::new(r#"^\s*File ".*", line \d+.*$"#).unwrap(), + // JavaScript/TypeScript + Regex::new(r"^\s*at .*:\d+:\d+.*$").unwrap(), + // Go + Regex::new(r"^.*\.go:\d+:.*$").unwrap(), + ]; +} - if verbose > 0 { - eprintln!("Running: {}", command); - } +struct ErrorStreamFilter { + in_error_block: bool, + blank_count: usize, + emitted_any: bool, +} - let mut cmd = if cfg!(target_os = "windows") { - let mut c = Command::new("cmd"); - c.args(["/C", command]); - c - } else { - let mut c = Command::new("sh"); - c.args(["-c", command]); - c - }; +impl ErrorStreamFilter { + fn new() -> Self { + Self { + in_error_block: false, + blank_count: 0, + emitted_any: false, + } + } +} - let result = stream::run_streaming(&mut cmd, StdinMode::Null, FilterMode::CaptureOnly) - .context("Failed to execute command")?; +impl StreamFilter for ErrorStreamFilter { + fn feed_line(&mut self, line: &str) -> Option { + let is_error = ERROR_PATTERNS.iter().any(|p| p.is_match(line)); + if is_error { + self.in_error_block = true; + self.blank_count = 0; + self.emitted_any = true; + Some(format!("{}\n", line)) + } else if self.in_error_block { + if line.trim().is_empty() { + self.blank_count += 1; + if self.blank_count >= 2 { + self.in_error_block = false; + None + } else { + self.emitted_any = true; + Some(format!("{}\n", line)) + } + } else if line.starts_with(' ') || line.starts_with('\t') { + 
self.blank_count = 0; + self.emitted_any = true; + Some(format!("{}\n", line)) + } else { + self.in_error_block = false; + None + } + } else { + None + } + } - let raw = &result.raw; - let exit_code = result.exit_code; - let filtered = filter_errors(raw); - let mut rtk = String::new(); + fn flush(&mut self) -> String { + String::new() + } - if filtered.is_empty() { + fn on_exit(&mut self, exit_code: i32, raw: &str) -> Option { + if self.emitted_any { + return None; + } if exit_code == 0 { - rtk.push_str("[ok] Command completed successfully (no errors)"); + Some("[ok] Command completed successfully (no errors)".to_string()) } else { - rtk.push_str(&format!("[FAIL] Command failed (exit code: {})\n", exit_code)); + let mut msg = format!("[FAIL] Command failed (exit code: {})\n", exit_code); let lines: Vec<&str> = raw.lines().collect(); for line in lines.iter().rev().take(10).rev() { - rtk.push_str(&format!(" {}\n", line)); + msg.push_str(&format!(" {}\n", line)); } + Some(msg) } - } else { - rtk.push_str(&filtered); - } - - if let Some(hint) = crate::core::tee::tee_and_hint(raw, "err", exit_code) { - println!("{}\n{}", rtk, hint); - } else { - println!("{}", rtk); } - timer.track(command, "rtk run-err", raw, &rtk); - Ok(exit_code) } -/// Run tests and show only failures -pub fn run_test(command: &str, verbose: u8) -> Result { - let timer = tracking::TimedExecution::start(); - - if verbose > 0 { - eprintln!("Running tests: {}", command); - } - - let mut cmd = if cfg!(target_os = "windows") { +fn build_shell_command(command: &str) -> Command { + if cfg!(target_os = "windows") { let mut c = Command::new("cmd"); c.args(["/C", command]); c @@ -71,48 +107,42 @@ pub fn run_test(command: &str, verbose: u8) -> Result { let mut c = Command::new("sh"); c.args(["-c", command]); c - }; - - let result = stream::run_streaming(&mut cmd, StdinMode::Null, FilterMode::CaptureOnly) - .context("Failed to execute test command")?; + } +} - let raw = &result.raw; - let exit_code = 
result.exit_code; - let summary = extract_test_summary(raw, command); - if let Some(hint) = crate::core::tee::tee_and_hint(raw, "test", exit_code) { - println!("{}\n{}", summary, hint); - } else { - println!("{}", summary); +/// Run a command and filter output to show only errors/warnings +pub fn run_err(command: &str, verbose: u8) -> Result { + if verbose > 0 { + eprintln!("Running: {}", command); } - timer.track(command, "rtk run-test", raw, &summary); - Ok(exit_code) + let cmd = build_shell_command(command); + crate::core::runner::run_streamed( + cmd, + "err", + command, + Box::new(ErrorStreamFilter::new()), + crate::core::runner::RunOptions::with_tee("err"), + ) } -fn filter_errors(output: &str) -> String { - lazy_static::lazy_static! { - static ref ERROR_PATTERNS: Vec = vec![ - // Generic errors - Regex::new(r"(?i)^.*error[\s:\[].*$").unwrap(), - Regex::new(r"(?i)^.*\berr\b.*$").unwrap(), - Regex::new(r"(?i)^.*warning[\s:\[].*$").unwrap(), - Regex::new(r"(?i)^.*\bwarn\b.*$").unwrap(), - Regex::new(r"(?i)^.*failed.*$").unwrap(), - Regex::new(r"(?i)^.*failure.*$").unwrap(), - Regex::new(r"(?i)^.*exception.*$").unwrap(), - Regex::new(r"(?i)^.*panic.*$").unwrap(), - // Rust specific - Regex::new(r"^error\[E\d+\]:.*$").unwrap(), - Regex::new(r"^\s*--> .*:\d+:\d+$").unwrap(), - // Python - Regex::new(r"^Traceback.*$").unwrap(), - Regex::new(r#"^\s*File ".*", line \d+.*$"#).unwrap(), - // JavaScript/TypeScript - Regex::new(r"^\s*at .*:\d+:\d+.*$").unwrap(), - // Go - Regex::new(r"^.*\.go:\d+:.*$").unwrap(), - ]; +/// Run tests and show only failures +pub fn run_test(command: &str, verbose: u8) -> Result { + if verbose > 0 { + eprintln!("Running tests: {}", command); } + let cmd = build_shell_command(command); + let command_owned = command.to_string(); + crate::core::runner::run_filtered( + cmd, + "test", + command, + move |raw| extract_test_summary(raw, &command_owned), + crate::core::runner::RunOptions::with_tee("test"), + ) +} +#[cfg(test)] +fn 
filter_errors(output: &str) -> String { let mut result = Vec::new(); let mut in_error_block = false; let mut blank_count = 0; @@ -133,7 +163,6 @@ fn filter_errors(output: &str) -> String { result.push(line.to_string()); } } else if line.starts_with(' ') || line.starts_with('\t') { - // Continuation of error result.push(line.to_string()); blank_count = 0; } else { @@ -149,20 +178,17 @@ fn extract_test_summary(output: &str, command: &str) -> String { let mut result = Vec::new(); let lines: Vec<&str> = output.lines().collect(); - // Detect test framework let is_cargo = command.contains("cargo test"); let is_pytest = command.contains("pytest"); let is_jest = command.contains("jest") || command.contains("npm test") || command.contains("yarn test"); let is_go = command.contains("go test"); - // Collect failures let mut failures = Vec::new(); let mut in_failure = false; let mut failure_lines = Vec::new(); for line in lines.iter() { - // Cargo test if is_cargo { if line.contains("test result:") { result.push(line.to_string()); @@ -178,7 +204,6 @@ fn extract_test_summary(output: &str, command: &str) -> String { } } - // Pytest if is_pytest { if line.contains(" passed") || line.contains(" failed") || line.contains(" error") { result.push(line.to_string()); @@ -188,7 +213,6 @@ fn extract_test_summary(output: &str, command: &str) -> String { } } - // Jest if is_jest { if line.contains("Tests:") || line.contains("Test Suites:") { result.push(line.to_string()); @@ -198,7 +222,6 @@ fn extract_test_summary(output: &str, command: &str) -> String { } } - // Go test if is_go { if line.starts_with("ok") || line.starts_with("FAIL") || line.starts_with("---") { result.push(line.to_string()); @@ -209,7 +232,6 @@ fn extract_test_summary(output: &str, command: &str) -> String { } } - // Build output let mut output = String::new(); if !failures.is_empty() { @@ -229,7 +251,6 @@ fn extract_test_summary(output: &str, command: &str) -> String { output.push_str(&format!(" {}\n", r)); } } else { 
- // Fallback: show last few lines output.push_str("OUTPUT (last 5 lines):\n"); let start = lines.len().saturating_sub(5); for line in &lines[start..] { diff --git a/src/core/runner.rs b/src/core/runner.rs index 02ce9d41e..cb406da40 100644 --- a/src/core/runner.rs +++ b/src/core/runner.rs @@ -3,9 +3,8 @@ use anyhow::{Context, Result}; use std::process::Command; -use crate::core::stream::{self, FilterMode, StdinMode}; +use crate::core::stream::{self, FilterMode, StdinMode, StreamFilter}; use crate::core::tracking; -use crate::core::utils::exit_code_from_status; pub fn print_with_hint(filtered: &str, raw: &str, tee_label: &str, exit_code: i32) { if let Some(hint) = crate::core::tee::tee_and_hint(raw, tee_label, exit_code) { @@ -54,80 +53,143 @@ impl<'a> RunOptions<'a> { } } -pub fn run_filtered( +pub enum RunMode<'a> { + Filtered(Box String + 'a>), + Streamed(Box), + Passthrough, +} + +pub fn run( mut cmd: Command, tool_name: &str, args_display: &str, + mode: RunMode<'_>, + opts: RunOptions<'_>, +) -> Result { + let timer = tracking::TimedExecution::start(); + let cmd_label = format!("{} {}", tool_name, args_display); + + match mode { + RunMode::Filtered(filter_fn) => { + let result = stream::run_streaming(&mut cmd, StdinMode::Null, FilterMode::CaptureOnly) + .with_context(|| format!("Failed to run {}", tool_name))?; + + let exit_code = result.exit_code; + let raw = &result.raw; + let raw_stdout = &result.raw_stdout; + + if opts.skip_filter_on_failure && exit_code != 0 { + timer.track(&cmd_label, &format!("rtk {}", cmd_label), raw, raw); + return Ok(exit_code); + } + + let text_to_filter = if opts.filter_stdout_only { + raw_stdout + } else { + raw + }; + let filtered = filter_fn(text_to_filter); + + if let Some(label) = opts.tee_label { + print_with_hint(&filtered, raw, label, exit_code); + } else if opts.no_trailing_newline { + print!("{}", filtered); + } else { + println!("{}", filtered); + } + + let raw_for_tracking = if opts.filter_stdout_only { + raw_stdout + } 
else { + raw + }; + timer.track( + &cmd_label, + &format!("rtk {}", cmd_label), + raw_for_tracking, + &filtered, + ); + Ok(exit_code) + } + RunMode::Streamed(filter) => { + let result = + stream::run_streaming(&mut cmd, StdinMode::Null, FilterMode::Streaming(filter)) + .with_context(|| format!("Failed to run {}", tool_name))?; + + if let Some(label) = opts.tee_label { + if let Some(hint) = + crate::core::tee::tee_and_hint(&result.raw, label, result.exit_code) + { + println!("{}", hint); + } + } + + timer.track( + &cmd_label, + &format!("rtk {}", cmd_label), + &result.raw, + &result.filtered, + ); + Ok(result.exit_code) + } + RunMode::Passthrough => { + let result = + stream::run_streaming(&mut cmd, StdinMode::Inherit, FilterMode::Passthrough) + .with_context(|| format!("Failed to run {}", tool_name))?; + + timer.track_passthrough(&cmd_label, &format!("rtk {} (passthrough)", cmd_label)); + Ok(result.exit_code) + } + } +} + +pub fn run_filtered( + cmd: Command, + tool_name: &str, + args_display: &str, filter_fn: F, opts: RunOptions<'_>, ) -> Result where F: Fn(&str) -> String, { - let timer = tracking::TimedExecution::start(); - - // CaptureOnly: stderr streams live, stdout buffered silently. 
- // result.filtered = raw_stdout, result.raw = stdout + stderr - let result = stream::run_streaming(&mut cmd, StdinMode::Null, FilterMode::CaptureOnly) - .with_context(|| format!("Failed to run {}", tool_name))?; - - let exit_code = result.exit_code; - let raw_stdout = &result.filtered; - let raw = &result.raw; - - if opts.skip_filter_on_failure && exit_code != 0 { - timer.track( - &format!("{} {}", tool_name, args_display), - &format!("rtk {} {}", tool_name, args_display), - raw, - raw, - ); - return Ok(exit_code); - } - - let text_to_filter = if opts.filter_stdout_only { - raw_stdout - } else { - raw - }; - let filtered = filter_fn(text_to_filter); - - if let Some(label) = opts.tee_label { - print_with_hint(&filtered, raw, label, exit_code); - } else if opts.no_trailing_newline { - print!("{}", filtered); - } else { - println!("{}", filtered); - } - - let raw_for_tracking = if opts.filter_stdout_only { - raw_stdout - } else { - raw - }; - timer.track( - &format!("{} {}", tool_name, args_display), - &format!("rtk {} {}", tool_name, args_display), - raw_for_tracking, - &filtered, - ); - - Ok(exit_code) + run( + cmd, + tool_name, + args_display, + RunMode::Filtered(Box::new(filter_fn)), + opts, + ) } pub fn run_passthrough(tool: &str, args: &[std::ffi::OsString], verbose: u8) -> Result { - let timer = tracking::TimedExecution::start(); if verbose > 0 { eprintln!("{} passthrough: {:?}", tool, args); } - let status = crate::core::utils::resolved_command(tool) - .args(args) - .status() - .with_context(|| format!("Failed to run {}", tool))?; + let mut cmd = crate::core::utils::resolved_command(tool); + cmd.args(args); let args_str = tracking::args_display(args); - timer.track_passthrough( - &format!("{} {}", tool, args_str), - &format!("rtk {} {} (passthrough)", tool, args_str), - ); - Ok(exit_code_from_status(&status, tool)) + run( + cmd, + tool, + &args_str, + RunMode::Passthrough, + RunOptions::default(), + ) +} + +pub fn run_streamed( + cmd: Command, + tool_name: 
&str, + args_display: &str, + filter: Box, + opts: RunOptions<'_>, +) -> Result { + run( + cmd, + tool_name, + args_display, + RunMode::Streamed(filter), + opts, + ) } diff --git a/src/core/stream.rs b/src/core/stream.rs index b9137ac44..7e0dcba15 100644 --- a/src/core/stream.rs +++ b/src/core/stream.rs @@ -1,10 +1,149 @@ use anyhow::{Context, Result}; +use regex::Regex; use std::io::{self, BufRead, BufReader, BufWriter, Write}; use std::process::{Command, Stdio}; pub trait StreamFilter { fn feed_line(&mut self, line: &str) -> Option; fn flush(&mut self) -> String; + fn on_exit(&mut self, _exit_code: i32, _raw: &str) -> Option { + None + } +} + +pub trait BlockHandler { + fn should_skip(&mut self, line: &str) -> bool; + fn is_block_start(&mut self, line: &str) -> bool; + fn is_block_continuation(&mut self, line: &str, block: &[String]) -> bool; + fn format_summary(&self, exit_code: i32, raw: &str) -> Option; +} + +pub struct BlockStreamFilter { + handler: H, + in_block: bool, + current_block: Vec, + blocks_emitted: usize, +} + +impl BlockStreamFilter { + pub fn new(handler: H) -> Self { + Self { + handler, + in_block: false, + current_block: Vec::new(), + blocks_emitted: 0, + } + } + + fn emit_block(&mut self) -> Option { + if self.current_block.is_empty() { + return None; + } + let block = self.current_block.join("\n"); + self.current_block.clear(); + self.blocks_emitted += 1; + Some(format!("{}\n", block)) + } +} + +impl StreamFilter for BlockStreamFilter { + fn feed_line(&mut self, line: &str) -> Option { + if self.handler.should_skip(line) { + return None; + } + + if self.handler.is_block_start(line) { + let prev = self.emit_block(); + self.current_block.push(line.to_string()); + self.in_block = true; + prev + } else if self.in_block { + if self + .handler + .is_block_continuation(line, &self.current_block) + { + self.current_block.push(line.to_string()); + None + } else { + self.in_block = false; + self.emit_block() + } + } else { + None + } + } + + fn 
flush(&mut self) -> String { + self.emit_block().unwrap_or_default() + } + + fn on_exit(&mut self, exit_code: i32, raw: &str) -> Option { + self.handler.format_summary(exit_code, raw) + } +} + +#[allow(dead_code)] // available for command modules; currently used in tests only +pub struct RegexBlockFilter { + start_re: Regex, + skip_prefixes: Vec, + tool_name: String, + block_count: usize, +} + +impl RegexBlockFilter { + pub fn new(tool_name: &str, start_pattern: &str) -> Self { + Self { + start_re: Regex::new(start_pattern).unwrap_or_else(|e| { + panic!("RegexBlockFilter: bad pattern '{}': {}", start_pattern, e) + }), + skip_prefixes: Vec::new(), + tool_name: tool_name.to_string(), + block_count: 0, + } + } + + #[allow(dead_code)] + pub fn skip_prefix(mut self, prefix: &str) -> Self { + self.skip_prefixes.push(prefix.to_string()); + self + } + + #[allow(dead_code)] + pub fn skip_prefixes(mut self, prefixes: &[&str]) -> Self { + self.skip_prefixes + .extend(prefixes.iter().map(|s| s.to_string())); + self + } +} + +impl BlockHandler for RegexBlockFilter { + fn should_skip(&mut self, line: &str) -> bool { + self.skip_prefixes.iter().any(|p| line.starts_with(p)) + } + + fn is_block_start(&mut self, line: &str) -> bool { + if self.start_re.is_match(line) { + self.block_count += 1; + true + } else { + false + } + } + + fn is_block_continuation(&mut self, line: &str, _block: &[String]) -> bool { + line.starts_with(' ') || line.starts_with('\t') + } + + fn format_summary(&self, _exit_code: i32, _raw: &str) -> Option { + if self.block_count == 0 { + Some(format!("{}: no errors found\n", self.tool_name)) + } else { + Some(format!( + "{}: {} blocks in output\n", + self.tool_name, self.block_count + )) + } + } } pub trait StdinFilter: Send { @@ -12,10 +151,12 @@ pub trait StdinFilter: Send { fn flush(&mut self) -> String; } +#[allow(dead_code)] // test utility: wraps closures as StreamFilter pub struct LineFilter Option> { f: F, } +#[allow(dead_code)] impl Option> LineFilter { 
pub fn new(f: F) -> Self { Self { f } @@ -32,15 +173,16 @@ impl Option> StreamFilter for LineFilter { } } -pub enum FilterMode { - Streaming(Box), - Buffered(fn(&str) -> String), +pub enum FilterMode<'a> { + Streaming(Box), + Buffered(Box String + 'a>), CaptureOnly, Passthrough, } pub enum StdinMode { Inherit, + #[allow(dead_code)] // future API: stdin filtering for interactive commands Filter(Box), Null, } @@ -48,6 +190,7 @@ pub enum StdinMode { pub struct StreamResult { pub exit_code: i32, pub raw: String, + pub raw_stdout: String, pub filtered: String, } @@ -76,8 +219,28 @@ pub fn status_to_exit_code(status: std::process::ExitStatus) -> i32 { pub fn run_streaming( cmd: &mut Command, stdin_mode: StdinMode, - stdout_mode: FilterMode, + stdout_mode: FilterMode<'_>, ) -> Result { + if matches!(stdout_mode, FilterMode::Passthrough) { + match &stdin_mode { + StdinMode::Inherit => { + cmd.stdin(Stdio::inherit()); + } + _ => { + cmd.stdin(Stdio::null()); + } + }; + cmd.stdout(Stdio::inherit()); + cmd.stderr(Stdio::inherit()); + let status = cmd.status().context("Failed to spawn process")?; + return Ok(StreamResult { + exit_code: status_to_exit_code(status), + raw: String::new(), + raw_stdout: String::new(), + filtered: String::new(), + }); + } + match &stdin_mode { StdinMode::Inherit => { cmd.stdin(Stdio::inherit()); @@ -96,6 +259,8 @@ pub fn run_streaming( } } + let live_stderr = matches!(stdout_mode, FilterMode::Streaming(_)); + let mut child = ChildGuard(cmd.spawn().context("Failed to spawn process")?); let stdin_thread: Option> = match stdin_mode { @@ -130,45 +295,44 @@ pub fn run_streaming( let stderr = child.0.stderr.take().context("No child stderr handle")?; let stderr_thread = std::thread::spawn(move || -> String { let mut raw_err = String::new(); - let stderr_out = io::stderr(); - let mut err_out = stderr_out.lock(); - for line in BufReader::new(stderr).lines().map_while(Result::ok) { - writeln!(err_out, "{}", line).ok(); - raw_err.push_str(&line); - 
raw_err.push('\n'); + if live_stderr { + let stderr_out = io::stderr(); + let mut err_out = stderr_out.lock(); + for line in BufReader::new(stderr).lines().map_while(Result::ok) { + writeln!(err_out, "{}", line).ok(); + raw_err.push_str(&line); + raw_err.push('\n'); + } + } else { + for line in BufReader::new(stderr).lines().map_while(Result::ok) { + raw_err.push_str(&line); + raw_err.push('\n'); + } } raw_err }); let stdout = child.0.stdout.take().context("No child stdout handle")?; - const RAW_CAP: usize = 1_048_576; + const RAW_CAP: usize = 10_485_760; let mut raw_stdout = String::new(); let mut filtered = String::new(); + let mut capped = false; + let mut saved_filter: Option> = None; { let stdout_handle = io::stdout(); let mut out = stdout_handle.lock(); match stdout_mode { - FilterMode::Passthrough => { - for line in BufReader::new(stdout).lines().map_while(Result::ok) { - if raw_stdout.len() + line.len() + 1 <= RAW_CAP { - raw_stdout.push_str(&line); - raw_stdout.push('\n'); - } - match writeln!(out, "{}", line) { - Err(e) if e.kind() == io::ErrorKind::BrokenPipe => break, - Err(e) => return Err(e.into()), - Ok(_) => {} - } - } - filtered = raw_stdout.clone(); - } + FilterMode::Passthrough => unreachable!("handled by early-return above"), FilterMode::Streaming(mut filter) => { for line in BufReader::new(stdout).lines().map_while(Result::ok) { - if raw_stdout.len() + line.len() + 1 <= RAW_CAP { + if raw_stdout.len() + line.len() < RAW_CAP { raw_stdout.push_str(&line); raw_stdout.push('\n'); + } else if !capped { + capped = true; + eprintln!("[rtk] warning: output exceeds 10 MiB — filter input truncated"); } if let Some(output) = filter.feed_line(&line) { filtered.push_str(&output); @@ -186,12 +350,16 @@ pub fn run_streaming( Err(e) => return Err(e.into()), Ok(_) => {} } + saved_filter = Some(filter); } FilterMode::Buffered(filter_fn) => { for line in BufReader::new(stdout).lines().map_while(Result::ok) { - if raw_stdout.len() + line.len() + 1 <= RAW_CAP { + 
if raw_stdout.len() + line.len() < RAW_CAP { raw_stdout.push_str(&line); raw_stdout.push('\n'); + } else if !capped { + capped = true; + eprintln!("[rtk] warning: output exceeds 10 MiB — filter input truncated"); } } filtered = filter_fn(&raw_stdout); @@ -203,9 +371,12 @@ pub fn run_streaming( } FilterMode::CaptureOnly => { for line in BufReader::new(stdout).lines().map_while(Result::ok) { - if raw_stdout.len() + line.len() + 1 <= RAW_CAP { + if raw_stdout.len() + line.len() < RAW_CAP { raw_stdout.push_str(&line); raw_stdout.push('\n'); + } else if !capped { + capped = true; + eprintln!("[rtk] warning: output exceeds 10 MiB — filter input truncated"); } } filtered = raw_stdout.clone(); @@ -222,10 +393,20 @@ pub fn run_streaming( } let status = child.0.wait().context("Failed to wait for child")?; + let exit_code = status_to_exit_code(status); + let raw = format!("{}{}", raw_stdout, raw_stderr); + + if let Some(mut f) = saved_filter { + if let Some(post) = f.on_exit(exit_code, &raw) { + filtered.push_str(&post); + print!("{}", post); + } + } Ok(StreamResult { - exit_code: status_to_exit_code(status), - raw: format!("{}{}", raw_stdout, raw_stderr), + exit_code, + raw, + raw_stdout, filtered, }) } @@ -257,7 +438,7 @@ pub fn exec_capture(cmd: &mut Command) -> Result { } #[cfg(test)] -mod tests { +pub(crate) mod tests { use super::*; use std::process::Command; @@ -312,6 +493,7 @@ mod tests { let r = StreamResult { exit_code: 0, raw: String::new(), + raw_stdout: String::new(), filtered: String::new(), }; assert!(r.success()); @@ -322,6 +504,7 @@ mod tests { let r = StreamResult { exit_code: 1, raw: String::new(), + raw_stdout: String::new(), filtered: String::new(), }; assert!(!r.success()); @@ -332,6 +515,7 @@ mod tests { let r = StreamResult { exit_code: 137, raw: String::new(), + raw_stdout: String::new(), filtered: String::new(), }; assert!(!r.success()); @@ -343,7 +527,8 @@ mod tests { cmd.arg("hello"); let result = run_streaming(&mut cmd, StdinMode::Null, 
FilterMode::Passthrough).unwrap(); assert_eq!(result.exit_code, 0); - assert!(result.raw.contains("hello")); + // Passthrough inherits TTY — raw/filtered are empty + assert!(result.raw.is_empty()); } #[test] @@ -399,27 +584,33 @@ mod tests { fn test_run_streaming_buffered_filter() { let mut cmd = Command::new("printf"); cmd.arg("line1\nline2\nline3\n"); - fn upper(s: &str) -> String { - s.to_uppercase() - } - let result = run_streaming(&mut cmd, StdinMode::Null, FilterMode::Buffered(upper)).unwrap(); + let result = run_streaming( + &mut cmd, + StdinMode::Null, + FilterMode::Buffered(Box::new(|s: &str| s.to_uppercase())), + ) + .unwrap(); assert!(result.filtered.contains("LINE1")); assert!(result.filtered.contains("LINE2")); assert_eq!(result.exit_code, 0); } #[test] - fn test_run_streaming_raw_cap_at_1mb() { + fn test_run_streaming_raw_cap_at_10mb() { let mut cmd = Command::new("sh"); - cmd.args(["-c", "yes | head -600000"]); - let result = run_streaming(&mut cmd, StdinMode::Null, FilterMode::Passthrough).unwrap(); + // ~11 MiB of 80-char lines (fast: fewer lines than `yes | head -6M`) + cmd.args([ + "-c", + "dd if=/dev/zero bs=1024 count=11264 2>/dev/null | tr '\\0' 'a' | fold -w 80", + ]); + let result = run_streaming(&mut cmd, StdinMode::Null, FilterMode::CaptureOnly).unwrap(); assert!( - result.raw.len() <= 1_048_576 + 100, - "raw should be capped at ~1 MiB, got {} bytes", + result.raw.len() <= 10_485_760 + 100, + "raw should be capped at ~10 MiB, got {} bytes", result.raw.len() ); assert!( - result.raw.len() > 100_000, + result.raw.len() > 1_000_000, "Should have captured significant data" ); } @@ -427,7 +618,7 @@ mod tests { #[test] fn test_child_guard_prevents_zombie() { let mut cmd = Command::new("true"); - let result = run_streaming(&mut cmd, StdinMode::Null, FilterMode::Passthrough); + let result = run_streaming(&mut cmd, StdinMode::Null, FilterMode::CaptureOnly); assert!(result.is_ok()); assert_eq!(result.unwrap().exit_code, 0); } @@ -443,16 +634,16 @@ 
mod tests { fn test_run_streaming_raw_contains_stdout() { let mut cmd = Command::new("echo"); cmd.arg("test_output_xyz"); - let result = run_streaming(&mut cmd, StdinMode::Null, FilterMode::Passthrough).unwrap(); + let result = run_streaming(&mut cmd, StdinMode::Null, FilterMode::CaptureOnly).unwrap(); assert!(result.raw.contains("test_output_xyz")); } #[test] - fn test_run_streaming_filtered_equals_raw_in_passthrough() { + fn test_run_streaming_capture_only_filtered_equals_raw() { let mut cmd = Command::new("echo"); cmd.arg("check_equality"); - let result = run_streaming(&mut cmd, StdinMode::Null, FilterMode::Passthrough).unwrap(); - assert_eq!(result.filtered.trim(), result.raw.trim()); + let result = run_streaming(&mut cmd, StdinMode::Null, FilterMode::CaptureOnly).unwrap(); + assert_eq!(result.filtered.trim(), result.raw_stdout.trim()); } #[test] @@ -500,4 +691,129 @@ mod tests { }; assert_eq!(r.combined(), ""); } + + pub fn run_block_filter(filter: &mut dyn StreamFilter, input: &str, exit_code: i32) -> String { + let mut output = String::new(); + for line in input.lines() { + if let Some(s) = filter.feed_line(line) { + output.push_str(&s); + } + } + output.push_str(&filter.flush()); + if let Some(post) = filter.on_exit(exit_code, input) { + output.push_str(&post); + } + output + } + + struct TestHandler; + + impl BlockHandler for TestHandler { + fn should_skip(&mut self, line: &str) -> bool { + line.starts_with("SKIP") + } + fn is_block_start(&mut self, line: &str) -> bool { + line.starts_with("ERROR") + } + fn is_block_continuation(&mut self, line: &str, _block: &[String]) -> bool { + line.starts_with(" ") + } + fn format_summary(&self, _exit_code: i32, _raw: &str) -> Option { + Some("DONE\n".to_string()) + } + } + + #[test] + fn test_block_filter_emits_blocks() { + let mut f = BlockStreamFilter::new(TestHandler); + let input = "SKIP noise\nERROR first\n detail1\nnon-block\nERROR second\n detail2\n"; + let result = run_block_filter(&mut f, input, 0); + 
assert!(result.contains("ERROR first\n detail1"), "got: {}", result); + assert!( + result.contains("ERROR second\n detail2"), + "got: {}", + result + ); + assert!(!result.contains("SKIP"), "got: {}", result); + assert!(result.ends_with("DONE\n"), "got: {}", result); + } + + #[test] + fn test_block_filter_no_blocks() { + let mut f = BlockStreamFilter::new(TestHandler); + let result = run_block_filter(&mut f, "nothing here\njust text\n", 0); + assert_eq!(result, "DONE\n"); + } + + #[test] + fn test_regex_block_filter_emits_blocks() { + let handler = RegexBlockFilter::new("test", r"^error\["); + let mut f = BlockStreamFilter::new(handler); + let input = "ok line\nerror[E0308]: mismatched types\n expected `u32`\nok again\nerror[E0599]: no method\n help: try\n"; + let result = run_block_filter(&mut f, input, 1); + assert!( + result.contains("error[E0308]: mismatched types\n expected `u32`"), + "got: {}", + result + ); + assert!( + result.contains("error[E0599]: no method\n help: try"), + "got: {}", + result + ); + assert!( + result.contains("test: 2 blocks in output"), + "got: {}", + result + ); + } + + #[test] + fn test_regex_block_filter_skip_prefix() { + let handler = RegexBlockFilter::new("test", r"^error").skip_prefix("warning:"); + let mut f = BlockStreamFilter::new(handler); + let input = "warning: unused var\nerror: bad type\n detail\nwarning: dead code\n"; + let result = run_block_filter(&mut f, input, 1); + assert!(result.contains("error: bad type"), "got: {}", result); + assert!(!result.contains("warning:"), "got: {}", result); + } + + #[test] + fn test_regex_block_filter_no_blocks() { + let handler = RegexBlockFilter::new("mytest", r"^FAIL"); + let mut f = BlockStreamFilter::new(handler); + let result = run_block_filter(&mut f, "all passed\nok\n", 0); + assert_eq!(result, "mytest: no errors found\n"); + } + + #[test] + fn test_regex_block_filter_indent_continuation() { + let handler = RegexBlockFilter::new("test", r"^ERR"); + let mut f = 
BlockStreamFilter::new(handler); + let input = "ERR space indent\n two spaces\n\ttab indent\nnon-indent\n"; + let result = run_block_filter(&mut f, input, 1); + assert!( + result.contains("ERR space indent\n two spaces\n\ttab indent"), + "got: {}", + result + ); + assert!(!result.contains("non-indent"), "got: {}", result); + } + + #[test] + fn test_regex_block_filter_multiple_skip_prefixes() { + let handler = + RegexBlockFilter::new("test", r"^error").skip_prefixes(&["note:", "warning:", "help:"]); + let mut f = BlockStreamFilter::new(handler); + let input = "note: see docs\nwarning: unused\nhelp: try this\nerror: fatal\n details\n"; + let result = run_block_filter(&mut f, input, 1); + assert!(!result.contains("note:"), "got: {}", result); + assert!(!result.contains("warning:"), "got: {}", result); + assert!(!result.contains("help:"), "got: {}", result); + assert!( + result.contains("error: fatal\n details"), + "got: {}", + result + ); + } } diff --git a/src/discover/registry.rs b/src/discover/registry.rs index 4a295e256..8f21016c0 100644 --- a/src/discover/registry.rs +++ b/src/discover/registry.rs @@ -347,7 +347,8 @@ fn strip_trailing_redirects(cmd: &str) -> (&str, &str) { /// Returns `None` if the command is unsupported or ignored (hook should pass through). /// /// Handles compound commands (`&&`, `||`, `;`) by rewriting each segment independently. -/// For pipes (`|`), only rewrites the first command (the filter stays raw). +/// For pipes (`|`), only rewrites the left-hand command (pipe targets stay raw), +/// but continues rewriting segments after subsequent `&&`/`||`/`;` operators. 
pub fn rewrite_command(cmd: &str, excluded: &[String]) -> Option { let trimmed = cmd.trim(); if trimmed.is_empty() { @@ -381,6 +382,9 @@ fn rewrite_compound(cmd: &str, excluded: &[String]) -> Option { let mut seg_start: usize = 0; for tok in &tokens { + if tok.offset < seg_start { + continue; + } match tok.kind { TokenKind::Operator => { let seg = cmd[seg_start..tok.offset].trim(); @@ -420,9 +424,25 @@ fn rewrite_compound(cmd: &str, excluded: &[String]) -> Option { any_changed = true; } result.push_str(&rewritten); - result.push(' '); - result.push_str(cmd[tok.offset..].trim_start()); - return if any_changed { Some(result) } else { None }; + + let pipe_group_end = tokens.iter().find(|t| { + t.offset > tok.offset + && (t.kind == TokenKind::Operator + || (t.kind == TokenKind::Shellism && t.value == "&")) + }); + + match pipe_group_end { + Some(next_op) => { + result.push(' '); + result.push_str(cmd[tok.offset..next_op.offset].trim()); + seg_start = next_op.offset; + } + None => { + result.push(' '); + result.push_str(cmd[tok.offset..].trim_start()); + return if any_changed { Some(result) } else { None }; + } + } } TokenKind::Shellism if tok.value == "&" => { let seg = cmd[seg_start..tok.offset].trim(); @@ -2507,4 +2527,57 @@ mod tests { Classification::Ignored ); } + + // --- Pipe + operator rewrite --- + + #[test] + fn test_rewrite_pipe_then_and() { + assert_eq!( + rewrite_command("git log | head -5 && git stash", &[]), + Some("rtk git log | head -5 && rtk git stash".into()) + ); + } + + #[test] + fn test_rewrite_pipe_then_semicolon() { + assert_eq!( + rewrite_command("cargo test | head; git status", &[]), + Some("rtk cargo test | head; rtk git status".into()) + ); + } + + #[test] + fn test_rewrite_pipe_then_or() { + assert_eq!( + rewrite_command("cargo test | grep FAIL || git stash", &[]), + Some("rtk cargo test | grep FAIL || rtk git stash".into()) + ); + } + + #[test] + fn test_rewrite_env_pipe_then_and() { + assert_eq!( + rewrite_command( + "RUST_BACKTRACE=1 
cargo test 2>&1 | grep FAILED && git stash", + &[] + ), + Some("RUST_BACKTRACE=1 rtk cargo test 2>&1 | grep FAILED && rtk git stash".into()) + ); + } + + #[test] + fn test_rewrite_and_then_pipe() { + assert_eq!( + rewrite_command("git status && cargo test | grep FAIL", &[]), + Some("rtk git status && rtk cargo test | grep FAIL".into()) + ); + } + + #[test] + fn test_rewrite_multi_pipe_then_and() { + assert_eq!( + rewrite_command("git log | head | tail && git status", &[]), + Some("rtk git log | head | tail && rtk git status".into()) + ); + } } diff --git a/src/hooks/hook_cmd.rs b/src/hooks/hook_cmd.rs index d270673ab..844875853 100644 --- a/src/hooks/hook_cmd.rs +++ b/src/hooks/hook_cmd.rs @@ -285,7 +285,10 @@ pub fn run_claude() -> Result<()> { let v: Value = match serde_json::from_str(input) { Ok(v) => v, - Err(_) => return Ok(()), + Err(e) => { + let _ = writeln!(io::stderr(), "[rtk hook] Failed to parse JSON input: {e}"); + return Ok(()); + } }; let cmd = match v diff --git a/src/main.rs b/src/main.rs index 220031bf6..5c6313289 100644 --- a/src/main.rs +++ b/src/main.rs @@ -2028,7 +2028,6 @@ fn run_cli() -> Result { if raw.trim().is_empty() { 0 } else { - // Execute via shell passthrough with token tracking use std::process::Command as ProcCommand; let shell = if cfg!(windows) { "cmd" } else { "sh" }; let flag = if cfg!(windows) { "/C" } else { "-c" }; @@ -2079,8 +2078,30 @@ fn run_cli() -> Result { eprintln!("Proxy mode: {} {}", cmd_name, cmd_args.join(" ")); } - // ISSUE #897: ChildGuard kills child on error/panic to prevent - // orphan processes that caused a 514GB memory leak + kernel panic. + // ISSUE #897: Kill proxy child on SIGINT/SIGTERM to prevent orphan + // processes. Drop-based ChildGuard doesn't run on signals with + // panic=abort, so we register a signal handler that kills the child + // PID stored in this atomic. 
+ use std::sync::atomic::{AtomicU32, Ordering}; + static PROXY_CHILD_PID: AtomicU32 = AtomicU32::new(0); + + #[cfg(unix)] + { + unsafe extern "C" fn handle_signal(sig: libc::c_int) { + let pid = PROXY_CHILD_PID.load(Ordering::SeqCst); + if pid != 0 { + libc::kill(pid as libc::pid_t, libc::SIGTERM); + libc::waitpid(pid as libc::pid_t, std::ptr::null_mut(), 0); + } + libc::signal(sig, libc::SIG_DFL); + libc::raise(sig); + } + unsafe { + libc::signal(libc::SIGINT, handle_signal as libc::sighandler_t); + libc::signal(libc::SIGTERM, handle_signal as libc::sighandler_t); + } + } + struct ChildGuard(Option); impl Drop for ChildGuard { fn drop(&mut self) { @@ -2088,6 +2109,7 @@ fn run_cli() -> Result { let _ = child.kill(); let _ = child.wait(); } + PROXY_CHILD_PID.store(0, Ordering::SeqCst); } } @@ -2100,6 +2122,11 @@ fn run_cli() -> Result { .context(format!("Failed to execute command: {}", cmd_name))?, )); + // Store child PID for signal handler before anything can fail + if let Some(ref inner) = child.0 { + PROXY_CHILD_PID.store(inner.id(), Ordering::SeqCst); + } + let inner = child.0.as_mut().context("Child process missing")?; let stdout_pipe = inner .stdout From 2bb5265595c4a80fe1ad7e9ab3ffc8dd013b019c Mon Sep 17 00:00:00 2001 From: aesoft <43991222+aeppling@users.noreply.github.com> Date: Fri, 10 Apr 2026 12:09:29 +0200 Subject: [PATCH 11/44] fix(stream): P0 fixes from PR #956 review - pipe_cmd: fix panic on multi-byte UTF-8 at 1024 byte boundary (floor_char_boundary in auto_detect_filter) - pipe_cmd: cap stdin at 10 MiB to prevent OOM (reuses RAW_CAP) - stream: hoist RAW_CAP to pub const at module level - hook_cmd: check deny before get_rewritten in handle_vscode (matches handle_copilot_cli and run_claude order) - hook_cmd: escape backslash and pipe in audit log sanitizer - tsc_cmd: hoist duplicate TSC_ERROR regex to single module-level lazy_static --- src/cmds/js/tsc_cmd.rs | 18 +++++++----------- src/cmds/system/pipe_cmd.rs | 21 ++++++++++++++++++++- 
src/core/stream.rs | 3 ++- src/hooks/hook_cmd.rs | 28 ++++++++++++++++++++-------- 4 files changed, 49 insertions(+), 21 deletions(-) diff --git a/src/cmds/js/tsc_cmd.rs b/src/cmds/js/tsc_cmd.rs index 6c1f23cb5..e87988289 100644 --- a/src/cmds/js/tsc_cmd.rs +++ b/src/cmds/js/tsc_cmd.rs @@ -4,9 +4,16 @@ use crate::core::runner; use crate::core::stream::{BlockHandler, BlockStreamFilter}; use crate::core::utils::{resolved_command, tool_exists, truncate}; use anyhow::Result; +use lazy_static::lazy_static; use regex::Regex; use std::collections::{HashMap, HashSet}; +lazy_static! { + static ref TSC_ERROR: Regex = Regex::new( + r"^(.+?)\((\d+),(\d+)\):\s+(error|warning)\s+(TS\d+):\s+(.+)$" + ).unwrap(); +} + pub fn run(args: &[String], verbose: u8) -> Result { let tsc_exists = tool_exists("tsc"); @@ -58,11 +65,6 @@ impl BlockHandler for TscHandler { } fn is_block_start(&mut self, line: &str) -> bool { - lazy_static::lazy_static! { - static ref TSC_ERROR: Regex = Regex::new( - r"^(.+?)\((\d+),(\d+)\):\s+(error|warning)\s+(TS\d+):\s+(.+)$" - ).unwrap(); - } if let Some(caps) = TSC_ERROR.captures(line) { self.error_count += 1; self.files.insert(caps[1].to_string()); @@ -104,12 +106,6 @@ impl BlockHandler for TscHandler { } pub(crate) fn filter_tsc_output(output: &str) -> String { - lazy_static::lazy_static! { - // Pattern: src/file.ts(12,5): error TS2322: Type 'string' is not assignable to type 'number'. 
- static ref TSC_ERROR: Regex = Regex::new( - r"^(.+?)\((\d+),(\d+)\):\s+(error|warning)\s+(TS\d+):\s+(.+)$" - ).unwrap(); - } struct TsError { file: String, diff --git a/src/cmds/system/pipe_cmd.rs b/src/cmds/system/pipe_cmd.rs index 5736d173d..304f3f2af 100644 --- a/src/cmds/system/pipe_cmd.rs +++ b/src/cmds/system/pipe_cmd.rs @@ -1,6 +1,8 @@ use anyhow::Result; use std::io::Read; +use crate::core::stream::RAW_CAP; + pub fn resolve_filter(name: &str) -> Option String> { match name { "cargo-test" | "cargo" => Some(crate::cmds::rust::cargo_cmd::filter_cargo_test), @@ -128,7 +130,10 @@ fn find_wrapper(input: &str) -> String { } pub fn auto_detect_filter(input: &str) -> fn(&str) -> String { - let first_1k = &input[..input.len().min(1024)]; + let end = input.len().min(1024); + // Avoid panic: byte 1024 may fall inside a multi-byte UTF-8 char + let end = input.floor_char_boundary(end); + let first_1k = &input[..end]; if first_1k.contains("test result:") && first_1k.contains("passed;") { return crate::cmds::rust::cargo_cmd::filter_cargo_test; @@ -189,8 +194,12 @@ fn identity_filter(input: &str) -> String { pub fn run(filter_name: Option<&str>, passthrough: bool) -> Result<()> { let mut buf = String::new(); std::io::stdin() + .take((RAW_CAP + 1) as u64) .read_to_string(&mut buf) .map_err(|e| anyhow::anyhow!("Failed to read stdin: {}", e))?; + if buf.len() > RAW_CAP { + anyhow::bail!("stdin exceeds {} byte limit", RAW_CAP); + } if passthrough { print!("{}", buf); @@ -405,6 +414,16 @@ mod tests { assert_eq!(out, ""); } + #[test] + fn test_auto_detect_multibyte_at_1024_boundary() { + // Build input where byte 1024 falls inside a multi-byte char (é = 2 bytes) + let mut input = "a".repeat(1023); + input.push('é'); // 2-byte char starting at byte 1023, ends at 1025 + let f = auto_detect_filter(&input); + let out = f(&input); + assert_eq!(out, input); + } + #[test] fn test_auto_detect_single_line_unknown() { let input = "hello world\n"; diff --git a/src/core/stream.rs 
b/src/core/stream.rs index 7e0dcba15..212316d7b 100644 --- a/src/core/stream.rs +++ b/src/core/stream.rs @@ -216,6 +216,8 @@ pub fn status_to_exit_code(status: std::process::ExitStatus) -> i32 { } // ISSUE #897: ChildGuard RAII prevents zombie processes that caused kernel panic +pub const RAW_CAP: usize = 10_485_760; // 10 MiB + pub fn run_streaming( cmd: &mut Command, stdin_mode: StdinMode, @@ -313,7 +315,6 @@ pub fn run_streaming( }); let stdout = child.0.stdout.take().context("No child stdout handle")?; - const RAW_CAP: usize = 10_485_760; let mut raw_stdout = String::new(); let mut filtered = String::new(); let mut capped = false; diff --git a/src/hooks/hook_cmd.rs b/src/hooks/hook_cmd.rs index 844875853..ed8bd29c8 100644 --- a/src/hooks/hook_cmd.rs +++ b/src/hooks/hook_cmd.rs @@ -121,18 +121,16 @@ fn get_rewritten(cmd: &str) -> Option { } fn handle_vscode(cmd: &str) -> Result<()> { - let rewritten = match get_rewritten(cmd) { - Some(r) => r, - None => return Ok(()), - }; - let verdict = permissions::check_command(cmd); - - // Deny: pass through without rewrite — let the host tool handle it. if verdict == PermissionVerdict::Deny { return Ok(()); } + let rewritten = match get_rewritten(cmd) { + Some(r) => r, + None => return Ok(()), + }; + // Allow (explicit rule matched): auto-allow the rewritten command. // Ask/Default (no allow rule matched): rewrite but let the host tool prompt. let decision = match verdict { @@ -247,7 +245,10 @@ fn audit_log(action: &str, original: &str, rewritten: &str) { /// Escape newlines to prevent log-line injection in the pipe-delimited audit log. 
fn sanitize_log_field(s: &str) -> String { - s.replace('\n', "\\n").replace('\r', "\\r") + s.replace('\\', "\\\\") + .replace('|', "\\|") + .replace('\n', "\\n") + .replace('\r', "\\r") } fn audit_log_inner(action: &str, original: &str, rewritten: &str) -> Option<()> { @@ -822,6 +823,17 @@ mod tests { assert!(sanitized.contains("\\n")); } + #[test] + fn test_audit_log_sanitizes_pipe_delimiter() { + let sanitized = sanitize_log_field("git log | head"); + assert!( + !sanitized.contains(" | "), + "unescaped ' | ' breaks field parsing: {}", + sanitized + ); + assert!(sanitized.contains("\\|")); + } + #[test] fn test_claude_unicode_null_passthrough() { let input = claude_input("git status \u{0000}\u{FEFF}"); From dc5877579ef97f10a57136fa0ccf9b0049d67aca Mon Sep 17 00:00:00 2001 From: aesoft <43991222+aeppling@users.noreply.github.com> Date: Fri, 10 Apr 2026 13:58:52 +0200 Subject: [PATCH 12/44] docs(docs): stop manual CHANGELOG edits and use release please --- .claude/agents/rust-rtk.md | 2 +- .claude/skills/ship.md | 37 +++++++----------------- CONTRIBUTING.md | 58 ++++++++++++++++++++++++++++++-------- src/cmds/README.md | 2 +- 4 files changed, 59 insertions(+), 40 deletions(-) diff --git a/.claude/agents/rust-rtk.md b/.claude/agents/rust-rtk.md index 8efe67f0e..d32e344b7 100644 --- a/.claude/agents/rust-rtk.md +++ b/.claude/agents/rust-rtk.md @@ -509,7 +509,7 @@ rtk newcmd args - Update `CLAUDE.md` Module Responsibilities table - Update `README.md` with command support -- Update `CHANGELOG.md` +- CHANGELOG.md is auto-generated by release-please — do not edit manually ## Performance Targets diff --git a/.claude/skills/ship.md b/.claude/skills/ship.md index 380a8ba2b..b774bcb42 100644 --- a/.claude/skills/ship.md +++ b/.claude/skills/ship.md @@ -61,8 +61,9 @@ git status # Should show "nothing to commit, working tree clean" **Files to update**: 1. `Cargo.toml` (line 3): `version = "X.Y.Z"` -2. `CHANGELOG.md` (add new section) -3. `README.md` (if version mentioned) +2. 
`README.md` (if version mentioned) + +> **Note**: `CHANGELOG.md` is auto-generated by release-please from conventional commit messages — do not edit manually. **Example**: ```toml @@ -77,21 +78,11 @@ name = "rtk" version = "0.17.0" # New version ``` -**CHANGELOG.md template**: -```markdown -## [0.17.0] - 2026-02-15 - -### Added -- `rtk pytest` command for Python test filtering (90% token reduction) -- Support for `pytest` JSON output parsing -- Integration with `uv` package manager auto-detection - -### Fixed -- Shell escaping for PowerShell on Windows -- Memory leak in regex pattern caching - -### Changed -- Updated `cargo test` filter to show test names in failures +**Commit message quality matters** — release-please generates CHANGELOG entries directly from your `feat:` and `fix:` commits: +``` +feat(pytest): add Python test filtering with JSON output parsing +fix(shell): correct PowerShell escaping on Windows +perf(cargo): lazy-compile clippy regex patterns ``` ### Step 3: Build and Verify @@ -119,13 +110,12 @@ hyperfine 'target/release/rtk git status' --warmup 3 ```bash # Stage version files -git add Cargo.toml Cargo.lock CHANGELOG.md README.md +git add Cargo.toml Cargo.lock README.md # Commit with version tag git commit -m "chore(release): bump version to v0.17.0 - Updated Cargo.toml version -- Updated CHANGELOG.md with release notes - Verified all quality checks pass - Benchmarked performance (<10ms startup) @@ -361,14 +351,7 @@ target/release/rtk --version **Symptom**: CHANGELOG.md has conflicts after rebase -**Solution**: -```bash -# Always add new entries at top -# Manual merge: -# 1. Keep all entries from both branches -# 2. Sort by version (newest first) -# 3. Ensure date format consistency -``` +**Solution**: Do not edit CHANGELOG.md manually. It is auto-generated by release-please from conventional commit messages when merging to master. 
## Security Considerations diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 4ae5bfca5..6cd87369b 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -107,15 +107,50 @@ For the step-by-step checklist (create filter, register rewrite pattern, registe --- +## Commit Messages & Changelog + +RTK uses [Conventional Commits](https://www.conventionalcommits.org/) and [release-please](https://github.com/googleapis/release-please) to **auto-generate CHANGELOG.md, version bumps, and GitHub releases**. Never edit `CHANGELOG.md` manually — it is fully managed by release-please from your commit messages. + +### Commit format + +``` +(): +``` + +| Type | Semver Impact | When to Use | +|------|---------------|-------------| +| `feat` | Minor | New features, new filters, new command support | +| `fix` | Patch | Bug fixes, corrections | +| `perf` | Patch | Performance improvements | +| `refactor` | — | Code restructuring (no changelog entry) | +| `docs` | — | Documentation only | +| `chore` | — | Maintenance, CI, deps | +| `feat!` / `fix!` | Major | Breaking changes (add `!` after type) | + +**Scope** should match the module or area: `git`, `cargo`, `gh`, `hook`, `tracking`, `cicd`, etc. + +### Examples + +``` +feat(kubectl): add pod log filtering +fix(git): preserve merge commit messages in log filter +perf(cargo): lazy-compile clippy regex patterns +feat!(hook): change rewrite config format +``` + +These commit messages directly become CHANGELOG entries when release-please creates a release PR. Write them as if they will be read by users. + +--- + ## Branch Naming Convention Git branch names cannot include spaces or colons, so we use slash-prefixed names. Pick the prefix that matches your change type and follow it with an optional scope and a short, kebab-case description. 
-| Prefix | Semver Impact | When to Use | -|--------|---------------|-------------| -| `fix/` | Patch | Bug fixes, corrections, minor adjustments | -| `feat/` | Minor | New features, new filters, new command support | -| `chore/` | Major | Breaking changes, API changes, removed functionality | +| Prefix | When to Use | +|--------|-------------| +| `fix/` | Bug fixes, corrections, minor adjustments | +| `feat/` | New features, new filters, new command support | +| `chore/` | CI/CD, deps, maintenance, breaking changes | Combine the prefix with a scope if it adds clarity (e.g. `git`, `kubectl`, `filter`, `tracking`, `config`) and finish with a descriptive slug: `fix/-` or `feat/`. @@ -137,7 +172,7 @@ chore/release-pipeline-cleanup **For large features or refactors**, prefer multi-part PRs over one enormous PR. Split the work into logical, reviewable chunks that can each be merged independently. Examples: - feat(Part 1): Add data model and tests - feat(Part 2): Add CLI command and integration -- feat(Part 3): Update documentation and CHANGELOG +- feat(Part 3): Update documentation **Why**: Small, focused PRs are easier to review, safer to merge, and faster to ship. Large PRs slow down review, hide bugs, and increase merge conflict risk. @@ -166,7 +201,7 @@ Every change **must** include tests. See [Testing](#testing) below. ### 4. Add Documentation -Every change **must** include documentation updates. See [Documentation](#documentation) below. +Documentation updates are required for new filters, new features, and changes that affect already-documented behavior. Bug fixes and refactors typically don't need doc updates. See [Documentation](#documentation) below. ### Contributor License Agreement (CLA) @@ -235,17 +270,18 @@ cargo fmt --all --check && cargo clippy --all-targets && cargo test ## Documentation -Every change **must** include documentation updates. 
Use this table to find which docs to update: +Documentation updates are required for new filters, new features, and changes that affect already-documented behavior. Use this table to find which docs to update: | What you changed | Update these docs | |------------------|-------------------| -| New Rust filter (`src/cmds/`) | Ecosystem `README.md` (e.g., `src/cmds/git/README.md`), [README.md](README.md) command list, [CHANGELOG.md](CHANGELOG.md) | -| New TOML filter (`src/filters/`) | [src/filters/README.md](src/filters/README.md) if naming conventions change, [README.md](README.md) command list, [CHANGELOG.md](CHANGELOG.md) | +| New Rust filter (`src/cmds/`) | Ecosystem `README.md` (e.g., `src/cmds/git/README.md`), [README.md](README.md) command list | +| New TOML filter (`src/filters/`) | [src/filters/README.md](src/filters/README.md) if naming conventions change, [README.md](README.md) command list | | New rewrite pattern | `src/discover/rules.rs` — see [Adding a New Command Filter](src/cmds/README.md#adding-a-new-command-filter) | | Core infrastructure (`src/core/`) | [src/core/README.md](src/core/README.md), [docs/contributing/TECHNICAL.md](docs/contributing/TECHNICAL.md) if flow changes | | Hook system (`src/hooks/`) | [src/hooks/README.md](src/hooks/README.md), [hooks/README.md](hooks/README.md) for agent-facing docs | | Architecture or design change | [ARCHITECTURE.md](docs/contributing/ARCHITECTURE.md), [docs/contributing/TECHNICAL.md](docs/contributing/TECHNICAL.md) | -| Bug fix or breaking change | [CHANGELOG.md](CHANGELOG.md) | + +> **Note**: Do NOT edit `CHANGELOG.md` manually — it is auto-generated by [release-please](https://github.com/googleapis/release-please) from your commit messages. See [Commit Messages & Changelog](#commit-messages--changelog). 
**Navigation**: [CONTRIBUTING.md](CONTRIBUTING.md) (you are here) → [docs/contributing/TECHNICAL.md](docs/contributing/TECHNICAL.md) (architecture + flow) → each folder's `README.md` (implementation details). diff --git a/src/cmds/README.md b/src/cmds/README.md index 5e0f633b3..e2260ba91 100644 --- a/src/cmds/README.md +++ b/src/cmds/README.md @@ -194,7 +194,7 @@ Adding a new filter or command requires changes in multiple places. For TOML-vs- - Add routing match arm in `main.rs`: `Commands::Mycmd { args } => mycmd_cmd::run(&args, cli.verbose)?,` 3. **Add rewrite pattern** — Entry in `src/discover/rules.rs` (PATTERNS + RULES arrays at matching index) so hooks auto-rewrite the command 4. **Write tests** — Real fixture, snapshot test, token savings >= 60% (see [testing rules](../../.claude/rules/cli-testing.md)) -5. **Update docs** — Ecosystem README, CHANGELOG.md +5. **Update docs** — Ecosystem README (CHANGELOG.md is auto-generated by release-please) ### TOML filter (simple line-based filtering) From 71eeedab4d771986b3d3dc5c439f5646135ff96c Mon Sep 17 00:00:00 2001 From: aesoft <43991222+aeppling@users.noreply.github.com> Date: Fri, 10 Apr 2026 14:00:23 +0200 Subject: [PATCH 13/44] feat(stream): P1 fixes from PR #956 review + trigger feat release tag --- src/cmds/system/pipe_cmd.rs | 66 ++++++++++++++++++++++++++++++++++++- src/core/stream.rs | 41 ++++++++++++++++++++--- src/hooks/hook_cmd.rs | 15 +++++++++ src/main.rs | 2 +- 4 files changed, 117 insertions(+), 7 deletions(-) diff --git a/src/cmds/system/pipe_cmd.rs b/src/cmds/system/pipe_cmd.rs index 304f3f2af..c0c73b272 100644 --- a/src/cmds/system/pipe_cmd.rs +++ b/src/cmds/system/pipe_cmd.rs @@ -191,6 +191,14 @@ fn identity_filter(input: &str) -> String { input.to_string() } +fn apply_filter(filter_fn: fn(&str) -> String, input: &str) -> String { + std::panic::catch_unwind(std::panic::AssertUnwindSafe(|| filter_fn(input))) + .unwrap_or_else(|_| { + eprintln!("[rtk] warning: filter panicked — passing through raw 
output"); + input.to_string() + }) +} + pub fn run(filter_name: Option<&str>, passthrough: bool) -> Result<()> { let mut buf = String::new(); std::io::stdin() @@ -218,7 +226,7 @@ pub fn run(filter_name: Option<&str>, passthrough: bool) -> Result<()> { None => auto_detect_filter(&buf), }; - let output = filter_fn(&buf); + let output = apply_filter(filter_fn, &buf); print!("{}", output); Ok(()) } @@ -457,6 +465,62 @@ mod tests { assert!(resolve_filter("prettier").is_some()); } + #[test] + fn test_panicking_filter_returns_passthrough() { + fn panicking_filter(_input: &str) -> String { + panic!("filter bug"); + } + let input = "some output\n"; + let result = super::apply_filter(panicking_filter, input); + assert_eq!(result, input); + } + + fn count_tokens(s: &str) -> usize { + s.split_whitespace().count() + } + + #[test] + fn test_grep_wrapper_token_savings() { + // Realistic rg output: 200 matches across 10 files (20 per file → 10 shown + truncation) + let mut input = String::new(); + for file_idx in 1..=10 { + for line in 1..=20 { + input.push_str(&format!( + "src/cmds/module{}/handler.rs:{}: let result = process_request(ctx, &payload).await?;\n", + file_idx, line * 10 + )); + } + } + let output = grep_wrapper(&input); + let savings = 100.0 - (count_tokens(&output) as f64 / count_tokens(&input) as f64 * 100.0); + assert!( + savings >= 40.0, + "grep filter: expected ≥40% savings, got {:.1}% (in={}, out={})", + savings, count_tokens(&input), count_tokens(&output) + ); + } + + #[test] + fn test_find_wrapper_token_savings() { + // Realistic find output: 500 files across 30 dirs (20-dir cap + 10-file cap both trigger) + let mut input = String::new(); + for dir in 1..=30 { + for file in 1..=17 { + input.push_str(&format!( + "./src/components/feature{}/sub_{}/component_{}.tsx\n", + dir, dir, file + )); + } + } + let output = find_wrapper(&input); + let savings = 100.0 - (count_tokens(&output) as f64 / count_tokens(&input) as f64 * 100.0); + assert!( + savings >= 40.0, + 
"find filter: expected ≥40% savings, got {:.1}% (in={}, out={})", + savings, count_tokens(&input), count_tokens(&output) + ); + } + #[test] fn test_auto_detect_mypy_output() { let input = "src/app.py:42: error: Argument 1 has incompatible type [arg-type]\n\ diff --git a/src/core/stream.rs b/src/core/stream.rs index 212316d7b..9f662477f 100644 --- a/src/core/stream.rs +++ b/src/core/stream.rs @@ -297,18 +297,28 @@ pub fn run_streaming( let stderr = child.0.stderr.take().context("No child stderr handle")?; let stderr_thread = std::thread::spawn(move || -> String { let mut raw_err = String::new(); + let mut capped = false; if live_stderr { let stderr_out = io::stderr(); let mut err_out = stderr_out.lock(); for line in BufReader::new(stderr).lines().map_while(Result::ok) { writeln!(err_out, "{}", line).ok(); - raw_err.push_str(&line); - raw_err.push('\n'); + if raw_err.len() + line.len() < RAW_CAP { + raw_err.push_str(&line); + raw_err.push('\n'); + } else if !capped { + capped = true; + eprintln!("[rtk] warning: stderr exceeds 10 MiB — capture truncated"); + } } } else { for line in BufReader::new(stderr).lines().map_while(Result::ok) { - raw_err.push_str(&line); - raw_err.push('\n'); + if raw_err.len() + line.len() < RAW_CAP { + raw_err.push_str(&line); + raw_err.push('\n'); + } else if !capped { + capped = true; + } } } raw_err @@ -400,7 +410,11 @@ pub fn run_streaming( if let Some(mut f) = saved_filter { if let Some(post) = f.on_exit(exit_code, &raw) { filtered.push_str(&post); - print!("{}", post); + match write!(io::stdout(), "{}", post) { + Err(e) if e.kind() == io::ErrorKind::BrokenPipe => {} + Err(e) => return Err(e.into()), + Ok(_) => {} + } } } @@ -616,6 +630,23 @@ pub(crate) mod tests { ); } + #[test] + fn test_run_streaming_stderr_cap_at_10mb() { + let mut cmd = Command::new("sh"); + // ~11 MiB on stderr, nothing on stdout + cmd.args([ + "-c", + "dd if=/dev/zero bs=1024 count=11264 2>/dev/null | tr '\\0' 'a' | fold -w 80 1>&2", + ]); + let result = 
run_streaming(&mut cmd, StdinMode::Null, FilterMode::CaptureOnly).unwrap(); + // raw = raw_stdout + raw_stderr; stdout is empty so raw ≈ stderr size + assert!( + result.raw.len() <= RAW_CAP + 200, + "stderr in raw should be capped at ~10 MiB, got {} bytes", + result.raw.len() + ); + } + #[test] fn test_child_guard_prevents_zombie() { let mut cmd = Command::new("true"); diff --git a/src/hooks/hook_cmd.rs b/src/hooks/hook_cmd.rs index ed8bd29c8..1481fc218 100644 --- a/src/hooks/hook_cmd.rs +++ b/src/hooks/hook_cmd.rs @@ -856,4 +856,19 @@ mod tests { PermissionVerdict::Deny ); } + + #[test] + fn test_gemini_deny_blocks_rewrite() { + use super::permissions::check_command_with_rules; + let deny = vec!["cargo test".to_string()]; + assert_eq!( + check_command_with_rules("cargo test", &deny, &[], &[]), + PermissionVerdict::Deny + ); + // Denied commands must not be rewritten — Gemini handler checks deny before rewrite + assert!( + get_rewritten("cargo test").is_some(), + "cargo test should be rewritable when not denied" + ); + } } diff --git a/src/main.rs b/src/main.rs index 5c6313289..c5a176f79 100644 --- a/src/main.rs +++ b/src/main.rs @@ -560,7 +560,7 @@ enum Commands { min_occurrences: usize, }, - /// Execute a shell command via the RTK native executor (filters + tracking) + /// Execute a shell command via sh -c (raw, no filtering or tracking) Run { /// Command string to execute (use -c for shell-like invocation) #[arg(short = 'c', long = "command")] From 840571fe90ed14fb7e96f9b9000a1bac6d196d23 Mon Sep 17 00:00:00 2001 From: aesoft <43991222+aeppling@users.noreply.github.com> Date: Sat, 11 Apr 2026 17:24:04 +0200 Subject: [PATCH 14/44] fix(core): review 956 various fix MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit 4 fixes applied (all confirmed introduced by PR #956, all tests pass): - P0 NEW-passthrough — pipe_cmd.rs: passthrough before cap read - P1 BUFFERED-panic — stream.rs: catch_unwind on Buffered filter - P1 
STREAM-postcap — stream.rs: stop feeding filter after cap - P2 OFFBYONE-rawcap — stream.rs: 5 cap boundary checks fixed 5 findings dropped (not introduced by PR or not bugs): - DENY-claude: pre-existing on master - AUDIT-asymmetry: intentional scope choice, not a bug - GEMINI-test: pre-existing test pattern from master - SAVINGS-threshold: 40% is correct (filters achieve ~46%) - STDERR-test: cosmetic CI, not correctness --- src/cmds/rust/cargo_cmd.rs | 106 +++++++++-------------------- src/cmds/system/pipe_cmd.rs | 11 +-- src/core/stream.rs | 24 +++++-- src/discover/lexer.rs | 77 +++++++++++++++++++++ src/discover/registry.rs | 33 +-------- src/hooks/hook_cmd.rs | 129 ++++++++++++++++++------------------ src/hooks/permissions.rs | 28 ++++++-- 7 files changed, 220 insertions(+), 188 deletions(-) diff --git a/src/cmds/rust/cargo_cmd.rs b/src/cmds/rust/cargo_cmd.rs index 41fa43466..f94b04a7c 100644 --- a/src/cmds/rust/cargo_cmd.rs +++ b/src/cmds/rust/cargo_cmd.rs @@ -791,98 +791,56 @@ fn filter_cargo_nextest(output: &str) -> String { } fn filter_cargo_build(output: &str) -> String { - let mut errors: Vec = Vec::new(); - let mut warnings = 0; - let mut error_count = 0; - let mut compiled = 0; - let mut in_error = false; - let mut current_error = Vec::new(); - let mut finished_line: Option = None; + let mut handler = CargoBuildHandler::new(); + let mut blocks: Vec> = Vec::new(); + let mut current_block: Vec = Vec::new(); + let mut in_block = false; for line in output.lines() { - if line.trim_start().starts_with("Compiling") || line.trim_start().starts_with("Checking") { - compiled += 1; + if handler.should_skip(line) { continue; } - if line.trim_start().starts_with("Downloading") - || line.trim_start().starts_with("Downloaded") - { - continue; - } - if line.trim_start().starts_with("Finished") { - finished_line = Some(line.trim_start().to_string()); - continue; - } - - // Detect error/warning blocks - if line.starts_with("error[") || line.starts_with("error:") { - // 
Skip "error: aborting due to" summary lines - if line.contains("aborting due to") || line.contains("could not compile") { - continue; - } - if in_error && !current_error.is_empty() { - errors.push(current_error.join("\n")); - current_error.clear(); - } - error_count += 1; - in_error = true; - current_error.push(line.to_string()); - } else if line.starts_with("warning:") - && line.contains("generated") - && line.contains("warning") - { - // "warning: `crate` generated N warnings" summary line - continue; - } else if line.starts_with("warning:") || line.starts_with("warning[") { - if in_error && !current_error.is_empty() { - errors.push(current_error.join("\n")); - current_error.clear(); + if handler.is_block_start(line) { + if in_block && !current_block.is_empty() { + blocks.push(std::mem::take(&mut current_block)); } - warnings += 1; - in_error = true; - current_error.push(line.to_string()); - } else if in_error { - if line.trim().is_empty() && current_error.len() > 3 { - errors.push(current_error.join("\n")); - current_error.clear(); - in_error = false; + in_block = true; + current_block.push(line.to_string()); + } else if in_block { + if handler.is_block_continuation(line, ¤t_block) { + current_block.push(line.to_string()); } else { - current_error.push(line.to_string()); + blocks.push(std::mem::take(&mut current_block)); + in_block = false; } } } - - if !current_error.is_empty() { - errors.push(current_error.join("\n")); + if !current_block.is_empty() { + blocks.push(current_block); } - if error_count == 0 && warnings == 0 { - return if let Some(finished) = finished_line { - format!("cargo build ({} crates compiled)\n{}", compiled, finished) - } else { - format!("cargo build ({} crates compiled)", compiled) - }; + if handler.error_count == 0 && handler.warnings == 0 { + let mut s = format!("cargo build ({} crates compiled)", handler.compiled); + if let Some(ref finished) = handler.finished_line { + s = format!("{}\n{}", s, finished); + } + return s; } - let mut 
result = String::new(); - result.push_str(&format!( - "cargo build: {} errors, {} warnings ({} crates)\n", - error_count, warnings, compiled - )); - result.push_str("═══════════════════════════════════════\n"); - - for (i, err) in errors.iter().enumerate().take(15) { - result.push_str(err); + let mut result = format!( + "cargo build: {} errors, {} warnings ({} crates)\n═══════════════════════════════════════\n", + handler.error_count, handler.warnings, handler.compiled + ); + for (i, blk) in blocks.iter().enumerate().take(15) { + result.push_str(&blk.join("\n")); result.push('\n'); - if i < errors.len() - 1 { + if i < blocks.len() - 1 { result.push('\n'); } } - - if errors.len() > 15 { - result.push_str(&format!("\n... +{} more issues\n", errors.len() - 15)); + if blocks.len() > 15 { + result.push_str(&format!("\n... +{} more issues\n", blocks.len() - 15)); } - result.trim().to_string() } diff --git a/src/cmds/system/pipe_cmd.rs b/src/cmds/system/pipe_cmd.rs index c0c73b272..0af3448f3 100644 --- a/src/cmds/system/pipe_cmd.rs +++ b/src/cmds/system/pipe_cmd.rs @@ -200,6 +200,12 @@ fn apply_filter(filter_fn: fn(&str) -> String, input: &str) -> String { } pub fn run(filter_name: Option<&str>, passthrough: bool) -> Result<()> { + if passthrough { + std::io::copy(&mut std::io::stdin(), &mut std::io::stdout()) + .map_err(|e| anyhow::anyhow!("Failed to relay stdin: {}", e))?; + return Ok(()); + } + let mut buf = String::new(); std::io::stdin() .take((RAW_CAP + 1) as u64) @@ -209,11 +215,6 @@ pub fn run(filter_name: Option<&str>, passthrough: bool) -> Result<()> { anyhow::bail!("stdin exceeds {} byte limit", RAW_CAP); } - if passthrough { - print!("{}", buf); - return Ok(()); - } - let filter_fn = match filter_name { Some(name) => resolve_filter(name).ok_or_else(|| { anyhow::anyhow!( diff --git a/src/core/stream.rs b/src/core/stream.rs index 9f662477f..75d576e54 100644 --- a/src/core/stream.rs +++ b/src/core/stream.rs @@ -303,7 +303,7 @@ pub fn run_streaming( let mut 
err_out = stderr_out.lock(); for line in BufReader::new(stderr).lines().map_while(Result::ok) { writeln!(err_out, "{}", line).ok(); - if raw_err.len() + line.len() < RAW_CAP { + if raw_err.len() + line.len() + 1 <= RAW_CAP { raw_err.push_str(&line); raw_err.push('\n'); } else if !capped { @@ -313,7 +313,7 @@ pub fn run_streaming( } } else { for line in BufReader::new(stderr).lines().map_while(Result::ok) { - if raw_err.len() + line.len() < RAW_CAP { + if raw_err.len() + line.len() + 1 <= RAW_CAP { raw_err.push_str(&line); raw_err.push('\n'); } else if !capped { @@ -338,12 +338,16 @@ pub fn run_streaming( FilterMode::Passthrough => unreachable!("handled by early-return above"), FilterMode::Streaming(mut filter) => { for line in BufReader::new(stdout).lines().map_while(Result::ok) { - if raw_stdout.len() + line.len() < RAW_CAP { + if capped { + continue; + } + if raw_stdout.len() + line.len() + 1 <= RAW_CAP { raw_stdout.push_str(&line); raw_stdout.push('\n'); - } else if !capped { + } else { capped = true; eprintln!("[rtk] warning: output exceeds 10 MiB — filter input truncated"); + continue; } if let Some(output) = filter.feed_line(&line) { filtered.push_str(&output); @@ -365,7 +369,7 @@ pub fn run_streaming( } FilterMode::Buffered(filter_fn) => { for line in BufReader::new(stdout).lines().map_while(Result::ok) { - if raw_stdout.len() + line.len() < RAW_CAP { + if raw_stdout.len() + line.len() + 1 <= RAW_CAP { raw_stdout.push_str(&line); raw_stdout.push('\n'); } else if !capped { @@ -373,7 +377,13 @@ pub fn run_streaming( eprintln!("[rtk] warning: output exceeds 10 MiB — filter input truncated"); } } - filtered = filter_fn(&raw_stdout); + filtered = std::panic::catch_unwind(std::panic::AssertUnwindSafe(|| { + filter_fn(&raw_stdout) + })) + .unwrap_or_else(|_| { + eprintln!("[rtk] warning: filter panicked — passing through raw output"); + raw_stdout.clone() + }); match write!(out, "{}", filtered) { Err(e) if e.kind() == io::ErrorKind::BrokenPipe => {} Err(e) => 
return Err(e.into()), @@ -382,7 +392,7 @@ pub fn run_streaming( } FilterMode::CaptureOnly => { for line in BufReader::new(stdout).lines().map_while(Result::ok) { - if raw_stdout.len() + line.len() < RAW_CAP { + if raw_stdout.len() + line.len() + 1 <= RAW_CAP { raw_stdout.push_str(&line); raw_stdout.push('\n'); } else if !capped { diff --git a/src/discover/lexer.rs b/src/discover/lexer.rs index a5ea114ab..8a126530a 100644 --- a/src/discover/lexer.rs +++ b/src/discover/lexer.rs @@ -258,6 +258,54 @@ fn flush_arg(tokens: &mut Vec, current: &mut String, offset: usize) } } +/// Split a shell command on operators (`&&`, `||`, `;`) and optionally pipes (`|`), +/// respecting quoted strings via the lexer. +/// +/// When `stop_at_pipe` is true, returns only segments before the first `|` +/// (used by command rewriting — only the left side of a pipe gets rewritten). +/// When false, splits through pipes too (used by permission checking — +/// every segment must be validated). +pub fn split_on_operators(cmd: &str, stop_at_pipe: bool) -> Vec<&str> { + let trimmed = cmd.trim(); + if trimmed.is_empty() { + return vec![]; + } + + let tokens = tokenize(trimmed); + let mut results = Vec::new(); + let mut seg_start: usize = 0; + + for tok in &tokens { + match tok.kind { + TokenKind::Operator => { + let segment = trimmed[seg_start..tok.offset].trim(); + if !segment.is_empty() { + results.push(segment); + } + seg_start = tok.offset + tok.value.len(); + } + TokenKind::Pipe => { + let segment = trimmed[seg_start..tok.offset].trim(); + if !segment.is_empty() { + results.push(segment); + } + if stop_at_pipe { + return results; + } + seg_start = tok.offset + tok.value.len(); + } + _ => {} + } + } + + let tail = trimmed[seg_start..].trim(); + if !tail.is_empty() { + results.push(tail); + } + + results +} + pub fn strip_quotes(s: &str) -> String { let chars: Vec = s.chars().collect(); if chars.len() >= 2 @@ -952,4 +1000,33 @@ mod tests { fn test_strip_quotes_mismatched() { 
assert_eq!(strip_quotes("\"hello'"), "\"hello'"); } + + #[test] + fn test_split_on_operators_stop_at_pipe() { + assert_eq!(split_on_operators("a | b | c", true), vec!["a"]); + assert_eq!(split_on_operators("a && b | c", true), vec!["a", "b"]); + } + + #[test] + fn test_split_on_operators_through_pipes() { + assert_eq!(split_on_operators("a | b | c", false), vec!["a", "b", "c"]); + assert_eq!( + split_on_operators("a && b | c ; d", false), + vec!["a", "b", "c", "d"] + ); + } + + #[test] + fn test_split_on_operators_quoted() { + assert_eq!( + split_on_operators(r#"echo "a && b" && cargo test"#, false), + vec![r#"echo "a && b""#, "cargo test"] + ); + } + + #[test] + fn test_split_on_operators_empty() { + assert!(split_on_operators("", false).is_empty()); + assert!(split_on_operators(" ", true).is_empty()); + } } diff --git a/src/discover/registry.rs b/src/discover/registry.rs index 8f21016c0..eaa54aa3e 100644 --- a/src/discover/registry.rs +++ b/src/discover/registry.rs @@ -3,7 +3,7 @@ use lazy_static::lazy_static; use regex::{Regex, RegexSet}; -use super::lexer::{tokenize, TokenKind}; +use super::lexer::{split_on_operators, tokenize, TokenKind}; use super::rules::{IGNORED_EXACT, IGNORED_PREFIXES, RULES}; /// Result of classifying a command. 
@@ -221,36 +221,7 @@ pub fn split_command_chain(cmd: &str) -> Vec<&str> { return vec![trimmed]; } - let tokens = tokenize(trimmed); - let mut results = Vec::new(); - let mut seg_start: usize = 0; - - for tok in &tokens { - match tok.kind { - TokenKind::Operator => { - let segment = trimmed[seg_start..tok.offset].trim(); - if !segment.is_empty() { - results.push(segment); - } - seg_start = tok.offset + tok.value.len(); - } - TokenKind::Pipe => { - let segment = trimmed[seg_start..tok.offset].trim(); - if !segment.is_empty() { - results.push(segment); - } - return results; - } - _ => {} - } - } - - let segment = trimmed[seg_start..].trim(); - if !segment.is_empty() { - results.push(segment); - } - - results + split_on_operators(trimmed, true) } /// Strip git global options before the subcommand (#163). diff --git a/src/hooks/hook_cmd.rs b/src/hooks/hook_cmd.rs index 1481fc218..5f002b7a9 100644 --- a/src/hooks/hook_cmd.rs +++ b/src/hooks/hook_cmd.rs @@ -275,53 +275,51 @@ fn audit_log_inner(action: &str, original: &str, rewritten: &str) -> Option<()> // ── Claude Code native hook ──────────────────────────────────── -/// Run the Claude Code PreToolUse hook natively. 
-pub fn run_claude() -> Result<()> { - let input = read_stdin_limited()?; - - let input = input.trim(); - if input.is_empty() { - return Ok(()); - } - - let v: Value = match serde_json::from_str(input) { - Ok(v) => v, - Err(e) => { - let _ = writeln!(io::stderr(), "[rtk hook] Failed to parse JSON input: {e}"); - return Ok(()); - } - }; +enum PayloadAction { + Rewrite { + cmd: String, + rewritten: String, + output: Value, + }, + Skip { + reason: &'static str, + cmd: String, + }, + Ignore, +} +fn process_claude_payload(v: &Value) -> PayloadAction { let cmd = match v .pointer("/tool_input/command") .and_then(|c| c.as_str()) .filter(|c| !c.is_empty()) { - Some(c) => c.to_string(), - None => return Ok(()), + Some(c) => c, + None => return PayloadAction::Ignore, }; - let verdict = permissions::check_command(&cmd); + let verdict = permissions::check_command(cmd); if verdict == PermissionVerdict::Deny { - audit_log("skip:deny_rule", &cmd, ""); - return Ok(()); + return PayloadAction::Skip { + reason: "skip:deny_rule", + cmd: cmd.to_string(), + }; } - let rewritten = match get_rewritten(&cmd) { + let rewritten = match get_rewritten(cmd) { Some(r) => r, None => { - audit_log("skip:no_match", &cmd, ""); - return Ok(()); + return PayloadAction::Skip { + reason: "skip:no_match", + cmd: cmd.to_string(), + } } }; - audit_log("rewrite", &cmd, &rewritten); - - // Clone original tool_input, replace only "command" let updated_input = { let mut ti = v.get("tool_input").cloned().unwrap_or_else(|| json!({})); if let Some(obj) = ti.as_object_mut() { - obj.insert("command".into(), Value::String(rewritten)); + obj.insert("command".into(), Value::String(rewritten.clone())); } ti }; @@ -332,7 +330,6 @@ pub fn run_claude() -> Result<()> { "updatedInput": updated_input }); - // Only include permissionDecision for Allow (not Ask) if verdict == PermissionVerdict::Allow { hook_output .as_object_mut() @@ -340,51 +337,55 @@ pub fn run_claude() -> Result<()> { .insert("permissionDecision".into(), 
json!("allow")); } - let output = json!({ "hookSpecificOutput": hook_output }); - let _ = writeln!(io::stdout(), "{output}"); - Ok(()) + PayloadAction::Rewrite { + cmd: cmd.to_string(), + rewritten, + output: json!({ "hookSpecificOutput": hook_output }), + } } -/// Process a Claude hook payload from a string (for testing). -#[cfg(test)] -fn run_claude_inner(input: &str) -> Option { - let v: Value = serde_json::from_str(input).ok()?; - - let cmd = v - .pointer("/tool_input/command") - .and_then(|c| c.as_str()) - .filter(|c| !c.is_empty())?; +/// Run the Claude Code PreToolUse hook natively. +pub fn run_claude() -> Result<()> { + let input = read_stdin_limited()?; - let verdict = permissions::check_command(cmd); - if verdict == PermissionVerdict::Deny { - return None; + let input = input.trim(); + if input.is_empty() { + return Ok(()); } - let rewritten = get_rewritten(cmd)?; - - let updated_input = { - let mut ti = v.get("tool_input").cloned().unwrap_or_else(|| json!({})); - if let Some(obj) = ti.as_object_mut() { - obj.insert("command".into(), Value::String(rewritten)); + let v: Value = match serde_json::from_str(input) { + Ok(v) => v, + Err(e) => { + let _ = writeln!(io::stderr(), "[rtk hook] Failed to parse JSON input: {e}"); + return Ok(()); } - ti }; - let mut hook_output = json!({ - "hookEventName": PRE_TOOL_USE_KEY, - "permissionDecisionReason": "RTK auto-rewrite", - "updatedInput": updated_input - }); - - if verdict == PermissionVerdict::Allow { - hook_output - .as_object_mut() - .unwrap() - .insert("permissionDecision".into(), json!("allow")); + match process_claude_payload(&v) { + PayloadAction::Rewrite { + cmd, + rewritten, + output, + } => { + audit_log("rewrite", &cmd, &rewritten); + let _ = writeln!(io::stdout(), "{output}"); + } + PayloadAction::Skip { reason, cmd } => { + audit_log(reason, &cmd, ""); + } + PayloadAction::Ignore => {} } - let output = json!({ "hookSpecificOutput": hook_output }); - Some(output.to_string()) + Ok(()) +} + +#[cfg(test)] 
+fn run_claude_inner(input: &str) -> Option { + let v: Value = serde_json::from_str(input).ok()?; + match process_claude_payload(&v) { + PayloadAction::Rewrite { output, .. } => Some(output.to_string()), + _ => None, + } } // ── Cursor native hook ───────────────────────────────────────── diff --git a/src/hooks/permissions.rs b/src/hooks/permissions.rs index e189d8aca..552fbcdee 100644 --- a/src/hooks/permissions.rs +++ b/src/hooks/permissions.rs @@ -1,5 +1,6 @@ use super::constants::{CLAUDE_DIR, SETTINGS_JSON, SETTINGS_LOCAL_JSON}; use crate::core::stream::exec_capture; +use crate::discover::lexer::split_on_operators; use serde_json::Value; use std::path::PathBuf; @@ -267,14 +268,8 @@ fn glob_matches(cmd: &str, pattern: &str) -> bool { true } -/// Split a compound shell command into individual segments. -/// -/// Splits on `&&`, `||`, `|`, and `;`. Not a full shell parser — handles common cases. fn split_compound_command(cmd: &str) -> Vec<&str> { - cmd.split("&&") - .flat_map(|s| s.split("||")) - .flat_map(|s| s.split(['|', ';'])) - .collect() + split_on_operators(cmd, false) } #[cfg(test)] @@ -391,6 +386,25 @@ mod tests { ); } + #[test] + fn test_quoted_operators_not_split() { + // "&&" inside quotes must NOT cause a split — old naive splitter got this wrong + let deny = vec!["git push --force".to_string()]; + assert_eq!( + check_command_with_rules(r#"echo "git push --force && danger""#, &deny, &[], &[]), + PermissionVerdict::Default + ); + } + + #[test] + fn test_pipe_segments_checked() { + let deny = vec!["rm -rf".to_string()]; + assert_eq!( + check_command_with_rules("cat file | rm -rf /", &deny, &[], &[]), + PermissionVerdict::Deny + ); + } + #[test] fn test_ask_verdict() { let ask = vec!["git push".to_string()]; From b6bc98a064d37dd6545cff8569dde64c0cf8c4f5 Mon Sep 17 00:00:00 2001 From: aesoft <43991222+aeppling@users.noreply.github.com> Date: Sun, 12 Apr 2026 14:04:00 +0200 Subject: [PATCH 15/44] Update CHANGELOG.md --- CHANGELOG.md | 39 
--------------------------------------- 1 file changed, 39 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 07af09d07..1c1067489 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -5,45 +5,6 @@ All notable changes to rtk (Rust Token Killer) will be documented in this file. The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html). -## [0.35.0](https://github.com/rtk-ai/rtk/compare/v0.34.3...v0.35.0) (2026-04-06) - - -### Features - -* **aws:** expand CLI filters from 8 to 25 subcommands ([402c48e](https://github.com/rtk-ai/rtk/commit/402c48e66988e638a5b4f4dd193238fc1d0fe18f)) - - -### Bug Fixes - -* **cmd:** read/cat multiple file and consistent behavior ([3f58018](https://github.com/rtk-ai/rtk/commit/3f58018f4af1d7206457929cf80bb4534203c3ee)) -* **docs:** clean some docs + disclaimer ([deda44f](https://github.com/rtk-ai/rtk/commit/deda44f73607981f3d27ecc6341ce927aab34d37)) -* **gh:** pass through gh pr merge instead of canned response ([#938](https://github.com/rtk-ai/rtk/issues/938)) ([8465ca9](https://github.com/rtk-ai/rtk/commit/8465ca953fa9d70dcc971a941c19465d456eb7d4)) -* **gh:** pass through gh pr merge instead of canned response ([#938](https://github.com/rtk-ai/rtk/issues/938)) ([e1f2845](https://github.com/rtk-ai/rtk/commit/e1f2845df06a8d8b8325945dc4940ec5f530e4cc)) -* **git:** inherit stdin for commit and push to preserve SSH signing ([#733](https://github.com/rtk-ai/rtk/issues/733)) ([eefeae4](https://github.com/rtk-ai/rtk/commit/eefeae45656ff2607c3f519c8eae235e3f0fe411)) -* **git:** inherit stdin for commit and push to preserve SSH signing ([#733](https://github.com/rtk-ai/rtk/issues/733)) ([6cee6c6](https://github.com/rtk-ai/rtk/commit/6cee6c60b80f914ed9505e3925d85cadec43ab97)) -* **git:** preserve full diff hunk headers ([62f4452](https://github.com/rtk-ai/rtk/commit/62f445227679f3df293fe35e9b18cc5ab39d7963)) -* **git:** preserve full 
diff hunk headers ([09b3ff9](https://github.com/rtk-ai/rtk/commit/09b3ff9424e055f5fe25e535e5b60e077f8344f9)) -* **go:** avoid false build errors from download logs ([9c1cf2f](https://github.com/rtk-ai/rtk/commit/9c1cf2f403534fa7874638b1b983c2d7f918a185)) -* **go:** avoid false build errors from download logs ([d44fd3e](https://github.com/rtk-ai/rtk/commit/d44fd3e034208e3bcd59c2c46f7720eec4f10c98)) -* **go:** cover more build failure shapes ([2425ad6](https://github.com/rtk-ai/rtk/commit/2425ad68e5386d19e5ec9ff1ca151a6d2c9a56d3)) -* **go:** preserve failing test location context ([1481bc5](https://github.com/rtk-ai/rtk/commit/1481bc590924031456a6022510275c29c09e330e)) -* **go:** preserve failing test location context ([374fe64](https://github.com/rtk-ai/rtk/commit/374fe64cfbedcd676733973e81a63a6dfecbb1b7)) -* **go:** restore build error coverage ([1177c9c](https://github.com/rtk-ai/rtk/commit/1177c9c873ac63b6c0bcc9e1b664a705baa0ad7a)) -* **grep:** close subprocess stdin to prevent memory leak ([#897](https://github.com/rtk-ai/rtk/issues/897)) ([7217562](https://github.com/rtk-ai/rtk/commit/72175623551f40b581b4a7f6ed966c1e4a9c7358)) -* **grep:** close subprocess stdin to prevent memory leak ([#897](https://github.com/rtk-ai/rtk/issues/897)) ([09979cf](https://github.com/rtk-ai/rtk/commit/09979cf29701a1b775bcac761d24ec0e055d1bec)) -* **hook_check:** detect missing integrations ([9cf9ccc](https://github.com/rtk-ai/rtk/commit/9cf9ccc1ac39f8bba37e932c7d318a3aa7a34ae9)) -* **init:** remove opt-out instruction from telemetry message ([7571c8e](https://github.com/rtk-ai/rtk/commit/7571c8e101c41ee64c51e2bd64697f85f9142423)) -* **init:** remove telemetry info lines from init output ([7dbef2c](https://github.com/rtk-ai/rtk/commit/7dbef2ce00824d26f2057e4c3c76e429e2e23088)) -* **main:** kill zombie processes + path for rtk md ([d16fc6d](https://github.com/rtk-ai/rtk/commit/d16fc6dacbfec912c21522939b15b7bbd9719487)) -* **main:** kill zombie processes + path for rtk md + missing 
intergrations ([a919335](https://github.com/rtk-ai/rtk/commit/a919335519ed4a5259a212e56407cb312aa99bac)) -* **merge:** changelog conflicts ([d92c5d2](https://github.com/rtk-ai/rtk/commit/d92c5d264a49483c8d6079e04d946a79bc990a74)) -* **proxy:** kill child process on SIGINT/SIGTERM to prevent orphans ([d813919](https://github.com/rtk-ai/rtk/commit/d813919a24546e044e7844fc7ed05fef4ec24033)) -* **proxy:** kill child process on SIGINT/SIGTERM to prevent orphans ([3318510](https://github.com/rtk-ai/rtk/commit/33185101fc122d0c11a25a4e02ac9f3a7dc7e3bb)) -* **review:** address ChildGuard disarm, stdin dedup, hook masking ([d85fe33](https://github.com/rtk-ai/rtk/commit/d85fe3384b87c16fafd25ec7bcadbff6e69f3f1f)) -* **security:** default to ask when no permission rule matches ([#886](https://github.com/rtk-ai/rtk/issues/886)) ([158c745](https://github.com/rtk-ai/rtk/commit/158c74527f6591d372e40a78cd604d73a20649a9)) -* **security:** default to ask when no permission rule matches ([#886](https://github.com/rtk-ai/rtk/issues/886)) ([41a6c6b](https://github.com/rtk-ai/rtk/commit/41a6c6bf6da78a4754794fdc6a1469df2e327920)) -* **tracking:** use std::env::temp_dir() for compatibility (instead of unix tmp) ([e918661](https://github.com/rtk-ai/rtk/commit/e918661440d7b50321f0535032f52c5e87aaf3cb)) - ## [0.34.3](https://github.com/rtk-ai/rtk/compare/v0.34.2...v0.34.3) (2026-04-02) From 8adea26aeeacadd5e50037387c74de14c78b4fcf Mon Sep 17 00:00:00 2001 From: aesoft <43991222+aeppling@users.noreply.github.com> Date: Sun, 12 Apr 2026 14:04:26 +0200 Subject: [PATCH 16/44] Update Cargo.toml --- Cargo.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Cargo.toml b/Cargo.toml index a766e9228..cb52a76bb 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "rtk" -version = "0.35.0" +version = "0.34.3" edition = "2021" authors = ["Patrick Szymkowiak"] description = "Rust Token Killer - High-performance CLI proxy to minimize LLM token consumption" From 
a4ec3856752cd8e10a816ecf2baba3b0e6996ee5 Mon Sep 17 00:00:00 2001 From: aesoft <43991222+aeppling@users.noreply.github.com> Date: Sun, 12 Apr 2026 14:04:42 +0200 Subject: [PATCH 17/44] Update .release-please-manifest.json --- .release-please-manifest.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.release-please-manifest.json b/.release-please-manifest.json index 3a39fd8cf..b9091c583 100644 --- a/.release-please-manifest.json +++ b/.release-please-manifest.json @@ -1,3 +1,3 @@ { - ".": "0.35.0" + ".": "0.34.3" } From 5916ecd86fb319c2519a0b4fb2891309833a3bb4 Mon Sep 17 00:00:00 2001 From: Adrien Eppling Date: Tue, 14 Apr 2026 20:52:34 +0200 Subject: [PATCH 18/44] fix: rename ship.md to ship/SKILL.md to match develop --- .claude/skills/{ship.md => ship/SKILL.md} | 1 + 1 file changed, 1 insertion(+) rename .claude/skills/{ship.md => ship/SKILL.md} (99%) diff --git a/.claude/skills/ship.md b/.claude/skills/ship/SKILL.md similarity index 99% rename from .claude/skills/ship.md rename to .claude/skills/ship/SKILL.md index b774bcb42..66acf6181 100644 --- a/.claude/skills/ship.md +++ b/.claude/skills/ship/SKILL.md @@ -1,5 +1,6 @@ --- description: Build, commit, push & version bump workflow - automates the complete release cycle +allowed-tools: Read Write Edit Bash Grep Glob --- # Ship Release From dfc009a4d6c2f78cf37f48031e16c11500c0c9df Mon Sep 17 00:00:00 2001 From: Adrien Eppling Date: Tue, 14 Apr 2026 22:50:52 +0200 Subject: [PATCH 19/44] restore cursor and copilot hook scripts for backward compat --- hooks/copilot/test-rtk-rewrite.sh | 293 ++++++++++++++++++++++++++++++ hooks/cursor/rtk-rewrite.sh | 54 ++++++ 2 files changed, 347 insertions(+) create mode 100644 hooks/copilot/test-rtk-rewrite.sh create mode 100644 hooks/cursor/rtk-rewrite.sh diff --git a/hooks/copilot/test-rtk-rewrite.sh b/hooks/copilot/test-rtk-rewrite.sh new file mode 100644 index 000000000..f1cca9497 --- /dev/null +++ b/hooks/copilot/test-rtk-rewrite.sh @@ -0,0 +1,293 @@ 
+#!/usr/bin/env bash +# Test suite for rtk hook (cross-platform preToolUse handler). +# Feeds mock preToolUse JSON through `rtk hook` and verifies allow/deny decisions. +# +# Usage: bash hooks/test-copilot-rtk-rewrite.sh +# +# Copilot CLI input format: +# {"toolName":"bash","toolArgs":"{\"command\":\"...\"}"} +# Output on intercept: {"permissionDecision":"deny","permissionDecisionReason":"..."} +# +# VS Code Copilot Chat input format: +# {"tool_name":"Bash","tool_input":{"command":"..."}} +# Output on intercept: {"hookSpecificOutput":{"permissionDecision":"allow","updatedInput":{...}}} +# +# Output on pass-through: empty (exit 0) + +RTK="${RTK:-rtk}" +PASS=0 +FAIL=0 +TOTAL=0 + +# Colors +GREEN='\033[32m' +RED='\033[31m' +DIM='\033[2m' +RESET='\033[0m' + +# Build a Copilot CLI preToolUse input JSON +copilot_bash_input() { + local cmd="$1" + local tool_args + tool_args=$(jq -cn --arg cmd "$cmd" '{"command":$cmd}') + jq -cn --arg ta "$tool_args" '{"toolName":"bash","toolArgs":$ta}' +} + +# Build a VS Code Copilot Chat preToolUse input JSON +vscode_bash_input() { + local cmd="$1" + jq -cn --arg cmd "$cmd" '{"tool_name":"Bash","tool_input":{"command":$cmd}}' +} + +# Build a non-bash tool input +tool_input() { + local tool_name="$1" + jq -cn --arg t "$tool_name" '{"toolName":$t,"toolArgs":"{}"}' +} + +# Assert Copilot CLI: hook denies and reason contains the expected rtk command +test_deny() { + local description="$1" + local input_cmd="$2" + local expected_rtk="$3" + TOTAL=$((TOTAL + 1)) + + local output + output=$(copilot_bash_input "$input_cmd" | "$RTK" hook 2>/dev/null) || true + + local decision reason + decision=$(echo "$output" | jq -r '.permissionDecision // empty' 2>/dev/null) + reason=$(echo "$output" | jq -r '.permissionDecisionReason // empty' 2>/dev/null) + + if [ "$decision" = "deny" ] && echo "$reason" | grep -qF "$expected_rtk"; then + printf " ${GREEN}DENY${RESET} %s ${DIM}→ %s${RESET}\n" "$description" "$expected_rtk" + PASS=$((PASS + 1)) + else + 
printf " ${RED}FAIL${RESET} %s\n" "$description" + printf " expected decision: deny, reason containing: %s\n" "$expected_rtk" + printf " actual decision: %s\n" "$decision" + printf " actual reason: %s\n" "$reason" + FAIL=$((FAIL + 1)) + fi +} + +# Assert VS Code Copilot Chat: hook returns updatedInput (allow) with rewritten command +test_vscode_rewrite() { + local description="$1" + local input_cmd="$2" + local expected_rtk="$3" + TOTAL=$((TOTAL + 1)) + + local output + output=$(vscode_bash_input "$input_cmd" | "$RTK" hook 2>/dev/null) || true + + local decision updated_cmd + decision=$(echo "$output" | jq -r '.hookSpecificOutput.permissionDecision // empty' 2>/dev/null) + updated_cmd=$(echo "$output" | jq -r '.hookSpecificOutput.updatedInput.command // empty' 2>/dev/null) + + if [ "$decision" = "allow" ] && echo "$updated_cmd" | grep -qF "$expected_rtk"; then + printf " ${GREEN}REWRITE${RESET} %s ${DIM}→ %s${RESET}\n" "$description" "$updated_cmd" + PASS=$((PASS + 1)) + else + printf " ${RED}FAIL${RESET} %s\n" "$description" + printf " expected decision: allow, updatedInput containing: %s\n" "$expected_rtk" + printf " actual decision: %s\n" "$decision" + printf " actual updatedInput: %s\n" "$updated_cmd" + FAIL=$((FAIL + 1)) + fi +} + +# Assert the hook emits no output (pass-through) +test_allow() { + local description="$1" + local input="$2" + TOTAL=$((TOTAL + 1)) + + local output + output=$(echo "$input" | "$RTK" hook 2>/dev/null) || true + + if [ -z "$output" ]; then + printf " ${GREEN}PASS${RESET} %s ${DIM}→ (allow)${RESET}\n" "$description" + PASS=$((PASS + 1)) + else + local decision + decision=$(echo "$output" | jq -r '.permissionDecision // .hookSpecificOutput.permissionDecision // empty' 2>/dev/null) + printf " ${RED}FAIL${RESET} %s\n" "$description" + printf " expected: (no output)\n" + printf " actual: permissionDecision=%s\n" "$decision" + FAIL=$((FAIL + 1)) + fi +} + +echo "============================================" +echo " RTK Hook Test Suite (rtk 
hook)" +echo "============================================" +echo "" + +# ---- SECTION 1: Copilot CLI — commands that should be denied ---- +echo "--- Copilot CLI: intercepted (deny with rtk suggestion) ---" + +test_deny "git status" \ + "git status" \ + "rtk git status" + +test_deny "git log --oneline -10" \ + "git log --oneline -10" \ + "rtk git log" + +test_deny "git diff HEAD" \ + "git diff HEAD" \ + "rtk git diff" + +test_deny "cargo test" \ + "cargo test" \ + "rtk cargo test" + +test_deny "cargo clippy --all-targets" \ + "cargo clippy --all-targets" \ + "rtk cargo clippy" + +test_deny "cargo build" \ + "cargo build" \ + "rtk cargo build" + +test_deny "grep -rn pattern src/" \ + "grep -rn pattern src/" \ + "rtk grep" + +test_deny "gh pr list" \ + "gh pr list" \ + "rtk gh" + +echo "" + +# ---- SECTION 2: VS Code Copilot Chat — commands that should be rewritten via updatedInput ---- +echo "--- VS Code Copilot Chat: intercepted (updatedInput rewrite) ---" + +test_vscode_rewrite "git status" \ + "git status" \ + "rtk git status" + +test_vscode_rewrite "cargo test" \ + "cargo test" \ + "rtk cargo test" + +test_vscode_rewrite "gh pr list" \ + "gh pr list" \ + "rtk gh" + +echo "" + +# ---- SECTION 3: Pass-through cases ---- +echo "--- Pass-through (allow silently) ---" + +test_allow "Copilot CLI: already rtk: rtk git status" \ + "$(copilot_bash_input "rtk git status")" + +test_allow "Copilot CLI: already rtk: rtk cargo test" \ + "$(copilot_bash_input "rtk cargo test")" + +test_allow "Copilot CLI: heredoc" \ + "$(copilot_bash_input "cat <<'EOF' +hello +EOF")" + +test_allow "Copilot CLI: unknown command: htop" \ + "$(copilot_bash_input "htop")" + +test_allow "Copilot CLI: unknown command: echo" \ + "$(copilot_bash_input "echo hello world")" + +test_allow "Copilot CLI: non-bash tool: view" \ + "$(tool_input "view")" + +test_allow "Copilot CLI: non-bash tool: edit" \ + "$(tool_input "edit")" + +test_allow "VS Code: already rtk" \ + "$(vscode_bash_input "rtk git status")" 
+ +test_allow "VS Code: non-bash tool: editFiles" \ + "$(jq -cn '{"tool_name":"editFiles"}')" + +echo "" + +# ---- SECTION 4: Output format assertions ---- +echo "--- Output format ---" + +# Copilot CLI output format +TOTAL=$((TOTAL + 1)) +raw_output=$(copilot_bash_input "git status" | "$RTK" hook 2>/dev/null) + +if echo "$raw_output" | jq . >/dev/null 2>&1; then + printf " ${GREEN}PASS${RESET} Copilot CLI: output is valid JSON\n" + PASS=$((PASS + 1)) +else + printf " ${RED}FAIL${RESET} Copilot CLI: output is not valid JSON: %s\n" "$raw_output" + FAIL=$((FAIL + 1)) +fi + +TOTAL=$((TOTAL + 1)) +decision=$(echo "$raw_output" | jq -r '.permissionDecision') +if [ "$decision" = "deny" ]; then + printf " ${GREEN}PASS${RESET} Copilot CLI: permissionDecision == \"deny\"\n" + PASS=$((PASS + 1)) +else + printf " ${RED}FAIL${RESET} Copilot CLI: expected \"deny\", got \"%s\"\n" "$decision" + FAIL=$((FAIL + 1)) +fi + +TOTAL=$((TOTAL + 1)) +reason=$(echo "$raw_output" | jq -r '.permissionDecisionReason') +if echo "$reason" | grep -qE '`rtk [^`]+`'; then + printf " ${GREEN}PASS${RESET} Copilot CLI: reason contains backtick-quoted rtk command ${DIM}→ %s${RESET}\n" "$reason" + PASS=$((PASS + 1)) +else + printf " ${RED}FAIL${RESET} Copilot CLI: reason missing backtick-quoted command: %s\n" "$reason" + FAIL=$((FAIL + 1)) +fi + +# VS Code output format +TOTAL=$((TOTAL + 1)) +vscode_output=$(vscode_bash_input "git status" | "$RTK" hook 2>/dev/null) + +if echo "$vscode_output" | jq . 
>/dev/null 2>&1; then + printf " ${GREEN}PASS${RESET} VS Code: output is valid JSON\n" + PASS=$((PASS + 1)) +else + printf " ${RED}FAIL${RESET} VS Code: output is not valid JSON: %s\n" "$vscode_output" + FAIL=$((FAIL + 1)) +fi + +TOTAL=$((TOTAL + 1)) +vscode_decision=$(echo "$vscode_output" | jq -r '.hookSpecificOutput.permissionDecision') +if [ "$vscode_decision" = "allow" ]; then + printf " ${GREEN}PASS${RESET} VS Code: hookSpecificOutput.permissionDecision == \"allow\"\n" + PASS=$((PASS + 1)) +else + printf " ${RED}FAIL${RESET} VS Code: expected \"allow\", got \"%s\"\n" "$vscode_decision" + FAIL=$((FAIL + 1)) +fi + +TOTAL=$((TOTAL + 1)) +vscode_updated=$(echo "$vscode_output" | jq -r '.hookSpecificOutput.updatedInput.command') +if echo "$vscode_updated" | grep -q "^rtk "; then + printf " ${GREEN}PASS${RESET} VS Code: updatedInput.command starts with rtk ${DIM}→ %s${RESET}\n" "$vscode_updated" + PASS=$((PASS + 1)) +else + printf " ${RED}FAIL${RESET} VS Code: updatedInput.command should start with rtk: %s\n" "$vscode_updated" + FAIL=$((FAIL + 1)) +fi + +echo "" + +# ---- SUMMARY ---- +echo "============================================" +if [ $FAIL -eq 0 ]; then + printf " ${GREEN}ALL $TOTAL TESTS PASSED${RESET}\n" +else + printf " ${RED}$FAIL FAILED${RESET} / $TOTAL total ($PASS passed)\n" +fi +echo "============================================" + +exit $FAIL diff --git a/hooks/cursor/rtk-rewrite.sh b/hooks/cursor/rtk-rewrite.sh new file mode 100644 index 000000000..4b80b260c --- /dev/null +++ b/hooks/cursor/rtk-rewrite.sh @@ -0,0 +1,54 @@ +#!/usr/bin/env bash +# rtk-hook-version: 1 +# RTK Cursor Agent hook — rewrites shell commands to use rtk for token savings. +# Works with both Cursor editor and cursor-cli (they share ~/.cursor/hooks.json). +# Cursor preToolUse hook format: receives JSON on stdin, returns JSON on stdout. 
+# Requires: rtk >= 0.23.0, jq +# +# This is a thin delegating hook: all rewrite logic lives in `rtk rewrite`, +# which is the single source of truth (src/discover/registry.rs). +# To add or change rewrite rules, edit the Rust registry — not this file. + +if ! command -v jq &>/dev/null; then + echo "[rtk] WARNING: jq is not installed. Hook cannot rewrite commands. Install jq: https://jqlang.github.io/jq/download/" >&2 + exit 0 +fi + +if ! command -v rtk &>/dev/null; then + echo "[rtk] WARNING: rtk is not installed or not in PATH. Hook cannot rewrite commands. Install: https://github.com/rtk-ai/rtk#installation" >&2 + exit 0 +fi + +# Version guard: rtk rewrite was added in 0.23.0. +RTK_VERSION=$(rtk --version 2>/dev/null | grep -oE '[0-9]+\.[0-9]+\.[0-9]+' | head -1) +if [ -n "$RTK_VERSION" ]; then + MAJOR=$(echo "$RTK_VERSION" | cut -d. -f1) + MINOR=$(echo "$RTK_VERSION" | cut -d. -f2) + if [ "$MAJOR" -eq 0 ] && [ "$MINOR" -lt 23 ]; then + echo "[rtk] WARNING: rtk $RTK_VERSION is too old (need >= 0.23.0). Upgrade: cargo install rtk" >&2 + exit 0 + fi +fi + +INPUT=$(cat) +CMD=$(echo "$INPUT" | jq -r '.tool_input.command // empty') + +if [ -z "$CMD" ]; then + echo '{}' + exit 0 +fi + +# Delegate all rewrite logic to the Rust binary. +# rtk rewrite exits 1 when there's no rewrite — hook passes through silently. +REWRITTEN=$(rtk rewrite "$CMD" 2>/dev/null) || { echo '{}'; exit 0; } + +# No change — nothing to do. 
+if [ "$CMD" = "$REWRITTEN" ]; then + echo '{}' + exit 0 +fi + +jq -n --arg cmd "$REWRITTEN" '{ + "permission": "allow", + "updated_input": { "command": $cmd } +}' From 76e253b5dc27fea16969737a485537a1267d9105 Mon Sep 17 00:00:00 2001 From: mariuszs Date: Tue, 14 Apr 2026 21:14:32 +0200 Subject: [PATCH 20/44] =?UTF-8?q?feat(mvn):=20add=20Maven=20(Java)=20filte?= =?UTF-8?q?r=20module=20=E2=80=94=20test,=20compile,=20checkstyle:check,?= =?UTF-8?q?=20dependency:tree?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Adds `rtk mvn` with four filters: - `mvn test` — state machine parser (Preamble → Testing → Summary → Done) that accumulates counts across T E S T S sections for multi-module builds and surefire+failsafe (`mvn verify`). - `mvn compile` — line filter routed also from `process-classes` / `test-compile` via a shared `COMPILE_LIKE_GOALS` tuple table. - `mvn checkstyle:check` — compact violation rewrite with Help-boilerplate strip. - `mvn dependency:tree` — boilerplate/duplicate strip, transitive collapse. Auto-detects `./mvnw` wrapper; falls back to system `mvn`. Other goals (e.g. `spring-boot:run`, `install`) stream through unchanged for safety with metrics-only tracking. Discover rules rewrite bare `mvn`/`mvnw` invocations to `rtk mvn`. Replaces the previous TOML filter `src/filters/mvn-build.toml`. 38 unit tests + 14 real-output fixtures covering pass/fail/multi-module cases; verify-fixture savings ≥ 90%. 
--- CHANGELOG.md | 4 + README.md | 4 + src/cmds/java/README.md | 14 + src/cmds/java/mod.rs | 1 + src/cmds/java/mvn_cmd.rs | 1858 +++++++++++++++++ src/cmds/mod.rs | 1 + src/core/toml_filter.rs | 11 +- src/discover/rules.rs | 6 +- src/filters/mvn-build.toml | 44 - src/main.rs | 45 + tests/fixtures/mvn_checkstyle_clean.txt | 33 + .../fixtures/mvn_checkstyle_clean_native.txt | 37 + tests/fixtures/mvn_checkstyle_violations.txt | 44 + tests/fixtures/mvn_compile_auth.txt | 132 ++ tests/fixtures/mvn_dep_tree_beacon.txt | 652 ++++++ tests/fixtures/mvn_dep_tree_conflicts.txt | 16 + tests/fixtures/mvn_dep_tree_large.txt | 142 ++ tests/fixtures/mvn_dep_tree_simple.txt | 22 + tests/fixtures/mvn_test_fail_auth.txt | 95 + tests/fixtures/mvn_test_large_suite.txt | 204 ++ tests/fixtures/mvn_test_many_failures.txt | 115 + tests/fixtures/mvn_test_multimodule.txt | 118 ++ tests/fixtures/mvn_test_pass_large_ansi.txt | 53 + tests/fixtures/mvn_test_pass_mavenmcp.txt | 35 + tests/fixtures/mvn_verify_auth.txt | 50 + 25 files changed, 3683 insertions(+), 53 deletions(-) create mode 100644 src/cmds/java/README.md create mode 100644 src/cmds/java/mod.rs create mode 100644 src/cmds/java/mvn_cmd.rs delete mode 100644 src/filters/mvn-build.toml create mode 100644 tests/fixtures/mvn_checkstyle_clean.txt create mode 100644 tests/fixtures/mvn_checkstyle_clean_native.txt create mode 100644 tests/fixtures/mvn_checkstyle_violations.txt create mode 100644 tests/fixtures/mvn_compile_auth.txt create mode 100644 tests/fixtures/mvn_dep_tree_beacon.txt create mode 100644 tests/fixtures/mvn_dep_tree_conflicts.txt create mode 100644 tests/fixtures/mvn_dep_tree_large.txt create mode 100644 tests/fixtures/mvn_dep_tree_simple.txt create mode 100644 tests/fixtures/mvn_test_fail_auth.txt create mode 100644 tests/fixtures/mvn_test_large_suite.txt create mode 100644 tests/fixtures/mvn_test_many_failures.txt create mode 100644 tests/fixtures/mvn_test_multimodule.txt create mode 100644 
tests/fixtures/mvn_test_pass_large_ansi.txt create mode 100644 tests/fixtures/mvn_test_pass_mavenmcp.txt create mode 100644 tests/fixtures/mvn_verify_auth.txt diff --git a/CHANGELOG.md b/CHANGELOG.md index a45309776..0d5c4e248 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -7,6 +7,10 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 ## [Unreleased] +### Features + +* **mvn:** add Maven (Java) filter module — test, compile, checkstyle:check, dependency:tree ([#1089](https://github.com/rtk-ai/rtk/pull/1089)) + ### Bug Fixes * **git:** remove `-u` short alias from `--ultra-compact` to fix `git push -u` upstream tracking ([#1086](https://github.com/rtk-ai/rtk/issues/1086)) diff --git a/README.md b/README.md index 6228a689d..c0c6071e9 100644 --- a/README.md +++ b/README.md @@ -47,6 +47,7 @@ rtk filters and compresses command outputs before they reach your LLM context. S | `git log` | 5x | 2,500 | 500 | -80% | | `git add/commit/push` | 8x | 1,600 | 120 | -92% | | `cargo test` / `npm test` | 5x | 25,000 | 2,500 | -90% | +| `mvn test` | 3x | 30,000 | 300 | -99% | | `ruff check` | 3x | 3,000 | 600 | -80% | | `pytest` | 4x | 8,000 | 800 | -90% | | `go test` | 3x | 6,000 | 600 | -90% | @@ -179,6 +180,7 @@ rtk go test # Go tests (NDJSON, -90%) rtk cargo test # Cargo tests (-90%) rtk rake test # Ruby minitest (-90%) rtk rspec # RSpec tests (JSON, -60%+) +rtk mvn test # Maven tests (-99%) rtk err # Filter errors only from any command rtk test # Generic test wrapper - failures only (-90%) ``` @@ -195,6 +197,8 @@ rtk cargo clippy # Cargo clippy (-80%) rtk ruff check # Python linting (JSON, -80%) rtk golangci-lint run # Go linting (JSON, -85%) rtk rubocop # Ruby linting (JSON, -60%+) +rtk mvn build # Maven build (-90%) +rtk mvn dependency:tree # Maven dependency tree (-60%+) ``` ### Package Managers diff --git a/src/cmds/java/README.md b/src/cmds/java/README.md new file mode 100644 index 000000000..3f7ffcd02 --- /dev/null +++ 
b/src/cmds/java/README.md @@ -0,0 +1,14 @@ +# Java Ecosystem + +> Part of [`src/cmds/`](../README.md) — see also [docs/contributing/TECHNICAL.md](../../../docs/contributing/TECHNICAL.md) + +## Specifics + +- **mvn_cmd.rs** handles Maven (`mvn`) and Maven Wrapper (`mvnw`) commands +- Auto-detects `mvnw` wrapper in project root; falls back to system `mvn` +- `mvn test` uses a state-machine parser (Preamble → Testing → Summary → Done) for 97-99%+ savings on real-world output +- `mvn compile` uses line filtering to strip `[INFO]` noise, download progress, JVM/native-access warnings, and plugin chatter (jOOQ codegen, Liquibase, npm/React builds, typescript-generator). Also routes `process-classes` and `test-compile` through the same filter (same noise profile) +- `mvn checkstyle:check` (aliased as `checkstyle`) compacts violation lines to `path:line:col [Rule] message`, strips mvn startup noise and Help-link boilerplate, keeps `N Checkstyle violations` summary and BUILD SUCCESS/FAILURE +- `mvn dependency:tree` strips "omitted for duplicate" lines, "version managed from" annotations, and collapses deep transitive branches +- Unknown goals stream via `cmd.status()` passthrough (safe for long-running goals like `spring-boot:run`); rare lifecycle phases (`package`, `install`, `verify`, `clean`, `deploy`) also passthrough — filtered only when the output shape matches compile +- Routing via Clap sub-enum with `#[command(external_subcommand)] Other` for unknown goals; compile-like and checkstyle goals received as `Other` are auto-re-dispatched by `route_goal` to the right filter diff --git a/src/cmds/java/mod.rs b/src/cmds/java/mod.rs new file mode 100644 index 000000000..4685e68f0 --- /dev/null +++ b/src/cmds/java/mod.rs @@ -0,0 +1 @@ +automod::dir!(pub "src/cmds/java"); diff --git a/src/cmds/java/mvn_cmd.rs b/src/cmds/java/mvn_cmd.rs new file mode 100644 index 000000000..7e23fcf7c --- /dev/null +++ b/src/cmds/java/mvn_cmd.rs @@ -0,0 +1,1858 @@ +//! 
Filters Maven (`mvn`) command output — test results, build errors. +//! +//! State machine parser for `mvn test` output with states: +//! Preamble -> Testing -> Summary -> Done. +//! Strips thousands of noise lines to compact failure reports (99%+ savings). + +use crate::core::runner; +use crate::core::tracking; +use crate::core::utils::{exit_code_from_status, resolved_command, strip_ansi, truncate}; +use anyhow::{Context, Result}; +use lazy_static::lazy_static; +use regex::Regex; +use std::ffi::OsString; +use std::fmt::Write as _; +use std::path::Path; + +const INFO_TAG: &str = "[INFO]"; +const ERROR_TAG: &str = "[ERROR]"; +const WARNING_TAG: &str = "[WARNING]"; + +lazy_static! { + static ref TESTS_RUN_RE: Regex = + Regex::new(r"Tests run:\s*(\d+),\s*Failures:\s*(\d+),\s*Errors:\s*(\d+),\s*Skipped:\s*(\d+)") + .unwrap(); + static ref FAILURE_HEADER_RE: Regex = + Regex::new(r"^\[ERROR\]\s+(\S+\.\S+)\s+--\s+Time elapsed:.*<<<\s+(FAILURE|ERROR)!") + .unwrap(); + static ref TOTAL_TIME_RE: Regex = + Regex::new(r"Total time:\s+(.+)") + .unwrap(); + static ref VERSION_MANAGED_RE: Regex = + Regex::new(r"\s*\(version managed from [^)]+\)") + .unwrap(); + /// Code generator config params: `dialect : POSTGRES_15` + /// Also matches parens/hyphens in keys: `interfaces (immutable) : false` + static ref CODEGEN_CONFIG_RE: Regex = + Regex::new(r"^[\w][\w\s()\-]*\s{2,}:(\s|$)") + .unwrap(); + /// Frontend bundle size lines: `257.55 kB build/static/js/main.js` + static ref BUNDLE_SIZE_RE: Regex = + Regex::new(r"^\d[\d.]*\s+[kKMG]?B\s") + .unwrap(); + /// Checkstyle violation lines: + /// `[ERROR] :[[,]] () : ` + /// (also matches `[WARN]` severity for plugins configured with warn level). 
+ static ref CHECKSTYLE_VIOLATION_RE: Regex = + Regex::new(r"^\[(?:ERROR|WARN)\] (.+?):\[(\d+)(?:,(\d+))?\] \(\w+\) (\w+): (.+)$") + .unwrap(); + /// mvnd / maven 3.9+ extension-loader noise: + /// `[INFO] Loaded 22539 auto-discovered prefixes for remote repository central (...)` + static ref PREFIX_LOAD_RE: Regex = + Regex::new(r"Loaded\s+\d+\s+auto-discovered prefixes").unwrap(); +} + +/// JVM warning lines emitted by Java 24+ (restricted methods, native access, +/// terminally-deprecated Unsafe). These have NO `[INFO]/[ERROR]/[WARNING]` +/// prefix — Maven wrappers surface them as bare text. They are always noise +/// for our purposes. +const JVM_WARNING_PREFIXES: &[&str] = &[ + "WARNING: A restricted method", + "WARNING: java.lang.System::", + "WARNING: sun.misc.Unsafe", + "WARNING: Use --enable-native-access", + "WARNING: Restricted methods will be blocked", + "WARNING: A terminally deprecated", + "WARNING: Please consider reporting", +]; + +/// Returns true for mvn startup / JVM / os-detection noise that is not +/// command-specific (applies to compile, checkstyle, and most goals). +/// Expects a raw (non-trimmed) line or a trimmed line — both work. +fn is_mvn_startup_noise(line: &str) -> bool { + let t = line.trim_start(); + + // mvnd / maven 3.9+ extension-loader progress + if PREFIX_LOAD_RE.is_match(t) { + return true; + } + + // JVM restricted-method / native-access warnings (no Maven prefix) + for p in JVM_WARNING_PREFIXES { + if t.starts_with(p) { + return true; + } + } + + // os-maven-plugin detection output: `[INFO] os.detected.name: linux` etc. + if t.starts_with("[INFO] os.detected") { + return true; + } + + false +} + +/// Auto-detect mvnw wrapper; fall back to system `mvn`. +fn mvn_command() -> std::process::Command { + if Path::new("mvnw").exists() { + resolved_command("./mvnw") + } else { + resolved_command("mvn") + } +} + +/// Run `mvn test` with state-machine filtered output. 
+pub fn run_test(args: &[String], verbose: u8) -> Result { + let mut cmd = mvn_command(); + cmd.arg("test"); + + for arg in args { + cmd.arg(arg); + } + + if verbose > 0 { + eprintln!("Running: mvn test {}", args.join(" ")); + } + + runner::run_filtered( + cmd, + "mvn test", + &args.join(" "), + filter_mvn_test, + runner::RunOptions::with_tee("mvn_test"), + ) +} + +/// Run `mvn compile` with line-filtered output. +/// +/// `compile` is itself a Maven lifecycle phase (not a goal name we invented), +/// so no implicit default is added when `args` is empty — `mvn compile` runs +/// the compile phase directly. +pub fn run_compile(args: &[String], verbose: u8) -> Result { + run_compile_like("compile", args, verbose) +} + +/// Shared implementation for compile-phase-like goals: runs `mvn ` +/// through `filter_mvn_compile`. Used directly by `run_compile` and reused by +/// `run_other` to route `process-classes` / `test-compile` through the same +/// filter while preserving the original goal name in the invocation and in +/// the tracking label. +fn run_compile_like(goal: &str, args: &[String], verbose: u8) -> Result { + let mut cmd = mvn_command(); + cmd.arg(goal); + for arg in args { + cmd.arg(arg); + } + + if verbose > 0 { + eprintln!("Running: mvn {} {}", goal, args.join(" ")); + } + + let (tool_name, tee_label) = compile_like_labels(goal); + + runner::run_filtered( + cmd, + tool_name, + &args.join(" "), + filter_mvn_compile, + runner::RunOptions::with_tee(tee_label), + ) +} + +/// Run `mvn checkstyle:check` with compact output — strips mvn/JVM startup +/// noise, keeps violations and BUILD SUCCESS/FAILURE summary. 
+pub fn run_checkstyle(args: &[String], verbose: u8) -> Result { + let mut cmd = mvn_command(); + cmd.arg("checkstyle:check"); + for arg in args { + cmd.arg(arg); + } + + if verbose > 0 { + eprintln!("Running: mvn checkstyle:check {}", args.join(" ")); + } + + runner::run_filtered( + cmd, + "mvn checkstyle:check", + &args.join(" "), + filter_mvn_checkstyle, + runner::RunOptions::with_tee("mvn_checkstyle"), + ) +} + +/// Run `mvn dependency:tree` with filtered output — strips duplicates and boilerplate. +pub fn run_dep_tree(args: &[String], verbose: u8) -> Result { + let mut cmd = mvn_command(); + cmd.arg("dependency:tree"); + + for arg in args { + cmd.arg(arg); + } + + if verbose > 0 { + eprintln!("Running: mvn dependency:tree {}", args.join(" ")); + } + + runner::run_filtered( + cmd, + "mvn dependency:tree", + &args.join(" "), + filter_mvn_dep_tree, + runner::RunOptions::with_tee("mvn_dep_tree"), + ) +} + +/// Goals whose output looks like `mvn compile` (same noise profile: plugin +/// codegen, npm lifecycle, Liquibase, Docker). Tuples are +/// `(goal, tool_name, tee_label)` — single source of truth for routing, +/// tracking labels, and tee filenames. +const COMPILE_LIKE_GOALS: &[(&str, &str, &str)] = &[ + ("compile", "mvn compile", "mvn_compile"), + ("process-classes", "mvn process-classes", "mvn_process_classes"), + ("test-compile", "mvn test-compile", "mvn_test_compile"), +]; + +/// Look up the `(tool_name, tee_label)` pair for a compile-like goal. Callers +/// are gated on `route_goal` / `COMPILE_LIKE_GOALS`, so the fallback is only +/// reached if that invariant is violated. +fn compile_like_labels(goal: &str) -> (&'static str, &'static str) { + for &(g, tool, tee) in COMPILE_LIKE_GOALS { + if g == goal { + return (tool, tee); + } + } + ("mvn compile", "mvn_compile") +} + +/// Routing decision for a raw mvn subcommand seen on `run_other` — i.e. the +/// first positional arg after `rtk mvn`. Pure function, easy to unit-test. 
+#[derive(Debug, PartialEq, Eq)] +enum GoalRouting { + /// Re-dispatch to `run_compile` (filter_mvn_compile). + Compile, + /// Re-dispatch to `run_checkstyle` (filter_mvn_checkstyle). + Checkstyle, + /// Stream unchanged via `status()`; tracked for metrics only. + Passthrough, +} + +fn route_goal(subcommand: &str) -> GoalRouting { + if COMPILE_LIKE_GOALS.iter().any(|(g, _, _)| *g == subcommand) { + return GoalRouting::Compile; + } + if subcommand == "checkstyle:check" || subcommand == "checkstyle" { + return GoalRouting::Checkstyle; + } + GoalRouting::Passthrough +} + +/// Convert `args[1..]` into `Vec`, lossy-decoding any non-UTF-8 bytes. +/// The subcommand (args[0]) is stripped so callers can re-dispatch to a +/// `run_*` function that prepends its own goal name. +fn trailing_args(args: &[OsString]) -> Vec { + args.iter() + .skip(1) + .map(|a| a.to_string_lossy().into_owned()) + .collect() +} + +/// Handles mvn subcommands not matched by dedicated Clap variants. +/// Compile-like goals go through `filter_mvn_compile`; `checkstyle` and +/// `checkstyle:check` go through `filter_mvn_checkstyle`; everything else +/// streams directly via `status()` (safe for long-running goals like +/// `spring-boot:run`, and metric-only for rare ones like `package`). +pub fn run_other(args: &[OsString], verbose: u8) -> Result { + if args.is_empty() { + anyhow::bail!("mvn: no subcommand specified"); + } + + let subcommand = args[0].to_string_lossy(); + + if verbose > 0 { + eprintln!("Running: mvn {} ...", subcommand); + } + + match route_goal(&subcommand) { + GoalRouting::Compile => { + return run_compile_like(&subcommand, &trailing_args(args), verbose); + } + GoalRouting::Checkstyle => { + return run_checkstyle(&trailing_args(args), verbose); + } + GoalRouting::Passthrough => {} + } + + // Everything else: passthrough with streaming (safe for spring-boot:run etc.) 
+ let timer = tracking::TimedExecution::start(); + + let mut cmd = mvn_command(); + for arg in args { + cmd.arg(arg); + } + + let status = cmd + .status() + .with_context(|| format!("Failed to run mvn {}", subcommand))?; + + let args_str = tracking::args_display(args); + timer.track_passthrough( + &format!("mvn {}", args_str), + &format!("rtk mvn {} (passthrough)", args_str), + ); + + Ok(exit_code_from_status(&status, "mvn")) +} + +// --------------------------------------------------------------------------- +// State machine parser for mvn test output +// --------------------------------------------------------------------------- + +const MAX_DETAIL_LINES: usize = 3; +const MAX_FAILURES_SHOWN: usize = 10; +const MAX_LINE_LENGTH: usize = 200; + +#[derive(Debug, PartialEq)] +enum TestParseState { + Preamble, + Testing, + Summary, + Done, +} + +#[derive(Default)] +struct TestCounts { + run: u32, + failures: u32, + errors: u32, + skipped: u32, +} + +impl TestCounts { + fn add(&mut self, other: &Self) { + self.run += other.run; + self.failures += other.failures; + self.errors += other.errors; + self.skipped += other.skipped; + } +} + +struct FailureEntry { + name: String, + details: Vec, +} + +/// Parse the four count fields from a `TESTS_RUN_RE` captures. The regex +/// guarantees four numeric groups so defaulting to 0 is only a safety net. +fn parse_counts(caps: ®ex::Captures) -> TestCounts { + TestCounts { + run: caps.get(1).map_or(0, |m| m.as_str().parse().unwrap_or(0)), + failures: caps.get(2).map_or(0, |m| m.as_str().parse().unwrap_or(0)), + errors: caps.get(3).map_or(0, |m| m.as_str().parse().unwrap_or(0)), + skipped: caps.get(4).map_or(0, |m| m.as_str().parse().unwrap_or(0)), + } +} + +/// Filter `mvn test` output using a state machine parser. 
///
/// States: Preamble -> Testing -> Summary -> Done
/// - Preamble: skip everything before "T E S T S" marker
/// - Testing: collect failure details from [ERROR] headers and assertion lines
/// - Summary: parse final "Tests run:" line, BUILD SUCCESS/FAILURE, Total time
/// - Done: stop at Help boilerplate
fn filter_mvn_test(output: &str) -> String {
    let clean = strip_ansi(output);
    let mut state = TestParseState::Preamble;

    // Capped list of failures actually shown; total_failures_seen keeps the
    // true count so the "+N more" trailer stays accurate.
    let mut failures: Vec<FailureEntry> = Vec::with_capacity(MAX_FAILURES_SHOWN);
    let mut current_failure: Option<FailureEntry> = None;

    // `section` holds the counts for the module currently being parsed;
    // folded into `cumulative` at each module boundary.
    let mut cumulative = TestCounts::default();
    let mut section: Option<TestCounts> = None;
    let mut total_time: Option<String> = None;
    let mut total_failures_seen: usize = 0;

    for line in clean.lines() {
        let trimmed = line.trim();
        let stripped = strip_maven_prefix(trimmed);

        // Global transition: T E S T S marker resets to Testing from any state
        // (multi-module builds emit this marker per module)
        if stripped.contains("T E S T S") {
            if let Some(s) = section.take() {
                cumulative.add(&s);
            }
            state = TestParseState::Testing;
            continue;
        }

        match state {
            TestParseState::Preamble => {}
            TestParseState::Testing => {
                // "Results:" ends the per-test stream; flush any open failure.
                if stripped == "Results:" {
                    if let Some(f) = current_failure.take() {
                        total_failures_seen += 1;
                        if failures.len() < MAX_FAILURES_SHOWN {
                            failures.push(f);
                        }
                    }
                    state = TestParseState::Summary;
                    continue;
                }

                // New failure header; close the previous entry first.
                if let Some(caps) = FAILURE_HEADER_RE.captures(trimmed) {
                    if let Some(f) = current_failure.take() {
                        total_failures_seen += 1;
                        if failures.len() < MAX_FAILURES_SHOWN {
                            failures.push(f);
                        }
                    }
                    let test_name = caps.get(1).map_or("", |m| m.as_str()).to_string();
                    current_failure = Some(FailureEntry {
                        name: test_name,
                        details: Vec::new(),
                    });
                    continue;
                }

                // Per-plugin summary line inside the Testing block:
                // "Tests run: N, Failures: N, Errors: N, Skipped: N" with no
                // "-- in" suffix. Priority over any later Summary-state
                // match so that the reactor aggregate (which appears after the
                // LAST module's Summary block in multi-module builds) does not
                // overwrite the real per-module total.
                if !trimmed.contains("-- in") {
                    if let Some(caps) = TESTS_RUN_RE.captures(stripped) {
                        section = Some(parse_counts(&caps));
                        continue;
                    }
                }

                // Accumulate detail lines for the open failure, skipping
                // framework frames, boilerplate, and the "<<< FAILURE" echo.
                if let Some(ref mut f) = current_failure {
                    if f.details.len() >= MAX_DETAIL_LINES {
                        continue;
                    }
                    if is_framework_frame(stripped)
                        || is_maven_boilerplate(trimmed)
                        || stripped.is_empty()
                        || (trimmed.starts_with(ERROR_TAG) && stripped.contains("<<<"))
                    {
                        continue;
                    }
                    f.details.push(stripped.to_string());
                }
            }
            TestParseState::Summary => {
                if is_maven_boilerplate(trimmed) || stripped.starts_with("Failures:") {
                    continue;
                }

                // Only take the Summary-state counts if the Testing state did
                // not already capture a per-module total (see note above).
                if section.is_none() {
                    if let Some(caps) = TESTS_RUN_RE.captures(stripped) {
                        section = Some(parse_counts(&caps));
                    }
                }

                if let Some(caps) = TOTAL_TIME_RE.captures(stripped) {
                    total_time = Some(caps.get(1).map_or("", |m| m.as_str()).trim().to_string());
                    state = TestParseState::Done;
                }
            }
            TestParseState::Done => break,
        }
    }

    // Flush the last module's counts.
    if let Some(s) = section.take() {
        cumulative.add(&s);
    }

    // Never left Preamble => the "T E S T S" marker never appeared.
    if state == TestParseState::Preamble {
        return "mvn test: no tests run".to_string();
    }

    let counts = cumulative;
    let time_str = total_time.as_deref().unwrap_or("?");
    let has_failures = counts.failures > 0 || counts.errors > 0;

    if !has_failures {
        let passed = counts.run.saturating_sub(counts.skipped);
        let mut summary = format!("mvn test: {} passed", passed);
        if counts.skipped > 0 {
            summary.push_str(&format!(", {} skipped", counts.skipped));
        }
        summary.push_str(&format!(" ({})", time_str));
        return summary;
    }

    let failed_count = counts.failures + counts.errors;
    let mut result = format!("mvn test: {} run, {} failed", counts.run, failed_count);
    if counts.skipped > 0 {
        result.push_str(&format!(", {} skipped", counts.skipped));
    }
    result.push_str(&format!(" ({})\n", time_str));

    result.push_str("BUILD FAILURE\n");

    if !failures.is_empty() {
        result.push_str("\nFailures:\n");
    }
    for (i, failure) in failures.iter().enumerate() {
        writeln!(result, "{}. {}", i + 1, failure.name).unwrap();
        for detail in &failure.details {
            writeln!(result, " {}", truncate(detail, MAX_LINE_LENGTH)).unwrap();
        }
    }
    if total_failures_seen > MAX_FAILURES_SHOWN {
        writeln!(
            result,
            "\n... +{} more failures",
            total_failures_seen - MAX_FAILURES_SHOWN
        )
        .unwrap();
    }

    result.trim().to_string()
}

/// Strip [INFO], [ERROR], [WARNING] prefixes from Maven output lines.
/// Expects pre-trimmed input from callers.
fn strip_maven_prefix(line: &str) -> &str {
    for tag in [INFO_TAG, ERROR_TAG, WARNING_TAG] {
        if let Some(rest) = line.strip_prefix(tag) {
            return rest.trim_start();
        }
    }
    line
}

/// Returns true for Java framework stack frames that should be stripped.
/// Expects pre-trimmed input (callers pass `stripped` or `trimmed`).
fn is_framework_frame(line: &str) -> bool {
    // Stack frames may or may not carry the "at " prefix.
    let check = line.strip_prefix("at ").unwrap_or(line);

    const FRAMEWORK_PREFIXES: &[&str] = &[
        "org.apache.maven.",
        "org.junit.platform.",
        "org.junit.jupiter.",
        "org.codehaus.plexus.",
        "java.base/",
        "sun.reflect.",
        "jdk.internal.",
    ];

    for prefix in FRAMEWORK_PREFIXES {
        if check.starts_with(prefix) {
            return true;
        }
    }

    // "... N more" truncation markers
    line.starts_with("...") && line.contains("more")
}

/// Returns true for Maven boilerplate lines that should be stripped.
/// Expects pre-trimmed input from callers.
+fn is_maven_boilerplate(line: &str) -> bool { + // Empty [ERROR] or [INFO] lines + if line == ERROR_TAG || line == INFO_TAG || line == WARNING_TAG { + return true; + } + + let stripped = strip_maven_prefix(line); + + // Separator lines (dashes) + if stripped.starts_with("---") && stripped.chars().all(|c| c == '-' || c.is_whitespace()) { + return true; + } + + const BOILERPLATE_PATTERNS: &[&str] = &[ + "-> [Help", + "http://cwiki.apache.org", + "https://cwiki.apache.org", + "surefire-reports", + "Re-run Maven", + "re-run Maven", + "full stack trace", + "enable verbose output", + "See dump files", + "Failed to execute goal", + "There are test failures", + ]; + + for pattern in BOILERPLATE_PATTERNS { + if stripped.contains(pattern) { + return true; + } + } + + false +} + +// --------------------------------------------------------------------------- +// Line filter for mvn compile output +// --------------------------------------------------------------------------- + +/// Filter `mvn compile` (and compile-like goals such as `process-classes`, +/// `test-compile`) output — strip [INFO] noise, keep errors and summary. 
+fn filter_mvn_compile(output: &str) -> String { + let clean = strip_ansi(output); + let result_lines: Vec<&str> = clean + .lines() + .map(str::trim) + .filter(|line| should_keep_compile_line(line)) + .collect(); + + if result_lines.is_empty() { + return "mvn: ok".to_string(); + } + + result_lines.join("\n") +} + +const INFO_NOISE_PATTERNS: &[&str] = &[ + "---", + "===", + "Building ", + "Downloading ", + "Downloaded ", + "Scanning ", + "Compiling ", + "Recompiling ", + "Nothing to compile", + "Using auto detected", + "Loaded ", + "Finished at:", + "from pom.xml", + "Copying ", + "argLine set to", + "Migration completed", + "Inferring ", + "No bool { + if line.is_empty() { + return false; + } + + let stripped = strip_maven_prefix(line); + + // Keep error lines + if line.starts_with(ERROR_TAG) { + return !is_maven_boilerplate(line); + } + + // Keep BUILD SUCCESS/FAILURE + if stripped.contains("BUILD SUCCESS") || stripped.contains("BUILD FAILURE") { + return true; + } + + // Keep Total time + if TOTAL_TIME_RE.is_match(stripped) { + return true; + } + + // Strip [INFO] noise + if line.starts_with(INFO_TAG) { + if stripped.is_empty() { + return false; + } + + if stripped.starts_with("[stdout]") || stripped.starts_with("[stderr]") { + return false; + } + + // npm lifecycle script lines: "> my-app@1.0.0 build" + if stripped.starts_with("> ") { + return false; + } + + for pattern in INFO_NOISE_PATTERNS { + if stripped.contains(pattern) { + return false; + } + } + + if stripped.contains("deprecated") || stripped.contains("WARNING") { + return false; + } + + // Code generator config params and bundle size lines (regex — slower, run last) + if CODEGEN_CONFIG_RE.is_match(stripped) || BUNDLE_SIZE_RE.is_match(stripped) { + return false; + } + + return true; + } + + // Strip [WARNING] lines for build filter + if line.starts_with(WARNING_TAG) { + return false; + } + + for pattern in BARE_TEXT_NOISE { + if line.contains(pattern) { + return false; + } + } + + // Keep anything else 
(compilation errors without prefix, etc.) + true +} + +// --------------------------------------------------------------------------- +// Line filter for mvn checkstyle:check output +// --------------------------------------------------------------------------- + +/// Maven "Help" footer emitted on BUILD FAILURE. These come prefixed with +/// `[ERROR]` but are not actionable for the user — just pointers to wiki +/// pages. They are distinct from real `[ERROR]` violations, so we match by +/// substring after stripping the prefix. +const CHECKSTYLE_HELP_BOILERPLATE: &[&str] = &[ + "Failed to execute goal", + "To see the full stack trace", + "Re-run Maven using", + "For more information about the errors", + "[Help 1]", + "[Help 2]", + "MojoFailureException", + "cwiki.apache.org", +]; + +/// Filter `mvn checkstyle:check` output: +/// - strip ANSI codes, mvn/JVM/os-detection startup noise +/// - strip Maven model problem WARNING block (10 stock lines) +/// - strip `[INFO] Scanning / Building / ---…---` separators +/// - keep violation lines, rewritten compactly: +/// ` path:line:col [RuleName] message` +/// - keep `There are N errors reported by Checkstyle` and +/// `You have N Checkstyle violations` summaries +/// - keep `BUILD SUCCESS` / `BUILD FAILURE` and `Total time` +/// - strip trailing Help-link boilerplate +fn filter_mvn_checkstyle(output: &str) -> String { + let clean = strip_ansi(output); + let mut result: Vec = Vec::new(); + + for raw in clean.lines() { + // Drop cross-cutting startup noise first + if is_mvn_startup_noise(raw) { + continue; + } + + let line = raw.trim(); + if line.is_empty() { + continue; + } + + // Violations: rewrite compactly + if let Some(caps) = CHECKSTYLE_VIOLATION_RE.captures(line) { + let path = &caps[1]; + let lineno = &caps[2]; + let col = caps.get(3).map(|m| m.as_str()).unwrap_or(""); + let rule = &caps[4]; + let msg = &caps[5]; + let compact = if col.is_empty() { + format!(" {}:{} [{}] {}", path, lineno, rule, msg) + } else { + 
format!(" {}:{}:{} [{}] {}", path, lineno, col, rule, msg) + }; + result.push(compact); + continue; + } + + let stripped = strip_maven_prefix(line); + + // Drop Help-link boilerplate emitted after BUILD FAILURE + if line.starts_with(ERROR_TAG) + && CHECKSTYLE_HELP_BOILERPLATE + .iter() + .any(|p| stripped.contains(p)) + { + continue; + } + + // Keep [INFO] summary & result lines + if line.starts_with(INFO_TAG) { + if stripped.is_empty() { + continue; + } + + // Keep: N-errors / N-violations / BUILD SUCCESS|FAILURE / Total time + if stripped.contains("Checkstyle violations") + || stripped.contains("reported by Checkstyle") + || stripped.contains("BUILD SUCCESS") + || stripped.contains("BUILD FAILURE") + || TOTAL_TIME_RE.is_match(stripped) + { + result.push(stripped.to_string()); + continue; + } + + // Drop everything else: Scanning, Building, separators, plugin + // banners, `from pom.xml`, `Finished at:`, etc. These match + // `is_maven_boilerplate` or known noise words. + continue; + } + + // Strip Maven model WARNING block (empty and boilerplate WARNINGs) + if line.starts_with(WARNING_TAG) { + continue; + } + + // Bare `[ERROR]` continuation (e.g., blank separator between help blocks) + if line == ERROR_TAG { + continue; + } + + // Anything else (e.g., unexpected bare errors not matching the rule + // regex) — keep, in the spirit of the fallback principle. + result.push(line.to_string()); + } + + if result.is_empty() { + return "mvn checkstyle: ok".to_string(); + } + + result.join("\n") +} + +// --------------------------------------------------------------------------- +// Line filter for mvn dependency:tree output +// --------------------------------------------------------------------------- + +/// Filter `mvn dependency:tree` — strip Maven boilerplate, omitted duplicates, +/// and "version managed" annotations. Keep tree structure and conflicts. +/// Returns the tree depth of a dependency line (0 = root, 1 = direct dep, 2+ = transitive). 
/// Counts tree-drawing segments: each `| `, `+- `, `\- `, or `   ` at the start adds one level.
fn dep_tree_depth(line: &str) -> usize {
    let mut depth = 0;
    let bytes = line.as_bytes();
    let mut i = 0;
    // Consume 3-byte drawing segments from the left until a non-segment byte.
    while i + 2 < bytes.len() {
        match (bytes[i], bytes[i + 1], bytes[i + 2]) {
            (b'|', b' ', b' ') | (b'+', b'-', b' ') | (b'\\', b'-', b' ') | (b' ', b' ', b' ') => {
                depth += 1;
                i += 3;
            }
            _ => break,
        }
    }
    depth
}

/// Filter `mvn dependency:tree` output — strip Maven boilerplate, omitted
/// duplicates, and "version managed" annotations; collapse transitive
/// branches (depth 2+) into a `(N transitive)` count on their direct parent.
fn filter_mvn_dep_tree(output: &str) -> String {
    let clean = strip_ansi(output);

    // First pass: collect clean tree lines
    let mut tree_lines: Vec<String> = Vec::new();
    for line in clean.lines() {
        let trimmed = line.trim();

        if trimmed.is_empty() || is_maven_boilerplate(trimmed) {
            continue;
        }

        let stripped = strip_maven_prefix(trimmed);

        if trimmed.starts_with(WARNING_TAG) {
            continue;
        }
        // Preamble / summary [INFO] lines that are not part of the tree.
        if trimmed.starts_with(INFO_TAG)
            && (stripped.is_empty()
                || stripped.starts_with("Scanning ")
                || stripped.starts_with("Building ")
                || stripped.starts_with("Loaded ")
                || stripped.contains("from pom.xml")
                || stripped.contains("BUILD SUCCESS")
                || stripped.contains("BUILD FAILURE")
                || stripped.starts_with("Total time:")
                || stripped.starts_with("Finished at:"))
        {
            continue;
        }

        if stripped.contains("omitted for duplicate") {
            continue;
        }

        // Remove "(version managed from X)" annotations in place.
        let cleaned = if stripped.contains("version managed from") {
            VERSION_MANAGED_RE.replace_all(stripped, "").into_owned()
        } else {
            stripped.to_string()
        };

        tree_lines.push(cleaned);
    }

    if tree_lines.is_empty() {
        return "mvn dependency:tree: no output".to_string();
    }

    // Second pass: collapse transitive deps (depth 2+) into counts on their parent
    let mut result_lines: Vec<String> = Vec::new();
    let mut i = 0;
    while i < tree_lines.len() {
        let depth = dep_tree_depth(&tree_lines[i]);

        if depth <= 1 {
            // Root or direct dep — count transitive children
            let mut transitive_count = 0;
            let mut j = i + 1;
            while j < tree_lines.len() {
                let child_depth = dep_tree_depth(&tree_lines[j]);
                if child_depth <= depth {
                    break;
                }
                if child_depth >= depth + 2 {
                    transitive_count += 1;
                }
                j += 1;
            }

            if depth == 1 && transitive_count > 0 {
                result_lines.push(format!(
                    "{} ({} transitive)",
                    tree_lines[i], transitive_count
                ));
            } else {
                result_lines.push(tree_lines[i].clone());
            }
        }
        // depth 2+ lines are skipped (counted above)
        i += 1;
    }

    result_lines.join("\n")
}

#[cfg(test)]
mod tests {
    use super::*;
    use crate::core::utils::count_tokens;

    #[test]
    fn test_test_counts_add() {
        let mut a = TestCounts {
            run: 10,
            failures: 1,
            errors: 2,
            skipped: 3,
        };
        let b = TestCounts {
            run: 100,
            failures: 20,
            errors: 30,
            skipped: 40,
        };
        a.add(&b);
        assert_eq!(a.run, 110);
        assert_eq!(a.failures, 21);
        assert_eq!(a.errors, 32);
        assert_eq!(a.skipped, 43);
    }

    #[test]
    fn test_filter_pass_output() {
        let input = include_str!("../../../tests/fixtures/mvn_test_pass_mavenmcp.txt");
        let output = filter_mvn_test(input);
        assert!(
            output.contains("mvn test:"),
            "should contain summary prefix"
        );
        assert!(output.contains("183 passed"), "should show 183 passed");
        assert!(output.contains("4.748 s"), "should contain total time");
        assert!(
            !output.contains("[INFO]"),
            "should not contain raw [INFO] prefix"
        );
    }

    #[test]
    fn test_filter_fail_output() {
        let input = include_str!("../../../tests/fixtures/mvn_test_fail_auth.txt");
        let output = filter_mvn_test(input);
        assert!(
            output.contains("5 run, 2 failed"),
            "should show run/failed counts, got: {}",
            output
        );
        assert!(output.contains("23.819 s"), "should contain total time");
        assert!(
            output.contains("EmailParserTest.should_extract_domain_from_email"),
            "should list first failure"
        );
        assert!(
            output.contains("ScoreTypeTest.shouldMapToRole"),
            "should list second failure"
        );
        assert!(
            output.contains("broken.example.com"),
            "should include assertion details"
        );
assert!( + !output.contains("surefire-reports"), + "should strip boilerplate" + ); + assert!( + !output.contains("cwiki.apache.org"), + "should strip help links" + ); + } + + #[test] + fn test_pass_savings() { + let input = include_str!("../../../tests/fixtures/mvn_test_pass_mavenmcp.txt"); + let output = filter_mvn_test(input); + + let input_tokens = count_tokens(input); + let output_tokens = count_tokens(&output); + + let savings = 100.0 - (output_tokens as f64 / input_tokens as f64 * 100.0); + + assert!( + savings >= 90.0, + "mvn test pass: expected >=90% savings, got {:.1}% ({} -> {} tokens)", + savings, + input_tokens, + output_tokens, + ); + } + + #[test] + fn test_fail_savings() { + let input = include_str!("../../../tests/fixtures/mvn_test_fail_auth.txt"); + let output = filter_mvn_test(input); + + let input_tokens = count_tokens(input); + let output_tokens = count_tokens(&output); + + let savings = 100.0 - (output_tokens as f64 / input_tokens as f64 * 100.0); + + assert!( + savings >= 60.0, + "mvn test fail: expected >=60% savings, got {:.1}% ({} -> {} tokens)", + savings, + input_tokens, + output_tokens, + ); + } + + #[test] + fn test_filter_large_suite() { + let input = include_str!("../../../tests/fixtures/mvn_test_large_suite.txt"); + let output = filter_mvn_test(input); + assert!( + output.contains("3262 run, 23 failed"), + "should show run/failed counts, got: {}", + output + ); + assert!( + output.contains("+13 more failures"), + "should cap at 10 and show remaining" + ); + assert!( + output.contains("SearchReadModelTest"), + "should list assertion failures" + ); + assert!( + output.contains("PatchableFieldTest"), + "should list compilation errors" + ); + } + + #[test] + fn test_large_suite_savings() { + let input = include_str!("../../../tests/fixtures/mvn_test_large_suite.txt"); + let output = filter_mvn_test(input); + + let input_tokens = count_tokens(input); + let output_tokens = count_tokens(&output); + let savings = 100.0 - (output_tokens as 
f64 / input_tokens as f64 * 100.0); + + assert!( + savings >= 60.0, + "mvn test large suite: expected >=60% savings, got {:.1}% ({} -> {} tokens)", + savings, + input_tokens, + output_tokens, + ); + } + + #[test] + fn test_empty_input() { + let output = filter_mvn_test(""); + assert_eq!(output, "mvn test: no tests run"); + } + + #[test] + fn test_filter_many_failures_output() { + let input = include_str!("../../../tests/fixtures/mvn_test_many_failures.txt"); + let output = filter_mvn_test(input); + assert!( + output.contains("28 run, 28 failed"), + "should show total run/failed counts, got: {}", + output + ); + assert!( + output.contains("+4 more failures"), + "should cap at 10 and show remaining count" + ); + } + + #[test] + fn test_many_failures_savings() { + let input = include_str!("../../../tests/fixtures/mvn_test_many_failures.txt"); + let output = filter_mvn_test(input); + + let input_tokens = count_tokens(input); + let output_tokens = count_tokens(&output); + let savings = 100.0 - (output_tokens as f64 / input_tokens as f64 * 100.0); + + assert!( + savings >= 60.0, + "mvn test many failures: expected >=60% savings, got {:.1}% ({} -> {} tokens)", + savings, + input_tokens, + output_tokens, + ); + } + + #[test] + fn test_filter_multimodule_output() { + let input = include_str!("../../../tests/fixtures/mvn_test_multimodule.txt"); + let output = filter_mvn_test(input); + assert!( + output.contains("860 run, 4 failed"), + "should show total run/failed across modules, got: {}", + output + ); + assert!( + output.contains("GitDiffReaderTest.shouldBuildDiff"), + "should list failure from services module" + ); + assert!( + output.contains("ServiceUnavailableException"), + "should include error details" + ); + assert!( + output.contains("01:31 min"), + "should contain total time" + ); + } + + #[test] + fn test_multimodule_savings() { + let input = include_str!("../../../tests/fixtures/mvn_test_multimodule.txt"); + let output = filter_mvn_test(input); + + let 
input_tokens = count_tokens(input); + let output_tokens = count_tokens(&output); + let savings = 100.0 - (output_tokens as f64 / input_tokens as f64 * 100.0); + + assert!( + savings >= 60.0, + "mvn test multimodule: expected >=60% savings, got {:.1}% ({} -> {} tokens)", + savings, + input_tokens, + output_tokens, + ); + } + + #[test] + fn test_filter_pass_large_ansi() { + let input = include_str!("../../../tests/fixtures/mvn_test_pass_large_ansi.txt"); + let output = filter_mvn_test(input); + assert!( + output.contains("950 passed"), + "should show 950 passed (959-9 skipped), got: {}", + output + ); + assert!( + output.contains("9 skipped"), + "should show 9 skipped" + ); + assert!( + output.contains("01:32 min"), + "should contain total time" + ); + assert!( + !output.contains("PortUnreachableException"), + "should strip app log noise" + ); + assert!( + !output.contains("[stdout]"), + "should strip [stdout] lines" + ); + assert!( + !output.contains("liquibase"), + "should strip liquibase stderr" + ); + } + + #[test] + fn test_pass_large_ansi_savings() { + let input = include_str!("../../../tests/fixtures/mvn_test_pass_large_ansi.txt"); + let output = filter_mvn_test(input); + + let input_tokens = count_tokens(input); + let output_tokens = count_tokens(&output); + let savings = 100.0 - (output_tokens as f64 / input_tokens as f64 * 100.0); + + assert!( + savings >= 95.0, + "mvn test large ANSI pass: expected >=95% savings, got {:.1}% ({} -> {} tokens)", + savings, + input_tokens, + output_tokens, + ); + } + + #[test] + fn test_no_test_section() { + let input = "[INFO] Building my-project 1.0\n[INFO] BUILD SUCCESS\n"; + let output = filter_mvn_test(input); + assert_eq!(output, "mvn test: no tests run"); + } + + // --- dependency:tree tests --- + + #[test] + fn test_dep_tree_simple() { + let input = include_str!("../../../tests/fixtures/mvn_dep_tree_simple.txt"); + let output = filter_mvn_dep_tree(input); + assert!( + output.contains("com.example:my-app:jar:1.0.0"), + 
"should contain root artifact, got: {}", + output + ); + assert!( + output.contains("slf4j-api"), + "should contain direct dep" + ); + assert!( + output.contains("guava"), + "should contain guava" + ); + assert!( + !output.contains("[INFO]"), + "should strip [INFO] prefix" + ); + assert!( + !output.contains("BUILD SUCCESS"), + "should strip boilerplate" + ); + assert!( + !output.contains("Scanning"), + "should strip preamble" + ); + } + + #[test] + fn test_dep_tree_conflicts() { + let input = include_str!("../../../tests/fixtures/mvn_dep_tree_conflicts.txt"); + let output = filter_mvn_dep_tree(input); + assert!( + output.contains("omitted for conflict with 2.18.3"), + "should keep conflict info, got: {}", + output + ); + assert!( + !output.contains("BUILD SUCCESS"), + "should strip boilerplate" + ); + } + + #[test] + fn test_dep_tree_beacon_strips_duplicates() { + let input = include_str!("../../../tests/fixtures/mvn_dep_tree_beacon.txt"); + let output = filter_mvn_dep_tree(input); + assert!( + !output.contains("omitted for duplicate"), + "should strip all 'omitted for duplicate' lines" + ); + assert!( + output.contains("com.skillpanel:beacon"), + "should contain root artifact" + ); + assert!( + output.contains("spring-boot-starter-web"), + "should contain direct deps" + ); + } + + #[test] + fn test_dep_tree_beacon_cleans_version_managed() { + let input = include_str!("../../../tests/fixtures/mvn_dep_tree_beacon.txt"); + let output = filter_mvn_dep_tree(input); + assert!( + !output.contains("version managed from"), + "should strip 'version managed' annotations" + ); + } + + #[test] + fn test_dep_tree_beacon_savings() { + let input = include_str!("../../../tests/fixtures/mvn_dep_tree_beacon.txt"); + let output = filter_mvn_dep_tree(input); + + let input_tokens = count_tokens(input); + let output_tokens = count_tokens(&output); + let savings = 100.0 - (output_tokens as f64 / input_tokens as f64 * 100.0); + + assert!( + savings >= 60.0, + "mvn dep tree beacon: 
expected >=60% savings, got {:.1}% ({} -> {} tokens)", + savings, + input_tokens, + output_tokens, + ); + } + + #[test] + fn test_dep_tree_simple_savings() { + let input = include_str!("../../../tests/fixtures/mvn_dep_tree_simple.txt"); + let output = filter_mvn_dep_tree(input); + + let input_tokens = count_tokens(input); + let output_tokens = count_tokens(&output); + let savings = 100.0 - (output_tokens as f64 / input_tokens as f64 * 100.0); + + assert!( + savings >= 30.0, + "mvn dep tree simple: expected >=30% savings, got {:.1}% ({} -> {} tokens)", + savings, + input_tokens, + output_tokens, + ); + } + + #[test] + fn test_dep_tree_empty() { + let output = filter_mvn_dep_tree(""); + assert_eq!(output, "mvn dependency:tree: no output"); + } + + #[test] + fn test_dep_tree_ansi_codes_stripped() { + let input = "\x1b[34;1m[INFO]\x1b[0m com.example:app:jar:1.0\n\ + \x1b[34;1m[INFO]\x1b[0m +- org.junit:junit:jar:5.0:test\n\ + \x1b[34;1m[INFO]\x1b[0m | \\- org.hamcrest:hamcrest:jar:2.0:test\n\ + \x1b[34;1m[INFO]\x1b[0m \\- com.google:guava:jar:33.0:compile"; + let output = filter_mvn_dep_tree(input); + assert!( + !output.contains("\x1b["), + "output should not contain ANSI escape codes" + ); + assert!( + output.contains("com.example:app"), + "should contain root artifact" + ); + assert!( + output.contains("junit"), + "should contain direct dep" + ); + assert!( + !output.contains("hamcrest"), + "should collapse transitive dep" + ); + } + + #[test] + fn test_dep_tree_large_collapses_transitive() { + let input = include_str!("../../../tests/fixtures/mvn_dep_tree_large.txt"); + let output = filter_mvn_dep_tree(input); + + // Should show root artifact + assert!( + output.contains("com.example.demo:webapp"), + "should contain root artifact" + ); + + // Direct deps should be listed + assert!( + output.contains("spring-boot-starter-actuator"), + "should contain direct dep" + ); + + // Transitive deps (depth 2+) should NOT appear as separate lines + assert!( + 
!output.contains("logback-classic"), + "should not show transitive dep logback-classic" + ); + assert!( + !output.contains("logback-core"), + "should not show transitive dep logback-core" + ); + + // Direct deps with children should show transitive count + assert!( + output.contains("transitive"), + "should show transitive count for deps with children" + ); + + // Output should be dramatically smaller + let output_lines = output.lines().count(); + assert!( + output_lines < 40, + "collapsed tree should be under 40 lines, got {}", + output_lines + ); + } + + #[test] + fn test_dep_tree_large_savings_above_80() { + let input = include_str!("../../../tests/fixtures/mvn_dep_tree_large.txt"); + let output = filter_mvn_dep_tree(input); + + let input_tokens = count_tokens(input); + let output_tokens = count_tokens(&output); + let savings = 100.0 - (output_tokens as f64 / input_tokens as f64 * 100.0); + + assert!( + savings >= 80.0, + "mvn dep tree large: expected >=80% savings, got {:.1}% ({} -> {} tokens)", + savings, input_tokens, output_tokens, + ); + } + + // --- compile filter tests (auth project: jOOQ + typescript-generator + React) --- + + #[test] + fn test_filter_compile_auth() { + let input = include_str!("../../../tests/fixtures/mvn_compile_auth.txt"); + let output = filter_mvn_compile(input); + + // Must preserve critical lines + assert!( + output.contains("BUILD SUCCESS"), + "should keep BUILD SUCCESS, got: {}", + output + ); + assert!( + output.contains("Total time:"), + "should keep Total time" + ); + + // Must strip plugin noise + assert!( + !output.contains("[stdout]"), + "should strip [stdout] lines" + ); + assert!( + !output.contains("Generating table"), + "should strip jOOQ codegen" + ); + assert!( + !output.contains("Generating record"), + "should strip jOOQ record gen" + ); + assert!( + !output.contains("Generating routine"), + "should strip jOOQ routine gen" + ); + assert!( + !output.contains("Missing name"), + "should strip jOOQ warnings" + ); + 
assert!( + !output.contains("kB build/static"), + "should strip bundle sizes" + ); + assert!( + !output.contains("The project was built"), + "should strip CRA messages" + ); + assert!( + !output.contains("npm fund"), + "should strip npm messages" + ); + assert!( + !output.contains("Server Version:"), + "should strip Docker bare text" + ); + assert!( + !output.contains("Parsing"), + "should strip typescript-generator parsing lines" + ); + assert!( + !output.contains("Loading class"), + "should strip typescript-generator loading lines" + ); + } + + #[test] + fn test_compile_auth_savings() { + let input = include_str!("../../../tests/fixtures/mvn_compile_auth.txt"); + let output = filter_mvn_compile(input); + + let input_tokens = count_tokens(input); + let output_tokens = count_tokens(&output); + let savings = 100.0 - (output_tokens as f64 / input_tokens as f64 * 100.0); + + assert!( + savings >= 90.0, + "mvn compile auth: expected >=90% savings, got {:.1}% ({} -> {} tokens)\nOutput:\n{}", + savings, + input_tokens, + output_tokens, + output, + ); + } + + #[test] + fn test_compile_success_only() { + let input = "[INFO] BUILD SUCCESS\n[INFO] Total time: 2.5 s\n"; + let output = filter_mvn_compile(input); + assert!(output.contains("BUILD SUCCESS")); + assert!(output.contains("Total time:")); + } + + #[test] + fn test_compile_strips_stdout_lines() { + let input = "[INFO] [stdout] Parsing 'com.example.Foo'\n\ + [INFO] [stdout] Loading class java.lang.String\n\ + [INFO] [stdout] Writing declarations to: /tmp/out.d.ts\n\ + [INFO] BUILD SUCCESS\n\ + [INFO] Total time: 1.0 s\n"; + let output = filter_mvn_compile(input); + assert!(!output.contains("[stdout]"), "should strip all [stdout] lines"); + assert!(output.contains("BUILD SUCCESS")); + } + + #[test] + fn test_compile_strips_codegen_config() { + let input = "[INFO] dialect : POSTGRES_15\n\ + [INFO] generated : false\n\ + [INFO] JPA : false\n\ + [INFO] BUILD SUCCESS\n\ + [INFO] Total time: 1.0 s\n"; + let output = 
filter_mvn_compile(input); + assert!(!output.contains("dialect"), "should strip codegen config"); + assert!(!output.contains("JPA"), "should strip codegen config"); + assert!(output.contains("BUILD SUCCESS")); + } + + #[test] + fn test_compile_strips_bundle_sizes() { + let input = "[INFO] 257.55 kB build/static/js/main.js\n\ + [INFO] 40.41 kB build/static/js/962.chunk.js\n\ + [INFO] 918 B build/static/js/636.chunk.js\n\ + [INFO] BUILD SUCCESS\n\ + [INFO] Total time: 1.0 s\n"; + let output = filter_mvn_compile(input); + assert!(!output.contains("kB"), "should strip bundle sizes"); + assert!(!output.contains("918 B"), "should strip small bundle sizes"); + assert!(output.contains("BUILD SUCCESS")); + } + + #[test] + fn test_compile_preserves_errors() { + let input = "[INFO] Compiling 42 source files\n\ + [ERROR] /src/Foo.java:[10,5] cannot find symbol\n\ + [INFO] BUILD FAILURE\n\ + [INFO] Total time: 1.0 s\n"; + let output = filter_mvn_compile(input); + assert!( + output.contains("[ERROR]"), + "should preserve [ERROR] lines, got: {}", + output + ); + assert!(output.contains("cannot find symbol")); + assert!(output.contains("BUILD FAILURE")); + } + + // --- run_other routing --- + + #[test] + fn test_route_goal() { + // Compile-family → compile filter + assert_eq!(route_goal("compile"), GoalRouting::Compile); + assert_eq!(route_goal("process-classes"), GoalRouting::Compile); + assert_eq!(route_goal("test-compile"), GoalRouting::Compile); + + // Checkstyle (both canonical and short form) + assert_eq!(route_goal("checkstyle:check"), GoalRouting::Checkstyle); + assert_eq!(route_goal("checkstyle"), GoalRouting::Checkstyle); + + // Rare lifecycle phases → passthrough (rare in real usage) + assert_eq!(route_goal("package"), GoalRouting::Passthrough); + assert_eq!(route_goal("install"), GoalRouting::Passthrough); + assert_eq!(route_goal("verify"), GoalRouting::Passthrough); + assert_eq!(route_goal("clean"), GoalRouting::Passthrough); + assert_eq!(route_goal("deploy"), 
GoalRouting::Passthrough); + + // Long-running / interactive goals must always passthrough + assert_eq!(route_goal("spring-boot:run"), GoalRouting::Passthrough); + assert_eq!(route_goal("quarkus:dev"), GoalRouting::Passthrough); + + // Unknown / typo: passthrough (safer default) + assert_eq!(route_goal("compilee"), GoalRouting::Passthrough); + assert_eq!(route_goal(""), GoalRouting::Passthrough); + } + + // --- checkstyle filter tests --- + + #[test] + fn test_filter_checkstyle_clean() { + let input = include_str!("../../../tests/fixtures/mvn_checkstyle_clean.txt"); + let output = filter_mvn_checkstyle(input); + + // Keep success summary + assert!( + output.contains("0 Checkstyle violations"), + "should keep violation-count summary, got: {}", + output + ); + assert!(output.contains("BUILD SUCCESS"), "should keep BUILD SUCCESS"); + assert!(output.contains("Total time"), "should keep Total time"); + + // Strip ANSI escapes (fixture has them) + assert!( + !output.contains('\x1b'), + "should strip ANSI escape codes" + ); + + // Strip mvnd/maven 3.9+ startup noise + assert!( + !output.contains("auto-discovered prefixes"), + "should strip 'Loaded N auto-discovered prefixes' lines" + ); + assert!( + !output.contains("Scanning for projects"), + "should strip 'Scanning for projects'" + ); + + // Savings ≥60% + let input_tokens = count_tokens(input); + let output_tokens = count_tokens(&output); + let savings = 100.0 - (output_tokens as f64 / input_tokens as f64 * 100.0); + assert!( + savings >= 60.0, + "mvn checkstyle clean: expected >=60% savings, got {:.1}% ({} -> {})\nOutput:\n{}", + savings, + input_tokens, + output_tokens, + output, + ); + } + + #[test] + fn test_filter_checkstyle_clean_native_warnings() { + let input = + include_str!("../../../tests/fixtures/mvn_checkstyle_clean_native.txt"); + let output = filter_mvn_checkstyle(input); + + assert!(output.contains("0 Checkstyle violations")); + assert!(output.contains("BUILD SUCCESS")); + + // Strip JVM 
restricted-method / native-access warnings (non-prefixed WARNING:) + assert!( + !output.contains("sun.misc.Unsafe"), + "should strip JVM native-access warnings" + ); + assert!( + !output.contains("native-access"), + "should strip --enable-native-access hints" + ); + + // Strip os-maven-plugin detection lines + assert!( + !output.contains("os.detected"), + "should strip [INFO] os.detected.* lines" + ); + + let savings = 100.0 + - (count_tokens(&output) as f64 / count_tokens(input) as f64 * 100.0); + assert!( + savings >= 60.0, + "mvn checkstyle clean (native): expected >=60% savings, got {:.1}%", + savings + ); + } + + #[test] + fn test_filter_checkstyle_violations() { + let input = + include_str!("../../../tests/fixtures/mvn_checkstyle_violations.txt"); + let output = filter_mvn_checkstyle(input); + + // Keep: error-count summary + assert!( + output.contains("4 errors reported by Checkstyle"), + "should keep '4 errors reported' summary, got:\n{}", + output + ); + + // Keep: final result + assert!(output.contains("BUILD FAILURE")); + assert!(output.contains("Total time")); + + // Keep: each of 4 violations (rule name must survive the rewrite) + for rule in &[ + "UnusedImports", + "MethodName", + "LineLength", + "LocalVariableName", + ] { + assert!( + output.contains(rule), + "should keep violation rule {}, got:\n{}", + rule, + output + ); + } + + // Strip: maven Help-link boilerplate + assert!( + !output.contains("To see the full stack trace"), + "should strip 'To see the full stack trace' boilerplate" + ); + assert!( + !output.contains("MojoFailureException"), + "should strip Help-link MojoFailureException reference" + ); + assert!( + !output.contains("Failed to execute goal org.apache.maven.plugins"), + "should strip 'Failed to execute goal …' [ERROR] line" + ); + + // Exactly 4 rewritten violation lines (one per rule above). + // Our compact format is ` :: [] `. 
+ let violation_count = output + .lines() + .filter(|l| l.contains("ExternalAppId.java") && l.contains('[')) + .count(); + assert_eq!( + violation_count, 4, + "expected exactly 4 violation lines, got {}:\n{}", + violation_count, output + ); + + // Strip: mvn startup noise (fixture has 7 `auto-discovered prefixes` lines) + assert!(!output.contains("auto-discovered prefixes")); + + // Savings ≥60% + let savings = 100.0 + - (count_tokens(&output) as f64 / count_tokens(input) as f64 * 100.0); + assert!( + savings >= 60.0, + "mvn checkstyle violations: expected >=60% savings, got {:.1}%\nOutput:\n{}", + savings, + output + ); + } + + #[test] + fn test_filter_verify_auth_counts() { + let input = include_str!("../../../tests/fixtures/mvn_verify_auth.txt"); + let output = filter_mvn_test(input); + assert!( + output.contains("941 passed"), + "should accumulate surefire+failsafe (688+262)=950 run, minus 9 skipped = 941 passed, got: {}", + output + ); + assert!( + output.contains("9 skipped"), + "should accumulate skipped (8 surefire + 1 failsafe), got: {}", + output + ); + assert!( + output.contains("02:11 min"), + "should preserve total time, got: {}", + output + ); + assert!( + !output.contains("BUILD FAILURE"), + "passing verify run should not say FAILURE, got: {}", + output + ); + } + + #[test] + fn test_filter_verify_auth_savings() { + let input = include_str!("../../../tests/fixtures/mvn_verify_auth.txt"); + let output = filter_mvn_test(input); + + let input_tokens = count_tokens(input); + let output_tokens = count_tokens(&output); + let savings = 100.0 - (output_tokens as f64 / input_tokens as f64 * 100.0); + + assert!( + savings >= 90.0, + "mvn verify auth: expected >=90% savings, got {:.1}% ({} -> {} tokens)", + savings, + input_tokens, + output_tokens, + ); + } +} diff --git a/src/cmds/mod.rs b/src/cmds/mod.rs index 1eca0b84c..0f25bd8de 100644 --- a/src/cmds/mod.rs +++ b/src/cmds/mod.rs @@ -4,6 +4,7 @@ pub mod cloud; pub mod dotnet; pub mod git; pub mod go; +pub 
mod java; pub mod js; pub mod python; pub mod ruby; diff --git a/src/core/toml_filter.rs b/src/core/toml_filter.rs index 06060d22d..74b61026d 100644 --- a/src/core/toml_filter.rs +++ b/src/core/toml_filter.rs @@ -1582,7 +1582,6 @@ match_command = "^make\\b" "markdownlint", "mix-compile", "mix-format", - "mvn-build", "ping", "pio-run", "poetry-install", @@ -1621,8 +1620,8 @@ match_command = "^make\\b" let filters = make_filters(BUILTIN_TOML); assert_eq!( filters.len(), - 59, - "Expected exactly 59 built-in filters, got {}. \ + 58, + "Expected exactly 58 built-in filters, got {}. \ Update this count when adding/removing filters in src/filters/.", filters.len() ); @@ -1679,11 +1678,11 @@ expected = "output line 1\noutput line 2" let combined = format!("{}\n\n{}", BUILTIN_TOML, new_filter); let filters = make_filters(&combined); - // All 59 existing filters still present + 1 new = 60 + // All 58 existing filters still present + 1 new = 59 assert_eq!( filters.len(), - 60, - "Expected 60 filters after concat (59 built-in + 1 new)" + 59, + "Expected 59 filters after concat (58 built-in + 1 new)" ); // New filter is discoverable diff --git a/src/discover/rules.rs b/src/discover/rules.rs index 11359496c..a6642269d 100644 --- a/src/discover/rules.rs +++ b/src/discover/rules.rs @@ -672,11 +672,11 @@ pub const RULES: &[RtkRule] = &[ subcmd_status: &[], }, RtkRule { - pattern: r"^mvn\s+(compile|package|clean|install)\b", + pattern: r"^(\.\/?)?mvnw?\s+(test|compile|package|clean|install|dependency:tree)\b", rtk_cmd: "rtk mvn", - rewrite_prefixes: &["mvn"], + rewrite_prefixes: &["mvn", "mvnw", "./mvnw"], category: "Build", - savings_pct: 70.0, + savings_pct: 90.0, subcmd_savings: &[], subcmd_status: &[], }, diff --git a/src/filters/mvn-build.toml b/src/filters/mvn-build.toml deleted file mode 100644 index 430a7f0cd..000000000 --- a/src/filters/mvn-build.toml +++ /dev/null @@ -1,44 +0,0 @@ -[filters.mvn-build] -description = "Compact Maven build output" -match_command = 
"^mvn\\s+(compile|package|clean|install)\\b" -strip_ansi = true -strip_lines_matching = [ - "^\\[INFO\\] ---", - "^\\[INFO\\] Building\\s", - "^\\[INFO\\] Downloading\\s", - "^\\[INFO\\] Downloaded\\s", - "^\\[INFO\\]\\s*$", - "^\\s*$", - "^Downloading:", - "^Downloaded:", - "^Progress", -] -max_lines = 50 -on_empty = "mvn: ok" - -[[tests.mvn-build]] -name = "strips INFO noise, preserves errors and summary" -input = """ -[INFO] --- -[INFO] Building myapp 1.0-SNAPSHOT -[INFO] Downloading org.apache.maven.plugins:maven-compiler-plugin:3.11.0 -[INFO] Downloaded org.apache.maven.plugins:maven-compiler-plugin:3.11.0 -[INFO] -[ERROR] /src/main/java/Main.java:[10,5] cannot find symbol - symbol: method foo() -[INFO] BUILD FAILURE -[INFO] Total time: 2.543 s -""" -expected = "[ERROR] /src/main/java/Main.java:[10,5] cannot find symbol\n symbol: method foo()\n[INFO] BUILD FAILURE\n[INFO] Total time: 2.543 s" - -[[tests.mvn-build]] -name = "successful build keeps BUILD SUCCESS line" -input = """ -[INFO] --- -[INFO] Building myapp 1.0-SNAPSHOT -[INFO] -[INFO] BUILD SUCCESS -[INFO] Total time: 4.123 s -[INFO] Finished at: 2024-01-15T10:30:00Z -""" -expected = "[INFO] BUILD SUCCESS\n[INFO] Total time: 4.123 s\n[INFO] Finished at: 2024-01-15T10:30:00Z" diff --git a/src/main.rs b/src/main.rs index 1d139d958..15e40b2d7 100644 --- a/src/main.rs +++ b/src/main.rs @@ -11,6 +11,7 @@ use cmds::cloud::{aws_cmd, container, curl_cmd, psql_cmd, wget_cmd}; use cmds::dotnet::{binlog, dotnet_cmd, dotnet_format_report, dotnet_trx}; use cmds::git::{diff_cmd, gh_cmd, git, gt_cmd}; use cmds::go::{go_cmd, golangci_cmd}; +use cmds::java::mvn_cmd; use cmds::js::{ lint_cmd, next_cmd, npm_cmd, playwright_cmd, pnpm_cmd, prettier_cmd, prisma_cmd, tsc_cmd, vitest_cmd, @@ -685,6 +686,12 @@ enum Commands { command: GoCommands, }, + /// Maven commands with compact output + Mvn { + #[command(subcommand)] + command: MvnCommands, + }, + /// Graphite (gt) stacked PR commands with compact output Gt { 
#[command(subcommand)] @@ -1073,6 +1080,35 @@ enum GoCommands { Other(Vec), } +#[derive(Debug, Subcommand)] +enum MvnCommands { + /// Run tests with compact output (state machine parser, 99%+ token reduction) + Test { + #[arg(trailing_var_arg = true, allow_hyphen_values = true)] + args: Vec, + }, + /// Compile with compact output (strip [INFO] noise, keep errors and summary) + Compile { + #[arg(trailing_var_arg = true, allow_hyphen_values = true)] + args: Vec, + }, + /// Run `mvn checkstyle:check` with grouped violations and stripped boilerplate + #[command(name = "checkstyle:check", alias = "checkstyle")] + Checkstyle { + #[arg(trailing_var_arg = true, allow_hyphen_values = true)] + args: Vec, + }, + /// Dependency tree with compact output (strip duplicates and boilerplate) + #[command(name = "dependency:tree")] + DepTree { + #[arg(trailing_var_arg = true, allow_hyphen_values = true)] + args: Vec, + }, + /// Passthrough: runs any unsupported mvn subcommand directly + #[command(external_subcommand)] + Other(Vec), +} + /// RTK-only subcommands that should never fall back to raw execution. /// If Clap fails to parse these, show the Clap error directly. 
const RTK_META_COMMANDS: &[&str] = &[ @@ -2039,6 +2075,14 @@ fn run_cli() -> Result { GoCommands::Other(args) => go_cmd::run_other(&args, cli.verbose)?, }, + Commands::Mvn { command } => match command { + MvnCommands::Test { args } => mvn_cmd::run_test(&args, cli.verbose)?, + MvnCommands::Compile { args } => mvn_cmd::run_compile(&args, cli.verbose)?, + MvnCommands::Checkstyle { args } => mvn_cmd::run_checkstyle(&args, cli.verbose)?, + MvnCommands::DepTree { args } => mvn_cmd::run_dep_tree(&args, cli.verbose)?, + MvnCommands::Other(args) => mvn_cmd::run_other(&args, cli.verbose)?, + }, + Commands::Gt { command } => match command { GtCommands::Log { args } => gt_cmd::run_log(&args, cli.verbose)?, GtCommands::Submit { args } => gt_cmd::run_submit(&args, cli.verbose)?, @@ -2381,6 +2425,7 @@ fn is_operational_command(cmd: &Commands) -> bool { | Commands::Rspec { .. } | Commands::Pip { .. } | Commands::Go { .. } + | Commands::Mvn { .. } | Commands::GolangciLint { .. } | Commands::Gt { .. } ) diff --git a/tests/fixtures/mvn_checkstyle_clean.txt b/tests/fixtures/mvn_checkstyle_clean.txt new file mode 100644 index 000000000..a381da719 --- /dev/null +++ b/tests/fixtures/mvn_checkstyle_clean.txt @@ -0,0 +1,33 @@ +[INFO] Loaded 22539 auto-discovered prefixes for remote repository central (prefixes-central.txt) +[INFO] Loaded 74 auto-discovered prefixes for remote repository apache.snapshots (prefixes-apache.snapshots-c1d4c55f9308e5ac18a4069bed41dca64d85c515.txt) +[INFO] Scanning for projects... 
+[INFO] Loaded 22456 auto-discovered prefixes for remote repository google-maven-central-copy (prefixes-google-maven-central-copy-c4cd290f3f22839f5b0a2afe35defbe31dfa3a63.txt) +[INFO] Loaded 22539 auto-discovered prefixes for remote repository central (prefixes-central.txt) +[INFO] Loaded 22539 auto-discovered prefixes for remote repository maven-central (prefixes-maven-central-de66ab544dabb92499170649134bc3bd0ea8afac.txt) +[INFO] Loaded 22456 auto-discovered prefixes for remote repository google-maven-central-copy (prefixes-google-maven-central-copy-618f292136d6a9d006041d40d67cd906e4edc388.txt) +[WARNING] +[WARNING] 1 problem was encountered while building the effective model for 'com.devskiller:auth:jar:1.3-SNAPSHOT' (use -e to see details) +[WARNING] +[WARNING] Total model problems reported: 1 +[WARNING] +[WARNING] It is highly recommended to fix these problems because they threaten the stability of your build. +[WARNING] +[WARNING] For this reason, future Maven versions might no longer support building such malformed projects. +[WARNING] +[INFO] Loaded 22539 auto-discovered prefixes for remote repository central (prefixes-central.txt) +[INFO] Loaded 74 auto-discovered prefixes for remote repository apache.snapshots (prefixes-apache.snapshots-c1d4c55f9308e5ac18a4069bed41dca64d85c515.txt) +[INFO] +[INFO] -------------------------------------------------< com.devskiller:auth >-------------------------------------------------- +[INFO] Building auth 1.3-SNAPSHOT +[INFO] from pom.xml +[INFO] ---------------------------------------------------------[ jar ]---------------------------------------------------------- +[INFO] +[INFO] --- checkstyle:3.6.0:check (default-cli) @ auth --- +[INFO] Loaded 22459 auto-discovered prefixes for remote repository jvnet-nexus-releases (prefixes-jvnet-nexus-releases-a5d540dd75d8efce5a7e6a5ccab82bab11416135.txt) +[INFO] You have 0 Checkstyle violations. 
+[INFO] -------------------------------------------------------------------------------------------------------------------------- +[INFO] BUILD SUCCESS +[INFO] -------------------------------------------------------------------------------------------------------------------------- +[INFO] Total time: 0.905 s +[INFO] Finished at: 2026-04-14T17:16:37+02:00 +[INFO] -------------------------------------------------------------------------------------------------------------------------- diff --git a/tests/fixtures/mvn_checkstyle_clean_native.txt b/tests/fixtures/mvn_checkstyle_clean_native.txt new file mode 100644 index 000000000..6bb982ccf --- /dev/null +++ b/tests/fixtures/mvn_checkstyle_clean_native.txt @@ -0,0 +1,37 @@ +WARNING: A restricted method in java.lang.System has been called +WARNING: java.lang.System::load has been called by org.fusesource.jansi.internal.JansiLoader in an unnamed module (file:/home/mariusz/.m2/wrapper/dists/apache-maven-3.9.9/3477a4f1/lib/jansi-2.4.1.jar) +WARNING: Use --enable-native-access=ALL-UNNAMED to avoid a warning for callers in this module +WARNING: Restricted methods will be blocked in a future release unless native access is enabled + +WARNING: A terminally deprecated method in sun.misc.Unsafe has been called +WARNING: sun.misc.Unsafe::objectFieldOffset has been called by com.google.common.util.concurrent.AbstractFuture$UnsafeAtomicHelper (file:/home/mariusz/.m2/wrapper/dists/apache-maven-3.9.9/3477a4f1/lib/guava-33.2.1-jre.jar) +WARNING: Please consider reporting this to the maintainers of class com.google.common.util.concurrent.AbstractFuture$UnsafeAtomicHelper +WARNING: sun.misc.Unsafe::objectFieldOffset will be removed in a future release +[INFO] Scanning for projects... 
+[INFO] ------------------------------------------------------------------------ +[INFO] Detecting the operating system and CPU architecture +[INFO] ------------------------------------------------------------------------ +[INFO] os.detected.name: linux +[INFO] os.detected.arch: x86_64 +[INFO] os.detected.bitness: 64 +[INFO] os.detected.version: 6.19 +[INFO] os.detected.version.major: 6 +[INFO] os.detected.version.minor: 19 +[INFO] os.detected.release: fedora +[INFO] os.detected.release.version: 43 +[INFO] os.detected.release.like.fedora: true +[INFO] os.detected.classifier: linux-x86_64 +[INFO] +[INFO] -------------------------< com.devskiller:map >------------------------- +[INFO] Building map 1.0-SNAPSHOT +[INFO] from pom.xml +[INFO] --------------------------------[ jar ]--------------------------------- +[INFO] +[INFO] --- checkstyle:3.6.0:check (default-cli) @ map --- +[INFO] You have 0 Checkstyle violations. +[INFO] ------------------------------------------------------------------------ +[INFO] BUILD SUCCESS +[INFO] ------------------------------------------------------------------------ +[INFO] Total time: 5.567 s +[INFO] Finished at: 2026-04-14T17:16:35+02:00 +[INFO] ------------------------------------------------------------------------ diff --git a/tests/fixtures/mvn_checkstyle_violations.txt b/tests/fixtures/mvn_checkstyle_violations.txt new file mode 100644 index 000000000..dbb997597 --- /dev/null +++ b/tests/fixtures/mvn_checkstyle_violations.txt @@ -0,0 +1,44 @@ +[INFO] Loaded 22539 auto-discovered prefixes for remote repository central (prefixes-central.txt) +[INFO] Loaded 74 auto-discovered prefixes for remote repository apache.snapshots (prefixes-apache.snapshots-c1d4c55f9308e5ac18a4069bed41dca64d85c515.txt) +[INFO] Scanning for projects... 
+[INFO] Loaded 22456 auto-discovered prefixes for remote repository google-maven-central-copy (prefixes-google-maven-central-copy-c4cd290f3f22839f5b0a2afe35defbe31dfa3a63.txt) +[INFO] Loaded 22539 auto-discovered prefixes for remote repository central (prefixes-central.txt) +[INFO] Loaded 22539 auto-discovered prefixes for remote repository maven-central (prefixes-maven-central-de66ab544dabb92499170649134bc3bd0ea8afac.txt) +[INFO] Loaded 22456 auto-discovered prefixes for remote repository google-maven-central-copy (prefixes-google-maven-central-copy-618f292136d6a9d006041d40d67cd906e4edc388.txt) +[WARNING] +[WARNING] 1 problem was encountered while building the effective model for 'com.devskiller:auth:jar:1.3-SNAPSHOT' (use -e to see details) +[WARNING] +[WARNING] Total model problems reported: 1 +[WARNING] +[WARNING] It is highly recommended to fix these problems because they threaten the stability of your build. +[WARNING] +[WARNING] For this reason, future Maven versions might no longer support building such malformed projects. +[WARNING] +[INFO] Loaded 22539 auto-discovered prefixes for remote repository central (prefixes-central.txt) +[INFO] Loaded 74 auto-discovered prefixes for remote repository apache.snapshots (prefixes-apache.snapshots-c1d4c55f9308e5ac18a4069bed41dca64d85c515.txt) +[INFO] +[INFO] ------------------------< com.devskiller:auth >------------------------- +[INFO] Building auth 1.3-SNAPSHOT +[INFO] from pom.xml +[INFO] --------------------------------[ jar ]--------------------------------- +[INFO] +[INFO] --- checkstyle:3.6.0:check (default-cli) @ auth --- +[INFO] Loaded 22459 auto-discovered prefixes for remote repository jvnet-nexus-releases (prefixes-jvnet-nexus-releases-a5d540dd75d8efce5a7e6a5ccab82bab11416135.txt) +[INFO] There are 4 errors reported by Checkstyle 13.3.0 with checkstyle.xml ruleset. +[ERROR] src/main/java/com/devskiller/auth/app/ExternalAppId.java:[3,8] (imports) UnusedImports: Unused import - java.util.List. 
+[ERROR] src/main/java/com/devskiller/auth/app/ExternalAppId.java:[9,19] (naming) MethodName: Name 'BadMethodName' must match pattern '^[a-z][a-zA-Z0-9]*$'. +[ERROR] src/main/java/com/devskiller/auth/app/ExternalAppId.java:[10] (sizes) LineLength: Line is longer than 200 characters (found 241). +[ERROR] src/main/java/com/devskiller/auth/app/ExternalAppId.java:[10,16] (naming) LocalVariableName: Name 'BadVariable' must match pattern '^([a-z][a-zA-Z0-9]*|_)$'. +[INFO] ------------------------------------------------------------------------ +[INFO] BUILD FAILURE +[INFO] ------------------------------------------------------------------------ +[INFO] Total time: 0.907 s +[INFO] Finished at: 2026-04-14T17:19:19+02:00 +[INFO] ------------------------------------------------------------------------ +[ERROR] Failed to execute goal org.apache.maven.plugins:maven-checkstyle-plugin:3.6.0:check (default-cli) on project auth: You have 4 Checkstyle violations. -> [Help 1] +[ERROR] +[ERROR] To see the full stack trace of the errors, re-run Maven with the '-e' switch +[ERROR] Re-run Maven using the '-X' switch to enable verbose output +[ERROR] +[ERROR] For more information about the errors and possible solutions, please read the following articles: +[ERROR] [Help 1] http://cwiki.apache.org/confluence/display/MAVEN/MojoFailureException diff --git a/tests/fixtures/mvn_compile_auth.txt b/tests/fixtures/mvn_compile_auth.txt new file mode 100644 index 000000000..d14fcef56 --- /dev/null +++ b/tests/fixtures/mvn_compile_auth.txt @@ -0,0 +1,132 @@ +[INFO] argLine set to -javaagent:/home/user/.m2/repository/org/jacoco/org.jacoco.agent/0.8.14/org.jacoco.agent-0.8.14-runtime.jar=destfile=/home/user/project/target/jacoco.exec +Server Version: 28.1.1 +API Version: 1.49 +Operating System: Fedora Linux 43 (Workstation Edition) +Total Memory: 61869 MB +[INFO] [stdout] +[INFO] [stdout] UPDATE SUMMARY +[INFO] [stdout] Run: 352 +[INFO] [stdout] Previously run: 0 +[INFO] [stdout] Filtered out: 0 
+[INFO] [stdout] Total change sets: 352 +[INFO] [stdout] +[INFO] [stdout] Liquibase: Update has been successful. +[INFO] Migration completed +[INFO] Database : Inferring driver org.postgresql.Driver from URL jdbc:postgresql://localhost:32811/test?loggerLevel=OFF +[INFO] Database : Inferring database org.jooq.meta.postgres.PostgresDatabase from URL jdbc:postgresql://localhost:32811/test?loggerLevel=OFF +[INFO] No was provided. Generating ALL available catalogs instead. +[INFO] License parameters +[INFO] Thank you for using jOOQ and jOOQ's code generator +[INFO] Database parameters +[INFO] dialect : POSTGRES_15 +[INFO] URL : jdbc:postgresql://localhost:32811/test?loggerLevel=OFF +[INFO] target dir : /home/user/project/target/generated-sources/jooq +[INFO] target package : com.example.webapp.jooq +[INFO] includes : [.*] +[INFO] JavaGenerator parameters +[INFO] annotations +[INFO] generated : false +[INFO] JPA : false +[INFO] JPA version : +[INFO] comments +[INFO] comments : true +[INFO] sources +[INFO] sources : true +[INFO] global references +[INFO] global references : true +[INFO] object types +[INFO] interfaces (immutable) : false +[INFO] table-valued functions : true +[INFO] other +[INFO] Generation remarks +[INFO] Generating catalogs : Total: 1 +[INFO] Version : Database version is supported by dialect POSTGRES_15: 17.5 (Debian 17.5-1.pgdg120+1) +[INFO] ARRAYs fetched : 0 (0 included, 0 excluded) +[INFO] Tables fetched : 42 (40 included, 2 excluded) +[INFO] Routines fetched : 35 (35 included, 0 excluded) +[INFO] No schema version is applied for catalog . Regenerating. 
+[INFO] Generating catalog : DefaultCatalog.java +[INFO] ========================================================== +[INFO] Generating schemata : Total: 1 +[INFO] Generating schema : Public.java +[INFO] Generating tables +[INFO] Indexes fetched : 61 (61 included, 0 excluded) +[INFO] Generating table : Company.java [input=company, pk=company_pkey] +[INFO] Generating table : User.java [input=user, pk=user_pkey] +[INFO] Generating table : Role.java [input=role, pk=role_pkey] +- https://www.jooq.org/doc/latest/manual/code-generation/codegen-generatorstrategy/ +- https://www.jooq.org/doc/latest/manual/code-generation/codegen-matcherstrategy/ +[INFO] Tables generated : Total: 421.766ms +[INFO] Generating table records +[INFO] Generating record : CompanyRecord.java +[INFO] Generating record : UserRecord.java +[INFO] Table records generated : Total: 449.399ms, +27.632ms +[INFO] Generating routines and table-valued functions +[INFO] Generating routine : Encrypt.java +[INFO] Missing name : Routine public.encrypt holds a parameter without a name at position 1 +[INFO] Missing name : Routine public.encrypt holds a parameter without a name at position 2 +[INFO] Routines generated : Total: 509.982ms, +52.111ms +[INFO] Generation finished: public: Total: 510.052ms, +0.07ms +[INFO] Affected files: 121 +[INFO] Modified files: 0 +[INFO] No modified files : This code generation run has not produced any file modifications. +This means, the schema has not changed, and no other parameters (jOOQ version, driver version, database version, +and any configuration elements) have changed either. +In automated builds, it is recommended to prevent unnecessary code generation runs. 
This run took: 500.067ms +Possible means to prevent this: +- Use manual code generation and check in generated sources: https://www.jooq.org/doc/latest/manual/code-generation/codegen-version-control/ +- Use schema version providers: https://www.jooq.org/doc/latest/manual/code-generation/codegen-advanced/codegen-config-database/codegen-database-version-providers/ +- Use gradle tasks and inputs: https://docs.gradle.org/current/userguide/incremental_build.html +[INFO] Removing excess files +[INFO] Source directory: /home/user/project/target/generated-sources/jooq added. +[INFO] Node v20.18.1 is already installed. +[INFO] > my-webapp-frontend@0.1.0 postinstall +[INFO] > ./scripts/git-settings.sh +[INFO] added 1667 packages, and audited 1668 packages in 3s +[INFO] 354 packages are looking for funding +[INFO] run `npm fund` for details +[INFO] 30 vulnerabilities (9 low, 5 moderate, 15 high, 1 critical) +[INFO] To address issues that do not require attention, run: +[INFO] npm audit fix +[INFO] To address all issues (including breaking changes), run: +[INFO] npm audit fix --force +[INFO] Run `npm audit` for details. +[INFO] > my-webapp-frontend@0.1.0 prebuild +[INFO] > node ./scripts/prebuild.js +[INFO] > my-webapp-frontend@0.1.0 build +[INFO] > npm run build:stage && npm run build:prod +[INFO] > my-webapp-frontend@0.1.0 build:stage +[INFO] > env-cmd -f .env.stage react-app-rewired build && rm -rf dist-stage && mv build dist-stage +[INFO] (node:542456) ExperimentalWarning: Importing JSON modules is an experimental feature and might change at any time +[INFO] (Use `node --trace-warnings ...` to show where the warning was created) +[INFO] Creating an optimized production build... +[INFO] Compiled successfully. 
+[INFO] File sizes after gzip: +[INFO] 257.55 kB build/static/js/main.js +[INFO] 40.41 kB build/static/js/962.f23c413d.chunk.js +[INFO] 8.43 kB build/static/css/main.d7534f51.css +[INFO] 918 B build/static/js/636.3320b849.chunk.js +[INFO] The project was built assuming it is hosted at /. +[INFO] You can control this with the homepage field in your package.json. +[INFO] The build folder is ready to be deployed. +[INFO] You may serve it with a static server: +[INFO] npm install -g serve +[INFO] serve -s build +[INFO] Find out more about deployment here: +[INFO] https://cra.link/deployment +[INFO] Copying 2 resources from src/main/resources to target/classes +[INFO] Copying 145 resources from frontend/dist-prod to target/classes/static/dist-prod +[INFO] [stdout] Loading class org.jspecify.annotations.Nullable +[INFO] [stdout] Loading class com.example.webapp.shared.TeamId +[INFO] [stdout] Loading class java.time.Instant +[INFO] [stdout] Suggestion: annotation 'org.jspecify.annotations.Nullable' supports 'TYPE_PARAMETER' or 'TYPE_USE' target. 
+[INFO] [stdout] Parsing 'com.example.webapp.GlobalControllerExceptionHandler$VndErrors' +[INFO] [stdout] Parsing Spring component: com.example.webapp.user.api.UserApiController +[INFO] [stdout] Parsing Spring component: com.example.webapp.user.api.CompanyApiController +[INFO] [stdout] Parsing Spring component: com.example.webapp.sync.api.UserImportApiController +[INFO] [stdout] Parsing 'com.example.webapp.user.api.UserDetailsDto' used in 'UserApiController.getUserDetails' +[INFO] [stdout] Parsing 'com.example.webapp.user.api.AuthContextDto' used in 'SelfController.self' +[INFO] [stdout] Parsing 'com.example.webapp.user.CompanyResource' used in 'CompanyApiController.getCompany' +[INFO] [stdout] Writing declarations to: /home/user/project/frontend/src/types/app.d.ts +[INFO] BUILD SUCCESS +[INFO] Total time: 36.332 s diff --git a/tests/fixtures/mvn_dep_tree_beacon.txt b/tests/fixtures/mvn_dep_tree_beacon.txt new file mode 100644 index 000000000..6d3f6d58b --- /dev/null +++ b/tests/fixtures/mvn_dep_tree_beacon.txt @@ -0,0 +1,652 @@ +[INFO] Scanning for projects... 
+[INFO] +[INFO] -----------------------< com.skillpanel:beacon >------------------------ +[INFO] Building beacon 1.0-SNAPSHOT +[INFO] from pom.xml +[INFO] --------------------------------[ jar ]--------------------------------- +[INFO] +[INFO] --- dependency:3.9.0:tree (default-cli) @ beacon --- +[INFO] com.skillpanel:beacon:jar:1.0-SNAPSHOT +[INFO] +- org.springframework.boot:spring-boot-starter-web:jar:4.0.3:compile +[INFO] | +- org.springframework.boot:spring-boot-starter-jackson:jar:4.0.3:compile (version managed from 4.0.3; scope not updated to compile) +[INFO] | | +- (org.springframework.boot:spring-boot-starter:jar:4.0.3:compile - version managed from 4.0.3; omitted for duplicate) +[INFO] | | \- org.springframework.boot:spring-boot-jackson:jar:4.0.3:compile (version managed from 4.0.3) +[INFO] | | +- (org.springframework.boot:spring-boot:jar:4.0.3:compile - version managed from 4.0.3; omitted for duplicate) +[INFO] | | \- (tools.jackson.core:jackson-databind:jar:3.0.4:compile - version managed from 3.0.4; omitted for duplicate) +[INFO] | +- org.springframework.boot:spring-boot-starter-tomcat:jar:4.0.3:compile (version managed from 4.0.3; scope not updated to compile) +[INFO] | | +- (org.springframework.boot:spring-boot-starter:jar:4.0.3:compile - version managed from 4.0.3; omitted for duplicate) +[INFO] | | +- org.springframework.boot:spring-boot-starter-tomcat-runtime:jar:4.0.3:compile (version managed from 4.0.3) +[INFO] | | | +- (org.springframework.boot:spring-boot-tomcat:jar:4.0.3:compile - version managed from 4.0.3; omitted for duplicate) +[INFO] | | | +- (org.springframework.boot:spring-boot-web-server:jar:4.0.3:compile - version managed from 4.0.3; omitted for duplicate) +[INFO] | | | +- (jakarta.annotation:jakarta.annotation-api:jar:3.0.0:compile - version managed from 3.0.0; omitted for duplicate) +[INFO] | | | +- org.apache.tomcat.embed:tomcat-embed-core:jar:11.0.18:compile (version managed from 11.0.18) +[INFO] | | | +- 
(org.apache.tomcat.embed:tomcat-embed-el:jar:11.0.18:compile - version managed from 11.0.18; omitted for duplicate) +[INFO] | | | \- org.apache.tomcat.embed:tomcat-embed-websocket:jar:11.0.18:compile (version managed from 11.0.18) +[INFO] | | | \- (org.apache.tomcat.embed:tomcat-embed-core:jar:11.0.18:compile - version managed from 11.0.18; omitted for duplicate) +[INFO] | | \- org.springframework.boot:spring-boot-tomcat:jar:4.0.3:compile (version managed from 4.0.3) +[INFO] | | +- (org.springframework.boot:spring-boot-web-server:jar:4.0.3:compile - version managed from 4.0.3; omitted for duplicate) +[INFO] | | +- (org.apache.tomcat.embed:tomcat-embed-core:jar:11.0.18:compile - version managed from 11.0.18; omitted for duplicate) +[INFO] | | \- (jakarta.annotation:jakarta.annotation-api:jar:3.0.0:runtime - version managed from 3.0.0; omitted for duplicate) +[INFO] | +- org.springframework.boot:spring-boot-http-converter:jar:4.0.3:compile (version managed from 4.0.3; scope not updated to compile) +[INFO] | | +- org.springframework.boot:spring-boot:jar:4.0.3:compile (version managed from 4.0.3; scope not updated to compile) +[INFO] | | | +- (org.springframework:spring-core:jar:7.0.5:compile - version managed from 7.0.5; omitted for duplicate) +[INFO] | | | \- (org.springframework:spring-context:jar:7.0.5:compile - version managed from 7.0.5; omitted for duplicate) +[INFO] | | \- org.springframework:spring-web:jar:7.0.5:compile (version managed from 7.0.5; scope not updated to compile) +[INFO] | | +- (org.springframework:spring-beans:jar:7.0.5:compile - version managed from 7.0.5; omitted for duplicate) +[INFO] | | +- (org.springframework:spring-core:jar:7.0.5:compile - version managed from 7.0.5; omitted for duplicate) +[INFO] | | \- (io.micrometer:micrometer-observation:jar:1.16.3:compile - version managed from 1.16.3; omitted for duplicate) +[INFO] | \- org.springframework.boot:spring-boot-webmvc:jar:4.0.3:compile (version managed from 4.0.3; scope not updated to 
compile) +[INFO] | +- org.springframework.boot:spring-boot-servlet:jar:4.0.3:compile (version managed from 4.0.3) +[INFO] | | +- (org.springframework.boot:spring-boot:jar:4.0.3:compile - version managed from 4.0.3; omitted for duplicate) +[INFO] | | \- (org.springframework:spring-web:jar:7.0.5:compile - version managed from 7.0.5; omitted for duplicate) +[INFO] | +- (org.springframework:spring-web:jar:7.0.5:compile - version managed from 7.0.5; omitted for duplicate) +[INFO] | +- org.springframework:spring-webmvc:jar:7.0.5:compile (version managed from 7.0.5) +[INFO] | | +- (org.springframework:spring-aop:jar:7.0.5:compile - version managed from 7.0.5; omitted for duplicate) +[INFO] | | +- (org.springframework:spring-beans:jar:7.0.5:compile - version managed from 7.0.5; omitted for duplicate) +[INFO] | | +- (org.springframework:spring-context:jar:7.0.5:compile - version managed from 7.0.5; omitted for duplicate) +[INFO] | | +- (org.springframework:spring-core:jar:7.0.5:compile - version managed from 7.0.5; omitted for duplicate) +[INFO] | | +- (org.springframework:spring-expression:jar:7.0.5:compile - version managed from 7.0.5; omitted for duplicate) +[INFO] | | \- (org.springframework:spring-web:jar:7.0.5:compile - version managed from 7.0.5; omitted for duplicate) +[INFO] | \- (org.springframework.boot:spring-boot-http-converter:jar:4.0.3:runtime - version managed from 4.0.3; omitted for duplicate) +[INFO] +- org.springframework.boot:spring-boot-starter-webflux:jar:4.0.3:compile +[INFO] | +- org.springframework.boot:spring-boot-starter:jar:4.0.3:compile (version managed from 4.0.3; scope not updated to compile) +[INFO] | | +- org.springframework.boot:spring-boot-starter-logging:jar:4.0.3:compile (version managed from 4.0.3) +[INFO] | | | +- ch.qos.logback:logback-classic:jar:1.5.32:compile (version managed from 1.5.32) +[INFO] | | | | +- ch.qos.logback:logback-core:jar:1.5.32:compile (version managed from 1.5.32) +[INFO] | | | | \- 
(org.slf4j:slf4j-api:jar:2.0.17:compile - version managed from 2.0.17; omitted for duplicate) +[INFO] | | | +- org.apache.logging.log4j:log4j-to-slf4j:jar:2.25.3:compile (version managed from 2.25.3) +[INFO] | | | | +- org.apache.logging.log4j:log4j-api:jar:2.25.3:compile (version managed from 2.25.3) +[INFO] | | | | \- (org.slf4j:slf4j-api:jar:2.0.17:compile - version managed from 2.0.17; omitted for duplicate) +[INFO] | | | \- org.slf4j:jul-to-slf4j:jar:2.0.17:compile (version managed from 2.0.17) +[INFO] | | | \- (org.slf4j:slf4j-api:jar:2.0.17:compile - version managed from 2.0.17; omitted for duplicate) +[INFO] | | +- (org.springframework.boot:spring-boot-autoconfigure:jar:4.0.3:compile - version managed from 4.0.3; omitted for duplicate) +[INFO] | | +- jakarta.annotation:jakarta.annotation-api:jar:3.0.0:compile (version managed from 3.0.0; scope not updated to compile) +[INFO] | | \- (org.yaml:snakeyaml:jar:2.5:compile - version managed from 2.5; omitted for duplicate) +[INFO] | +- (org.springframework.boot:spring-boot-starter-jackson:jar:4.0.3:compile - version managed from 4.0.3; omitted for duplicate) +[INFO] | +- org.springframework.boot:spring-boot-starter-reactor-netty:jar:4.0.3:compile (version managed from 4.0.3) +[INFO] | | +- (org.springframework.boot:spring-boot-starter:jar:4.0.3:compile - version managed from 4.0.3; omitted for duplicate) +[INFO] | | +- org.springframework.boot:spring-boot-reactor:jar:4.0.3:compile (version managed from 4.0.3) +[INFO] | | | +- (org.springframework.boot:spring-boot:jar:4.0.3:compile - version managed from 4.0.3; omitted for duplicate) +[INFO] | | | \- (io.projectreactor:reactor-core:jar:3.8.3:compile - version managed from 3.8.3; omitted for duplicate) +[INFO] | | \- org.springframework.boot:spring-boot-reactor-netty:jar:4.0.3:compile (version managed from 4.0.3) +[INFO] | | +- (org.springframework.boot:spring-boot-web-server:jar:4.0.3:compile - version managed from 4.0.3; omitted for duplicate) +[INFO] | | +- 
io.projectreactor.netty:reactor-netty-http:jar:1.3.3:compile (version managed from 1.3.3) +[INFO] | | | +- io.netty:netty-codec-http:jar:4.2.10.Final:compile (version managed from 4.2.10.Final) +[INFO] | | | | +- (io.netty:netty-common:jar:4.2.10.Final:compile - version managed from 4.2.10.Final; omitted for duplicate) +[INFO] | | | | +- (io.netty:netty-buffer:jar:4.2.10.Final:compile - version managed from 4.2.10.Final; omitted for duplicate) +[INFO] | | | | +- (io.netty:netty-transport:jar:4.2.10.Final:compile - version managed from 4.2.10.Final; omitted for duplicate) +[INFO] | | | | +- (io.netty:netty-codec-base:jar:4.2.10.Final:compile - version managed from 4.2.10.Final; omitted for duplicate) +[INFO] | | | | +- (io.netty:netty-codec-compression:jar:4.2.10.Final:compile - version managed from 4.2.10.Final; omitted for duplicate) +[INFO] | | | | \- (io.netty:netty-handler:jar:4.2.10.Final:compile - version managed from 4.2.10.Final; omitted for duplicate) +[INFO] | | | +- io.netty:netty-codec-http2:jar:4.2.10.Final:compile (version managed from 4.2.10.Final) +[INFO] | | | | +- (io.netty:netty-common:jar:4.2.10.Final:compile - version managed from 4.2.10.Final; omitted for duplicate) +[INFO] | | | | +- (io.netty:netty-buffer:jar:4.2.10.Final:compile - version managed from 4.2.10.Final; omitted for duplicate) +[INFO] | | | | +- (io.netty:netty-transport:jar:4.2.10.Final:compile - version managed from 4.2.10.Final; omitted for duplicate) +[INFO] | | | | +- (io.netty:netty-codec-base:jar:4.2.10.Final:compile - version managed from 4.2.10.Final; omitted for duplicate) +[INFO] | | | | +- (io.netty:netty-handler:jar:4.2.10.Final:compile - version managed from 4.2.10.Final; omitted for duplicate) +[INFO] | | | | \- (io.netty:netty-codec-http:jar:4.2.10.Final:compile - version managed from 4.2.10.Final; omitted for duplicate) +[INFO] | | | +- io.netty:netty-codec-http3:jar:4.2.10.Final:compile (version managed from 4.2.10.Final) +[INFO] | | | | +- 
(io.netty:netty-common:jar:4.2.10.Final:compile - version managed from 4.2.10.Final; omitted for duplicate) +[INFO] | | | | +- (io.netty:netty-buffer:jar:4.2.10.Final:compile - version managed from 4.2.10.Final; omitted for duplicate) +[INFO] | | | | +- (io.netty:netty-codec-base:jar:4.2.10.Final:compile - version managed from 4.2.10.Final; omitted for duplicate) +[INFO] | | | | +- (io.netty:netty-codec-http:jar:4.2.10.Final:compile - version managed from 4.2.10.Final; omitted for duplicate) +[INFO] | | | | +- (io.netty:netty-codec-compression:jar:4.2.10.Final:compile - version managed from 4.2.10.Final; omitted for duplicate) +[INFO] | | | | +- (io.netty:netty-handler:jar:4.2.10.Final:compile - version managed from 4.2.10.Final; omitted for duplicate) +[INFO] | | | | +- (io.netty:netty-transport-native-unix-common:jar:4.2.10.Final:compile - version managed from 4.2.10.Final; omitted for duplicate) +[INFO] | | | | +- (io.netty:netty-transport:jar:4.2.10.Final:compile - version managed from 4.2.10.Final; omitted for duplicate) +[INFO] | | | | +- (io.netty:netty-resolver:jar:4.2.10.Final:compile - version managed from 4.2.10.Final; omitted for duplicate) +[INFO] | | | | +- io.netty:netty-codec-classes-quic:jar:4.2.10.Final:compile (version managed from 4.2.10.Final; scope not updated to compile) +[INFO] | | | | +- io.netty:netty-codec-native-quic:jar:linux-x86_64:4.2.10.Final:runtime (version managed from 4.2.10.Final; scope managed from runtime) +[INFO] | | | | | \- (io.netty:netty-codec-classes-quic:jar:4.2.10.Final:runtime - version managed from 4.2.10.Final; omitted for duplicate) +[INFO] | | | | +- io.netty:netty-codec-native-quic:jar:linux-aarch_64:4.2.10.Final:runtime (version managed from 4.2.10.Final; scope managed from runtime) +[INFO] | | | | | \- (io.netty:netty-codec-classes-quic:jar:4.2.10.Final:runtime - version managed from 4.2.10.Final; omitted for duplicate) +[INFO] | | | | +- io.netty:netty-codec-native-quic:jar:osx-x86_64:4.2.10.Final:runtime 
(version managed from 4.2.10.Final; scope managed from runtime) +[INFO] | | | | | \- (io.netty:netty-codec-classes-quic:jar:4.2.10.Final:runtime - version managed from 4.2.10.Final; omitted for duplicate) +[INFO] | | | | +- io.netty:netty-codec-native-quic:jar:osx-aarch_64:4.2.10.Final:runtime (version managed from 4.2.10.Final; scope managed from runtime) +[INFO] | | | | | \- (io.netty:netty-codec-classes-quic:jar:4.2.10.Final:runtime - version managed from 4.2.10.Final; omitted for duplicate) +[INFO] | | | | \- io.netty:netty-codec-native-quic:jar:windows-x86_64:4.2.10.Final:runtime (version managed from 4.2.10.Final; scope managed from runtime) +[INFO] | | | | \- (io.netty:netty-codec-classes-quic:jar:4.2.10.Final:runtime - version managed from 4.2.10.Final; omitted for duplicate) +[INFO] | | | +- (io.netty:netty-resolver-dns:jar:4.2.10.Final:compile - version managed from 4.2.10.Final; omitted for duplicate) +[INFO] | | | +- io.netty:netty-resolver-dns-native-macos:jar:osx-x86_64:4.2.10.Final:runtime (version managed from 4.2.10.Final; scope managed from compile) +[INFO] | | | | \- io.netty:netty-resolver-dns-classes-macos:jar:4.2.10.Final:runtime (version managed from 4.2.10.Final) +[INFO] | | | | +- (io.netty:netty-common:jar:4.2.10.Final:runtime - version managed from 4.2.10.Final; omitted for duplicate) +[INFO] | | | | +- (io.netty:netty-resolver-dns:jar:4.2.10.Final:runtime - version managed from 4.2.10.Final; omitted for duplicate) +[INFO] | | | | \- (io.netty:netty-transport-native-unix-common:jar:4.2.10.Final:runtime - version managed from 4.2.10.Final; omitted for duplicate) +[INFO] | | | +- io.netty:netty-transport-native-epoll:jar:linux-x86_64:4.2.10.Final:runtime (version managed from 4.2.10.Final; scope managed from compile) +[INFO] | | | | +- (io.netty:netty-common:jar:4.2.10.Final:runtime - version managed from 4.2.10.Final; omitted for duplicate) +[INFO] | | | | +- (io.netty:netty-buffer:jar:4.2.10.Final:runtime - version managed from 
4.2.10.Final; omitted for duplicate) +[INFO] | | | | +- (io.netty:netty-transport:jar:4.2.10.Final:runtime - version managed from 4.2.10.Final; omitted for duplicate) +[INFO] | | | | +- (io.netty:netty-transport-native-unix-common:jar:4.2.10.Final:runtime - version managed from 4.2.10.Final; omitted for duplicate) +[INFO] | | | | \- io.netty:netty-transport-classes-epoll:jar:4.2.10.Final:runtime (version managed from 4.2.10.Final) +[INFO] | | | | +- (io.netty:netty-common:jar:4.2.10.Final:runtime - version managed from 4.2.10.Final; omitted for duplicate) +[INFO] | | | | +- (io.netty:netty-buffer:jar:4.2.10.Final:runtime - version managed from 4.2.10.Final; omitted for duplicate) +[INFO] | | | | +- (io.netty:netty-transport:jar:4.2.10.Final:runtime - version managed from 4.2.10.Final; omitted for duplicate) +[INFO] | | | | \- (io.netty:netty-transport-native-unix-common:jar:4.2.10.Final:runtime - version managed from 4.2.10.Final; omitted for duplicate) +[INFO] | | | +- io.projectreactor.netty:reactor-netty-core:jar:1.3.3:compile (version managed from 1.3.3) +[INFO] | | | | +- (io.netty:netty-handler:jar:4.2.10.Final:compile - version managed from 4.2.10.Final; omitted for duplicate) +[INFO] | | | | +- io.netty:netty-handler-proxy:jar:4.2.10.Final:compile (version managed from 4.2.10.Final) +[INFO] | | | | | +- (io.netty:netty-common:jar:4.2.10.Final:compile - version managed from 4.2.10.Final; omitted for duplicate) +[INFO] | | | | | +- (io.netty:netty-buffer:jar:4.2.10.Final:compile - version managed from 4.2.10.Final; omitted for duplicate) +[INFO] | | | | | +- (io.netty:netty-transport:jar:4.2.10.Final:compile - version managed from 4.2.10.Final; omitted for duplicate) +[INFO] | | | | | +- (io.netty:netty-codec-base:jar:4.2.10.Final:compile - version managed from 4.2.10.Final; omitted for duplicate) +[INFO] | | | | | +- io.netty:netty-codec-socks:jar:4.2.10.Final:compile (version managed from 4.2.10.Final) +[INFO] | | | | | | +- 
(io.netty:netty-common:jar:4.2.10.Final:compile - version managed from 4.2.10.Final; omitted for duplicate) +[INFO] | | | | | | +- (io.netty:netty-buffer:jar:4.2.10.Final:compile - version managed from 4.2.10.Final; omitted for duplicate) +[INFO] | | | | | | +- (io.netty:netty-transport:jar:4.2.10.Final:compile - version managed from 4.2.10.Final; omitted for duplicate) +[INFO] | | | | | | \- (io.netty:netty-codec-base:jar:4.2.10.Final:compile - version managed from 4.2.10.Final; omitted for duplicate) +[INFO] | | | | | +- (io.netty:netty-codec-http:jar:4.2.10.Final:compile - version managed from 4.2.10.Final; omitted for duplicate) +[INFO] | | | | | \- (io.netty:netty-handler:jar:4.2.10.Final:compile - version managed from 4.2.10.Final; omitted for duplicate) +[INFO] | | | | +- (io.netty:netty-resolver-dns:jar:4.2.10.Final:compile - version managed from 4.2.10.Final; omitted for duplicate) +[INFO] | | | | +- (io.netty:netty-resolver-dns-native-macos:jar:osx-x86_64:4.2.10.Final:runtime - version managed from 4.2.10.Final; scope managed from compile; omitted for duplicate) +[INFO] | | | | +- (io.netty:netty-transport-native-epoll:jar:linux-x86_64:4.2.10.Final:runtime - version managed from 4.2.10.Final; scope managed from compile; omitted for duplicate) +[INFO] | | | | +- (io.projectreactor:reactor-core:jar:3.8.3:compile - version managed from 3.8.3; omitted for duplicate) +[INFO] | | | | \- (org.jspecify:jspecify:jar:1.0.0:compile - version managed from 1.0.0; omitted for duplicate) +[INFO] | | | +- (io.projectreactor:reactor-core:jar:3.8.3:compile - version managed from 3.8.3; omitted for duplicate) +[INFO] | | | \- (org.jspecify:jspecify:jar:1.0.0:compile - version managed from 1.0.0; omitted for duplicate) +[INFO] | | +- (org.springframework:spring-web:jar:7.0.5:compile - version managed from 7.0.5; omitted for duplicate) +[INFO] | | \- (org.springframework.boot:spring-boot-netty:jar:4.0.3:runtime - version managed from 4.0.3; omitted for duplicate) +[INFO] | \- 
org.springframework.boot:spring-boot-webflux:jar:4.0.3:compile (version managed from 4.0.3) +[INFO] | +- (org.springframework.boot:spring-boot:jar:4.0.3:compile - version managed from 4.0.3; omitted for duplicate) +[INFO] | +- org.springframework.boot:spring-boot-http-codec:jar:4.0.3:compile (version managed from 4.0.3) +[INFO] | | +- (org.springframework.boot:spring-boot:jar:4.0.3:compile - version managed from 4.0.3; omitted for duplicate) +[INFO] | | \- (org.springframework:spring-web:jar:7.0.5:compile - version managed from 7.0.5; omitted for duplicate) +[INFO] | +- org.springframework:spring-webflux:jar:7.0.5:compile (version managed from 7.0.5) +[INFO] | | +- (org.springframework:spring-beans:jar:7.0.5:compile - version managed from 7.0.5; omitted for duplicate) +[INFO] | | +- (org.springframework:spring-core:jar:7.0.5:compile - version managed from 7.0.5; omitted for duplicate) +[INFO] | | +- (org.springframework:spring-web:jar:7.0.5:compile - version managed from 7.0.5; omitted for duplicate) +[INFO] | | \- (io.projectreactor:reactor-core:jar:3.8.3:compile - version managed from 3.8.3; omitted for duplicate) +[INFO] | \- org.springframework.boot:spring-boot-web-server:jar:4.0.3:compile (version managed from 4.0.3; scope not updated to compile) +[INFO] | +- (org.springframework.boot:spring-boot:jar:4.0.3:compile - version managed from 4.0.3; omitted for duplicate) +[INFO] | \- (org.springframework:spring-web:jar:7.0.5:compile - version managed from 7.0.5; omitted for duplicate) +[INFO] +- org.springframework.boot:spring-boot-starter-data-jpa:jar:4.0.3:compile +[INFO] | +- (org.springframework.boot:spring-boot-starter:jar:4.0.3:compile - version managed from 4.0.3; omitted for duplicate) +[INFO] | +- org.springframework.boot:spring-boot-starter-jdbc:jar:4.0.3:compile (version managed from 4.0.3) +[INFO] | | +- (org.springframework.boot:spring-boot-starter:jar:4.0.3:compile - version managed from 4.0.3; omitted for duplicate) +[INFO] | | +- 
(org.springframework.boot:spring-boot-jdbc:jar:4.0.3:compile - version managed from 4.0.3; omitted for duplicate) +[INFO] | | \- com.zaxxer:HikariCP:jar:7.0.2:compile (version managed from 7.0.2) +[INFO] | | \- (org.slf4j:slf4j-api:jar:2.0.17:compile - version managed from 2.0.17; omitted for duplicate) +[INFO] | +- org.springframework.boot:spring-boot-data-jpa:jar:4.0.3:compile (version managed from 4.0.3) +[INFO] | | +- org.springframework.boot:spring-boot-data-commons:jar:4.0.3:compile (version managed from 4.0.3) +[INFO] | | | +- (org.springframework.boot:spring-boot:jar:4.0.3:compile - version managed from 4.0.3; omitted for duplicate) +[INFO] | | | +- org.springframework.boot:spring-boot-persistence:jar:4.0.3:compile (version managed from 4.0.3) +[INFO] | | | | +- (org.springframework.boot:spring-boot:jar:4.0.3:compile - version managed from 4.0.3; omitted for duplicate) +[INFO] | | | | \- (org.springframework:spring-tx:jar:7.0.5:compile - version managed from 7.0.5; omitted for duplicate) +[INFO] | | | \- org.springframework.data:spring-data-commons:jar:4.0.3:compile (version managed from 4.0.3) +[INFO] | | | +- (org.springframework:spring-core:jar:7.0.5:compile - version managed from 7.0.3; omitted for duplicate) +[INFO] | | | +- (org.springframework:spring-beans:jar:7.0.5:compile - version managed from 7.0.3; omitted for duplicate) +[INFO] | | | \- (org.slf4j:slf4j-api:jar:2.0.17:compile - version managed from 2.0.17; omitted for duplicate) +[INFO] | | +- org.springframework.boot:spring-boot-hibernate:jar:4.0.3:compile (version managed from 4.0.3) +[INFO] | | | +- org.springframework.boot:spring-boot-jpa:jar:4.0.3:compile (version managed from 4.0.3) +[INFO] | | | | +- (org.springframework.boot:spring-boot-jdbc:jar:4.0.3:compile - version managed from 4.0.3; omitted for duplicate) +[INFO] | | | | +- (org.springframework.boot:spring-boot-transaction:jar:4.0.3:compile - version managed from 4.0.3; omitted for duplicate) +[INFO] | | | | +- 
jakarta.persistence:jakarta.persistence-api:jar:3.2.0:compile (version managed from 3.2.0) +[INFO] | | | | \- (org.springframework:spring-orm:jar:7.0.5:compile - version managed from 7.0.5; omitted for duplicate) +[INFO] | | | +- org.hibernate.orm:hibernate-core:jar:7.2.4.Final:compile (version managed from 7.2.4.Final) +[INFO] | | | | +- (jakarta.persistence:jakarta.persistence-api:jar:3.2.0:compile - version managed from 3.2.0; omitted for duplicate) +[INFO] | | | | +- jakarta.transaction:jakarta.transaction-api:jar:2.0.1:compile (version managed from 2.0.1) +[INFO] | | | | +- (org.jboss.logging:jboss-logging:jar:3.6.2.Final:runtime - version managed from 3.6.1.Final; omitted for duplicate) +[INFO] | | | | +- org.hibernate.models:hibernate-models:jar:1.0.1:runtime +[INFO] | | | | | \- (org.jboss.logging:jboss-logging:jar:3.6.2.Final:runtime - version managed from 3.5.0.Final; omitted for duplicate) +[INFO] | | | | +- (com.fasterxml:classmate:jar:1.7.3:runtime - version managed from 1.7.1; omitted for duplicate) +[INFO] | | | | +- (net.bytebuddy:byte-buddy:jar:1.17.8:runtime - version managed from 1.17.8; omitted for duplicate) +[INFO] | | | | +- (jakarta.xml.bind:jakarta.xml.bind-api:jar:4.0.4:runtime - version managed from 4.0.4; omitted for duplicate) +[INFO] | | | | +- org.glassfish.jaxb:jaxb-runtime:jar:4.0.6:runtime (version managed from 4.0.6) +[INFO] | | | | | \- org.glassfish.jaxb:jaxb-core:jar:4.0.6:runtime (version managed from 4.0.6) +[INFO] | | | | | +- (jakarta.xml.bind:jakarta.xml.bind-api:jar:4.0.4:runtime - version managed from 4.0.4; omitted for duplicate) +[INFO] | | | | | +- (jakarta.activation:jakarta.activation-api:jar:2.1.4:runtime - version managed from 2.1.4; omitted for duplicate) +[INFO] | | | | | +- org.eclipse.angus:angus-activation:jar:2.0.3:runtime +[INFO] | | | | | | \- (jakarta.activation:jakarta.activation-api:jar:2.1.4:runtime - version managed from 2.1.4; omitted for duplicate) +[INFO] | | | | | +- 
org.glassfish.jaxb:txw2:jar:4.0.6:runtime (version managed from 4.0.6) +[INFO] | | | | | \- com.sun.istack:istack-commons-runtime:jar:4.1.2:runtime +[INFO] | | | | +- jakarta.inject:jakarta.inject-api:jar:2.0.1:runtime (version managed from 2.0.1) +[INFO] | | | | \- (org.antlr:antlr4-runtime:jar:4.13.2:runtime - omitted for duplicate) +[INFO] | | | \- org.springframework:spring-orm:jar:7.0.5:compile (version managed from 7.0.5) +[INFO] | | | +- (org.springframework:spring-beans:jar:7.0.5:compile - version managed from 7.0.5; omitted for duplicate) +[INFO] | | | +- (org.springframework:spring-core:jar:7.0.5:compile - version managed from 7.0.5; omitted for duplicate) +[INFO] | | | +- (org.springframework:spring-jdbc:jar:7.0.5:compile - version managed from 7.0.5; omitted for duplicate) +[INFO] | | | \- (org.springframework:spring-tx:jar:7.0.5:compile - version managed from 7.0.5; omitted for duplicate) +[INFO] | | +- org.springframework.data:spring-data-jpa:jar:4.0.3:compile (version managed from 4.0.3) +[INFO] | | | +- (org.springframework.data:spring-data-commons:jar:4.0.3:compile - version managed from 4.0.3; omitted for duplicate) +[INFO] | | | +- (org.springframework:spring-orm:jar:7.0.5:compile - version managed from 7.0.3; omitted for duplicate) +[INFO] | | | +- (org.springframework:spring-context:jar:7.0.5:compile - version managed from 7.0.3; omitted for duplicate) +[INFO] | | | +- (org.springframework:spring-aop:jar:7.0.5:compile - version managed from 7.0.3; omitted for duplicate) +[INFO] | | | +- (org.springframework:spring-tx:jar:7.0.5:compile - version managed from 7.0.3; omitted for duplicate) +[INFO] | | | +- (org.springframework:spring-beans:jar:7.0.5:compile - version managed from 7.0.3; omitted for duplicate) +[INFO] | | | +- (org.springframework:spring-core:jar:7.0.5:compile - version managed from 7.0.3; omitted for duplicate) +[INFO] | | | +- org.antlr:antlr4-runtime:jar:4.13.2:compile +[INFO] | | | +- 
(jakarta.annotation:jakarta.annotation-api:jar:3.0.0:compile - version managed from 2.0.0; omitted for duplicate) +[INFO] | | | \- (org.slf4j:slf4j-api:jar:2.0.17:compile - version managed from 2.0.17; omitted for duplicate) +[INFO] | | \- org.springframework:spring-aspects:jar:7.0.5:compile (version managed from 7.0.5) +[INFO] | | \- org.aspectj:aspectjweaver:jar:1.9.25.1:compile (version managed from 1.9.25) +[INFO] | \- org.springframework.boot:spring-boot-jdbc:jar:4.0.3:compile (version managed from 4.0.3) +[INFO] | +- (org.springframework.boot:spring-boot:jar:4.0.3:compile - version managed from 4.0.3; omitted for duplicate) +[INFO] | +- org.springframework.boot:spring-boot-sql:jar:4.0.3:compile (version managed from 4.0.3) +[INFO] | | \- (org.springframework.boot:spring-boot:jar:4.0.3:compile - version managed from 4.0.3; omitted for duplicate) +[INFO] | +- org.springframework.boot:spring-boot-transaction:jar:4.0.3:compile (version managed from 4.0.3; scope not updated to compile) +[INFO] | | +- (org.springframework.boot:spring-boot:jar:4.0.3:compile - version managed from 4.0.3; omitted for duplicate) +[INFO] | | +- (org.springframework.boot:spring-boot-persistence:jar:4.0.3:compile - version managed from 4.0.3; omitted for duplicate) +[INFO] | | \- (org.springframework:spring-tx:jar:7.0.5:compile - version managed from 7.0.5; omitted for duplicate) +[INFO] | \- org.springframework:spring-jdbc:jar:7.0.5:compile (version managed from 7.0.5) +[INFO] | +- (org.springframework:spring-beans:jar:7.0.5:compile - version managed from 7.0.5; omitted for duplicate) +[INFO] | +- (org.springframework:spring-core:jar:7.0.5:compile - version managed from 7.0.5; omitted for duplicate) +[INFO] | \- (org.springframework:spring-tx:jar:7.0.5:compile - version managed from 7.0.5; omitted for duplicate) +[INFO] +- org.springframework.boot:spring-boot-starter-data-redis:jar:4.0.3:compile +[INFO] | +- (org.springframework.boot:spring-boot-starter:jar:4.0.3:compile - version 
managed from 4.0.3; omitted for duplicate) +[INFO] | \- org.springframework.boot:spring-boot-data-redis:jar:4.0.3:compile (version managed from 4.0.3) +[INFO] | +- (org.springframework.boot:spring-boot:jar:4.0.3:compile - version managed from 4.0.3; omitted for duplicate) +[INFO] | +- (org.springframework.boot:spring-boot-data-commons:jar:4.0.3:compile - version managed from 4.0.3; omitted for duplicate) +[INFO] | +- (org.springframework.boot:spring-boot-transaction:jar:4.0.3:compile - version managed from 4.0.3; omitted for duplicate) +[INFO] | +- io.lettuce:lettuce-core:jar:6.8.2.RELEASE:compile (version managed from 6.8.2.RELEASE) +[INFO] | | +- redis.clients.authentication:redis-authx-core:jar:0.1.1-beta2:compile +[INFO] | | | \- (org.slf4j:slf4j-api:jar:2.0.17:compile - version managed from 1.7.36; omitted for duplicate) +[INFO] | | +- io.netty:netty-common:jar:4.2.10.Final:compile (version managed from 4.1.125.Final) +[INFO] | | +- io.netty:netty-handler:jar:4.2.10.Final:compile (version managed from 4.1.125.Final) +[INFO] | | | +- (io.netty:netty-common:jar:4.2.10.Final:compile - version managed from 4.2.10.Final; omitted for duplicate) +[INFO] | | | +- io.netty:netty-resolver:jar:4.2.10.Final:compile (version managed from 4.2.10.Final) +[INFO] | | | | \- (io.netty:netty-common:jar:4.2.10.Final:compile - version managed from 4.2.10.Final; omitted for duplicate) +[INFO] | | | +- io.netty:netty-buffer:jar:4.2.10.Final:compile (version managed from 4.2.10.Final) +[INFO] | | | | \- (io.netty:netty-common:jar:4.2.10.Final:compile - version managed from 4.2.10.Final; omitted for duplicate) +[INFO] | | | +- (io.netty:netty-transport:jar:4.2.10.Final:compile - version managed from 4.2.10.Final; omitted for duplicate) +[INFO] | | | +- io.netty:netty-transport-native-unix-common:jar:4.2.10.Final:compile (version managed from 4.2.10.Final; scope not updated to compile) +[INFO] | | | | +- (io.netty:netty-common:jar:4.2.10.Final:compile - version managed from 
4.2.10.Final; omitted for duplicate) +[INFO] | | | | +- (io.netty:netty-buffer:jar:4.2.10.Final:compile - version managed from 4.2.10.Final; omitted for duplicate) +[INFO] | | | | \- (io.netty:netty-transport:jar:4.2.10.Final:compile - version managed from 4.2.10.Final; omitted for duplicate) +[INFO] | | | \- io.netty:netty-codec-base:jar:4.2.10.Final:compile (version managed from 4.2.10.Final) +[INFO] | | | +- (io.netty:netty-common:jar:4.2.10.Final:compile - version managed from 4.2.10.Final; omitted for duplicate) +[INFO] | | | +- (io.netty:netty-buffer:jar:4.2.10.Final:compile - version managed from 4.2.10.Final; omitted for duplicate) +[INFO] | | | \- (io.netty:netty-transport:jar:4.2.10.Final:compile - version managed from 4.2.10.Final; omitted for duplicate) +[INFO] | | +- io.netty:netty-transport:jar:4.2.10.Final:compile (version managed from 4.1.125.Final) +[INFO] | | | +- (io.netty:netty-common:jar:4.2.10.Final:compile - version managed from 4.2.10.Final; omitted for duplicate) +[INFO] | | | +- (io.netty:netty-buffer:jar:4.2.10.Final:compile - version managed from 4.2.10.Final; omitted for duplicate) +[INFO] | | | \- (io.netty:netty-resolver:jar:4.2.10.Final:compile - version managed from 4.2.10.Final; omitted for duplicate) +[INFO] | | +- (io.projectreactor:reactor-core:jar:3.8.3:compile - version managed from 3.6.6; omitted for duplicate) +[INFO] | | \- io.netty:netty-resolver-dns:jar:4.2.10.Final:compile (version managed from 4.1.125.Final; scope not updated to compile) +[INFO] | | +- (io.netty:netty-common:jar:4.2.10.Final:compile - version managed from 4.2.10.Final; omitted for duplicate) +[INFO] | | +- (io.netty:netty-buffer:jar:4.2.10.Final:compile - version managed from 4.2.10.Final; omitted for duplicate) +[INFO] | | +- (io.netty:netty-resolver:jar:4.2.10.Final:compile - version managed from 4.2.10.Final; omitted for duplicate) +[INFO] | | +- (io.netty:netty-transport:jar:4.2.10.Final:compile - version managed from 4.2.10.Final; omitted for 
duplicate) +[INFO] | | +- (io.netty:netty-codec-base:jar:4.2.10.Final:compile - version managed from 4.2.10.Final; omitted for duplicate) +[INFO] | | +- io.netty:netty-codec-dns:jar:4.2.10.Final:compile (version managed from 4.2.10.Final) +[INFO] | | | +- (io.netty:netty-common:jar:4.2.10.Final:compile - version managed from 4.2.10.Final; omitted for duplicate) +[INFO] | | | +- (io.netty:netty-buffer:jar:4.2.10.Final:compile - version managed from 4.2.10.Final; omitted for duplicate) +[INFO] | | | +- (io.netty:netty-transport:jar:4.2.10.Final:compile - version managed from 4.2.10.Final; omitted for duplicate) +[INFO] | | | \- (io.netty:netty-codec-base:jar:4.2.10.Final:compile - version managed from 4.2.10.Final; omitted for duplicate) +[INFO] | | \- (io.netty:netty-handler:jar:4.2.10.Final:compile - version managed from 4.2.10.Final; omitted for duplicate) +[INFO] | +- org.springframework.data:spring-data-redis:jar:4.0.3:compile (version managed from 4.0.3) +[INFO] | | +- org.springframework.data:spring-data-keyvalue:jar:4.0.3:compile (version managed from 4.0.3) +[INFO] | | | +- (org.springframework.data:spring-data-commons:jar:4.0.3:compile - version managed from 4.0.3; omitted for duplicate) +[INFO] | | | +- (org.springframework:spring-context:jar:7.0.5:compile - version managed from 7.0.3; omitted for duplicate) +[INFO] | | | +- (org.springframework:spring-tx:jar:7.0.5:compile - version managed from 7.0.3; omitted for duplicate) +[INFO] | | | \- (org.slf4j:slf4j-api:jar:2.0.17:compile - version managed from 2.0.17; omitted for duplicate) +[INFO] | | +- (org.springframework:spring-tx:jar:7.0.5:compile - version managed from 7.0.3; omitted for duplicate) +[INFO] | | +- org.springframework:spring-oxm:jar:7.0.5:compile (version managed from 7.0.3) +[INFO] | | | +- (org.springframework:spring-beans:jar:7.0.5:compile - version managed from 7.0.5; omitted for duplicate) +[INFO] | | | +- (org.springframework:spring-core:jar:7.0.5:compile - version managed from 7.0.5; 
omitted for duplicate) +[INFO] | | | \- (jakarta.xml.bind:jakarta.xml.bind-api:jar:4.0.4:runtime - version managed from 3.0.1; omitted for duplicate) +[INFO] | | +- (org.springframework:spring-aop:jar:7.0.5:compile - version managed from 7.0.3; omitted for duplicate) +[INFO] | | +- org.springframework:spring-context-support:jar:7.0.5:compile (version managed from 7.0.3) +[INFO] | | | +- (org.springframework:spring-beans:jar:7.0.5:compile - version managed from 7.0.5; omitted for duplicate) +[INFO] | | | +- (org.springframework:spring-context:jar:7.0.5:compile - version managed from 7.0.5; omitted for duplicate) +[INFO] | | | \- (org.springframework:spring-core:jar:7.0.5:compile - version managed from 7.0.5; omitted for duplicate) +[INFO] | | \- (org.slf4j:slf4j-api:jar:2.0.17:compile - version managed from 2.0.17; omitted for duplicate) +[INFO] | \- org.springframework.boot:spring-boot-netty:jar:4.0.3:runtime (version managed from 4.0.3) +[INFO] | +- (org.springframework.boot:spring-boot:jar:4.0.3:runtime - version managed from 4.0.3; omitted for duplicate) +[INFO] | \- (io.netty:netty-common:jar:4.2.10.Final:runtime - version managed from 4.2.10.Final; omitted for duplicate) +[INFO] +- org.springframework.boot:spring-boot-starter-validation:jar:4.0.3:compile +[INFO] | +- (org.springframework.boot:spring-boot-starter:jar:4.0.3:compile - version managed from 4.0.3; omitted for duplicate) +[INFO] | \- org.springframework.boot:spring-boot-validation:jar:4.0.3:compile (version managed from 4.0.3) +[INFO] | +- (org.springframework.boot:spring-boot:jar:4.0.3:compile - version managed from 4.0.3; omitted for duplicate) +[INFO] | +- org.apache.tomcat.embed:tomcat-embed-el:jar:11.0.18:compile (version managed from 11.0.18) +[INFO] | \- org.hibernate.validator:hibernate-validator:jar:9.0.1.Final:compile (version managed from 9.0.1.Final) +[INFO] | +- jakarta.validation:jakarta.validation-api:jar:3.1.1:compile (version managed from 3.1.1) +[INFO] | +- 
org.jboss.logging:jboss-logging:jar:3.6.2.Final:compile (version managed from 3.6.1.Final; scope not updated to compile) +[INFO] | \- com.fasterxml:classmate:jar:1.7.3:compile (version managed from 1.7.0; scope not updated to compile) +[INFO] +- org.springframework.boot:spring-boot-starter-actuator:jar:4.0.3:compile +[INFO] | +- (org.springframework.boot:spring-boot-starter:jar:4.0.3:compile - version managed from 4.0.3; omitted for duplicate) +[INFO] | +- org.springframework.boot:spring-boot-starter-micrometer-metrics:jar:4.0.3:compile (version managed from 4.0.3) +[INFO] | | +- (org.springframework.boot:spring-boot-starter:jar:4.0.3:compile - version managed from 4.0.3; omitted for duplicate) +[INFO] | | \- org.springframework.boot:spring-boot-micrometer-metrics:jar:4.0.3:compile (version managed from 4.0.3) +[INFO] | | +- (org.springframework.boot:spring-boot:jar:4.0.3:compile - version managed from 4.0.3; omitted for duplicate) +[INFO] | | +- org.springframework.boot:spring-boot-micrometer-observation:jar:4.0.3:compile (version managed from 4.0.3) +[INFO] | | | +- (org.springframework.boot:spring-boot:jar:4.0.3:compile - version managed from 4.0.3; omitted for duplicate) +[INFO] | | | \- (io.micrometer:micrometer-observation:jar:1.16.3:compile - version managed from 1.16.3; omitted for duplicate) +[INFO] | | \- (io.micrometer:micrometer-core:jar:1.16.3:compile - version managed from 1.16.3; omitted for duplicate) +[INFO] | +- org.springframework.boot:spring-boot-actuator-autoconfigure:jar:4.0.3:compile (version managed from 4.0.3) +[INFO] | | +- (org.springframework.boot:spring-boot-autoconfigure:jar:4.0.3:compile - version managed from 4.0.3; omitted for duplicate) +[INFO] | | \- org.springframework.boot:spring-boot-actuator:jar:4.0.3:compile (version managed from 4.0.3) +[INFO] | | \- (org.springframework.boot:spring-boot:jar:4.0.3:compile - version managed from 4.0.3; omitted for duplicate) +[INFO] | +- 
org.springframework.boot:spring-boot-health:jar:4.0.3:compile (version managed from 4.0.3) +[INFO] | | \- (org.springframework.boot:spring-boot:jar:4.0.3:compile - version managed from 4.0.3; omitted for duplicate) +[INFO] | +- io.micrometer:micrometer-observation:jar:1.16.3:compile (version managed from 1.16.3) +[INFO] | | +- (org.jspecify:jspecify:jar:1.0.0:compile - version managed from 1.0.0; omitted for duplicate) +[INFO] | | \- io.micrometer:micrometer-commons:jar:1.16.3:compile (version managed from 1.16.3) +[INFO] | | \- (org.jspecify:jspecify:jar:1.0.0:compile - version managed from 1.0.0; omitted for duplicate) +[INFO] | \- io.micrometer:micrometer-jakarta9:jar:1.16.3:compile (version managed from 1.16.3) +[INFO] | +- (org.jspecify:jspecify:jar:1.0.0:compile - version managed from 1.0.0; omitted for duplicate) +[INFO] | +- io.micrometer:micrometer-core:jar:1.16.3:compile (version managed from 1.16.3) +[INFO] | | +- (org.jspecify:jspecify:jar:1.0.0:compile - version managed from 1.0.0; omitted for duplicate) +[INFO] | | +- (io.micrometer:micrometer-commons:jar:1.16.3:compile - version managed from 1.16.3; omitted for duplicate) +[INFO] | | +- (io.micrometer:micrometer-observation:jar:1.16.3:compile - version managed from 1.16.3; omitted for duplicate) +[INFO] | | +- org.hdrhistogram:HdrHistogram:jar:2.2.2:runtime +[INFO] | | \- org.latencyutils:LatencyUtils:jar:2.0.3:runtime +[INFO] | +- (io.micrometer:micrometer-commons:jar:1.16.3:compile - version managed from 1.16.3; omitted for duplicate) +[INFO] | \- (io.micrometer:micrometer-observation:jar:1.16.3:compile - version managed from 1.16.3; omitted for duplicate) +[INFO] +- org.springframework.boot:spring-boot-starter-security:jar:4.0.3:compile +[INFO] | +- (org.springframework.boot:spring-boot-starter:jar:4.0.3:compile - version managed from 4.0.3; omitted for duplicate) +[INFO] | +- org.springframework.boot:spring-boot-security:jar:4.0.3:compile (version managed from 4.0.3) +[INFO] | | +- 
(org.springframework.boot:spring-boot:jar:4.0.3:compile - version managed from 4.0.3; omitted for duplicate) +[INFO] | | +- org.springframework.security:spring-security-config:jar:7.0.3:compile (version managed from 7.0.3) +[INFO] | | | +- (org.springframework.security:spring-security-core:jar:7.0.3:compile - version managed from 7.0.3; omitted for duplicate) +[INFO] | | | +- (org.springframework:spring-aop:jar:7.0.5:compile - version managed from 7.0.4; omitted for duplicate) +[INFO] | | | +- (org.springframework:spring-beans:jar:7.0.5:compile - version managed from 7.0.4; omitted for duplicate) +[INFO] | | | +- (org.springframework:spring-context:jar:7.0.5:compile - version managed from 7.0.4; omitted for duplicate) +[INFO] | | | \- (org.springframework:spring-core:jar:7.0.5:compile - version managed from 7.0.4; omitted for duplicate) +[INFO] | | \- (org.springframework.security:spring-security-web:jar:7.0.3:compile - version managed from 7.0.3; omitted for duplicate) +[INFO] | \- org.springframework:spring-aop:jar:7.0.5:compile (version managed from 7.0.5) +[INFO] | +- org.springframework:spring-beans:jar:7.0.5:compile (version managed from 7.0.5) +[INFO] | | \- (org.springframework:spring-core:jar:7.0.5:compile - version managed from 7.0.5; omitted for duplicate) +[INFO] | \- (org.springframework:spring-core:jar:7.0.5:compile - version managed from 7.0.5; omitted for duplicate) +[INFO] +- com.devskiller:toolkit:jar:1.3-38:compile +[INFO] | +- org.slf4j:slf4j-api:jar:2.0.17:compile (version managed from 2.0.7) +[INFO] | +- (com.nimbusds:nimbus-jose-jwt:jar:10.5:compile - omitted for conflict with 9.37) +[INFO] | +- org.bouncycastle:bcprov-jdk18on:jar:1.82:compile +[INFO] | +- org.bouncycastle:bcpkix-jdk18on:jar:1.82:compile +[INFO] | | \- org.bouncycastle:bcutil-jdk18on:jar:1.82:compile +[INFO] | | \- (org.bouncycastle:bcprov-jdk18on:jar:1.82:compile - omitted for duplicate) +[INFO] | +- com.courier:courier-java:jar:3.3.0:compile +[INFO] | | +- 
(com.squareup.okhttp3:okhttp:jar:4.12.0:compile - omitted for conflict with 5.0.0-alpha.14) +[INFO] | | +- (com.fasterxml.jackson.core:jackson-databind:jar:2.20.2:compile - version managed from 2.13.0; omitted for duplicate) +[INFO] | | +- com.fasterxml.jackson.datatype:jackson-datatype-jdk8:jar:2.20.2:compile (version managed from 2.12.3) +[INFO] | | | +- (com.fasterxml.jackson.core:jackson-core:jar:2.20.2:compile - version managed from 2.20.2; omitted for duplicate) +[INFO] | | | \- (com.fasterxml.jackson.core:jackson-databind:jar:2.20.2:compile - version managed from 2.20.2; omitted for duplicate) +[INFO] | | \- (com.fasterxml.jackson.datatype:jackson-datatype-jsr310:jar:2.20.2:compile - version managed from 2.12.3; omitted for duplicate) +[INFO] | +- (com.devskiller.friendly-id:friendly-id:jar:1.1.0:compile - omitted for conflict with 2.0.0-SNAPSHOT) +[INFO] | +- org.apache.commons:commons-lang3:jar:3.19.0:compile (version managed from 3.13.0; scope not updated to compile) +[INFO] | +- jakarta.servlet:jakarta.servlet-api:jar:6.1.0:compile (version managed from 6.0.0) +[INFO] | +- com.github.ulisesbocchio:jasypt-spring-boot-starter:jar:3.0.5:compile +[INFO] | | \- com.github.ulisesbocchio:jasypt-spring-boot:jar:3.0.5:compile +[INFO] | | \- org.jasypt:jasypt:jar:1.9.3:compile +[INFO] | +- io.github.resilience4j:resilience4j-retry:jar:2.0.2:compile +[INFO] | | +- io.github.resilience4j:resilience4j-core:jar:2.0.2:compile +[INFO] | | | \- (org.slf4j:slf4j-api:jar:2.0.17:runtime - version managed from 1.7.30; omitted for duplicate) +[INFO] | | \- (org.slf4j:slf4j-api:jar:2.0.17:runtime - version managed from 1.7.30; omitted for duplicate) +[INFO] | \- io.vavr:vavr:jar:0.10.4:compile +[INFO] | \- io.vavr:vavr-match:jar:0.10.4:compile +[INFO] +- com.devskiller.friendly-id:friendly-id:jar:2.0.0-SNAPSHOT:compile (scope not updated to compile) +[INFO] | \- org.jspecify:jspecify:jar:1.0.0:compile (version managed from 1.0.0; scope not updated to compile) +[INFO] +- 
com.devskiller.friendly-id:friendly-id-jackson2-datatype:jar:2.0.0-SNAPSHOT:compile +[INFO] | +- (com.devskiller.friendly-id:friendly-id:jar:2.0.0-SNAPSHOT:compile - omitted for duplicate) +[INFO] | +- com.fasterxml.jackson.core:jackson-annotations:jar:2.20:compile (version managed from 2.18.2) +[INFO] | +- com.fasterxml.jackson.core:jackson-core:jar:2.20.2:compile (version managed from 2.18.2) +[INFO] | +- com.fasterxml.jackson.core:jackson-databind:jar:2.20.2:compile (version managed from 2.18.2) +[INFO] | | +- (com.fasterxml.jackson.core:jackson-annotations:jar:2.20:compile - version managed from 2.20; omitted for duplicate) +[INFO] | | \- (com.fasterxml.jackson.core:jackson-core:jar:2.20.2:compile - version managed from 2.20.2; omitted for duplicate) +[INFO] | \- com.fasterxml.jackson.module:jackson-module-parameter-names:jar:2.20.2:compile (version managed from 2.18.2) +[INFO] | +- (com.fasterxml.jackson.core:jackson-core:jar:2.20.2:compile - version managed from 2.20.2; omitted for duplicate) +[INFO] | \- (com.fasterxml.jackson.core:jackson-databind:jar:2.20.2:compile - version managed from 2.20.2; omitted for duplicate) +[INFO] +- com.devskiller.friendly-id:friendly-id-jackson-datatype:jar:2.0.0-SNAPSHOT:compile +[INFO] | +- (com.devskiller.friendly-id:friendly-id:jar:2.0.0-SNAPSHOT:compile - omitted for duplicate) +[INFO] | +- (com.fasterxml.jackson.core:jackson-annotations:jar:2.20:compile - version managed from 2.20; omitted for duplicate) +[INFO] | +- tools.jackson.core:jackson-core:jar:3.0.4:compile (version managed from 3.0.3) +[INFO] | \- tools.jackson.core:jackson-databind:jar:3.0.4:compile (version managed from 3.0.3) +[INFO] | +- (com.fasterxml.jackson.core:jackson-annotations:jar:2.20:compile - version managed from 2.20; omitted for duplicate) +[INFO] | \- (tools.jackson.core:jackson-core:jar:3.0.4:compile - version managed from 3.0.4; omitted for duplicate) +[INFO] +- org.springframework.boot:spring-boot-starter-amqp:jar:4.0.3:compile +[INFO] | 
+- (org.springframework.boot:spring-boot-starter:jar:4.0.3:compile - version managed from 4.0.3; omitted for duplicate) +[INFO] | \- org.springframework.boot:spring-boot-amqp:jar:4.0.3:compile (version managed from 4.0.3) +[INFO] | +- (org.springframework.boot:spring-boot:jar:4.0.3:compile - version managed from 4.0.3; omitted for duplicate) +[INFO] | +- org.springframework:spring-messaging:jar:7.0.5:compile (version managed from 7.0.5) +[INFO] | | +- (org.springframework:spring-beans:jar:7.0.5:compile - version managed from 7.0.5; omitted for duplicate) +[INFO] | | \- (org.springframework:spring-core:jar:7.0.5:compile - version managed from 7.0.5; omitted for duplicate) +[INFO] | +- (org.springframework.amqp:spring-rabbit:jar:4.0.2:compile - version managed from 4.0.2; omitted for duplicate) +[INFO] | \- (org.springframework.boot:spring-boot-transaction:jar:4.0.3:runtime - version managed from 4.0.3; omitted for duplicate) +[INFO] +- nl.big-o:liqp:jar:0.9.2.3:compile +[INFO] | +- (com.fasterxml.jackson.core:jackson-annotations:jar:2.20:compile - version managed from 2.13.2; omitted for duplicate) +[INFO] | +- (com.fasterxml.jackson.core:jackson-core:jar:2.20.2:compile - version managed from 2.13.2; omitted for duplicate) +[INFO] | +- (com.fasterxml.jackson.core:jackson-databind:jar:2.20.2:compile - version managed from 2.13.4.2; omitted for duplicate) +[INFO] | +- com.fasterxml.jackson.datatype:jackson-datatype-jsr310:jar:2.20.2:compile (version managed from 2.13.2) +[INFO] | | +- (com.fasterxml.jackson.core:jackson-annotations:jar:2.20:compile - version managed from 2.20; omitted for duplicate) +[INFO] | | +- (com.fasterxml.jackson.core:jackson-core:jar:2.20.2:compile - version managed from 2.20.2; omitted for duplicate) +[INFO] | | \- (com.fasterxml.jackson.core:jackson-databind:jar:2.20.2:compile - version managed from 2.20.2; omitted for duplicate) +[INFO] | \- ua.co.k:strftime4j:jar:1.0.6:compile +[INFO] +- com.ibm.icu:icu4j:jar:78.2:compile +[INFO] +- 
com.mailjet:mailjet-client:jar:6.0.1:compile +[INFO] | +- com.squareup.okhttp3:okhttp:jar:5.0.0-alpha.14:compile +[INFO] | | +- com.squareup.okio:okio-jvm:jar:3.9.0:compile +[INFO] | | | \- (org.jetbrains.kotlin:kotlin-stdlib:jar:2.2.21:compile - version managed from 1.9.21; omitted for duplicate) +[INFO] | | \- org.jetbrains.kotlin:kotlin-stdlib:jar:2.2.21:compile (version managed from 1.9.23) +[INFO] | | \- org.jetbrains:annotations:jar:13.0:compile (scope not updated to compile) +[INFO] | +- org.json:json:jar:20231013:compile +[INFO] | \- com.google.code.gson:gson:jar:2.13.2:compile (version managed from 2.9.0) +[INFO] | \- com.google.errorprone:error_prone_annotations:jar:2.41.0:compile +[INFO] +- com.slack.api:slack-api-client:jar:1.47.0:compile +[INFO] | +- com.slack.api:slack-api-model:jar:1.47.0:compile +[INFO] | | \- (com.google.code.gson:gson:jar:2.13.2:compile - version managed from 2.12.1; omitted for duplicate) +[INFO] | +- (com.squareup.okhttp3:okhttp:jar:4.12.0:compile - omitted for conflict with 5.0.0-alpha.14) +[INFO] | +- (com.google.code.gson:gson:jar:2.13.2:compile - version managed from 2.12.1; omitted for duplicate) +[INFO] | \- (org.slf4j:slf4j-api:jar:2.0.17:compile - version managed from 1.7.36; omitted for duplicate) +[INFO] +- com.nimbusds:nimbus-jose-jwt:jar:9.37:compile (scope not updated to compile) +[INFO] | \- com.github.stephenc.jcip:jcip-annotations:jar:1.0-1:compile +[INFO] +- org.jsoup:jsoup:jar:1.22.1:compile +[INFO] +- commons-validator:commons-validator:jar:1.10.1:compile +[INFO] | +- commons-beanutils:commons-beanutils:jar:1.11.0:compile +[INFO] | | +- (commons-logging:commons-logging:jar:1.3.5:compile - version managed from 1.3.5; omitted for duplicate) +[INFO] | | \- (commons-collections:commons-collections:jar:3.2.2:compile - omitted for duplicate) +[INFO] | +- commons-digester:commons-digester:jar:2.1:compile +[INFO] | +- commons-logging:commons-logging:jar:1.3.5:compile (version managed from 1.3.5) +[INFO] | \- 
commons-collections:commons-collections:jar:3.2.2:compile +[INFO] +- org.postgresql:postgresql:jar:42.7.10:runtime +[INFO] | \- org.checkerframework:checker-qual:jar:3.52.0:runtime +[INFO] +- org.liquibase:liquibase-core:jar:5.0.1:compile +[INFO] | +- com.opencsv:opencsv:jar:5.12.0:compile +[INFO] | | +- (org.apache.commons:commons-lang3:jar:3.19.0:compile - version managed from 3.18.0; omitted for duplicate) +[INFO] | | +- (org.apache.commons:commons-text:jar:1.13.1:compile - omitted for conflict with 1.14.0) +[INFO] | | \- (org.apache.commons:commons-collections4:jar:4.5.0:compile - omitted for duplicate) +[INFO] | +- org.yaml:snakeyaml:jar:2.5:compile (version managed from 2.5) +[INFO] | +- javax.xml.bind:jaxb-api:jar:2.3.1:compile +[INFO] | +- org.apache.commons:commons-collections4:jar:4.5.0:compile +[INFO] | +- org.apache.commons:commons-text:jar:1.14.0:compile +[INFO] | | \- (org.apache.commons:commons-lang3:jar:3.19.0:compile - version managed from 3.18.0; omitted for duplicate) +[INFO] | +- (org.apache.commons:commons-lang3:jar:3.19.0:compile - version managed from 3.19.0; omitted for duplicate) +[INFO] | \- commons-io:commons-io:jar:2.20.0:compile (scope not updated to compile) +[INFO] +- org.springframework.boot:spring-boot-starter-jooq:jar:4.0.3:compile +[INFO] | +- (org.springframework.boot:spring-boot-starter:jar:4.0.3:compile - version managed from 4.0.3; omitted for duplicate) +[INFO] | +- (org.springframework.boot:spring-boot-starter-jdbc:jar:4.0.3:compile - version managed from 4.0.3; omitted for duplicate) +[INFO] | +- org.springframework.boot:spring-boot-jooq:jar:4.0.3:compile (version managed from 4.0.3) +[INFO] | | +- (org.springframework.boot:spring-boot-jdbc:jar:4.0.3:compile - version managed from 4.0.3; omitted for duplicate) +[INFO] | | \- (org.springframework.boot:spring-boot-transaction:jar:4.0.3:compile - version managed from 4.0.3; omitted for duplicate) +[INFO] | \- (org.springframework.boot:spring-boot-jdbc:jar:4.0.3:compile - 
version managed from 4.0.3; omitted for duplicate) +[INFO] +- org.jooq.pro:jooq:jar:3.20.8:compile +[INFO] | \- io.r2dbc:r2dbc-spi:jar:1.0.0.RELEASE:compile (version managed from 1.0.0.RELEASE) +[INFO] | \- org.reactivestreams:reactive-streams:jar:1.0.4:compile (version managed from 1.0.3) +[INFO] +- org.projectlombok:lombok:jar:1.18.42:compile +[INFO] +- net.logstash.logback:logstash-logback-encoder:jar:8.0:compile +[INFO] | \- (com.fasterxml.jackson.core:jackson-databind:jar:2.20.2:compile - version managed from 2.17.2; omitted for duplicate) +[INFO] +- com.datadoghq:dd-trace-api:jar:1.60.1:compile +[INFO] | \- (org.slf4j:slf4j-api:jar:2.0.17:compile - version managed from 1.7.30; omitted for duplicate) +[INFO] +- org.springframework.boot:spring-boot-starter-test:jar:4.0.3:test +[INFO] | +- (org.springframework.boot:spring-boot-starter:jar:4.0.3:test - version managed from 4.0.3; omitted for duplicate) +[INFO] | +- org.springframework.boot:spring-boot-test:jar:4.0.3:test (version managed from 4.0.3) +[INFO] | | +- (org.springframework.boot:spring-boot:jar:4.0.3:test - version managed from 4.0.3; omitted for duplicate) +[INFO] | | \- (org.springframework:spring-test:jar:7.0.5:test - version managed from 7.0.5; omitted for duplicate) +[INFO] | +- org.springframework.boot:spring-boot-test-autoconfigure:jar:4.0.3:test (version managed from 4.0.3) +[INFO] | | \- (org.springframework.boot:spring-boot-test:jar:4.0.3:test - version managed from 4.0.3; omitted for duplicate) +[INFO] | +- com.jayway.jsonpath:json-path:jar:2.10.0:test (version managed from 2.10.0) +[INFO] | | +- (net.minidev:json-smart:jar:2.6.0:test - version managed from 2.6.0; omitted for duplicate) +[INFO] | | \- (org.slf4j:slf4j-api:jar:2.0.17:test - version managed from 2.0.17; omitted for duplicate) +[INFO] | +- jakarta.xml.bind:jakarta.xml.bind-api:jar:4.0.4:runtime (version managed from 4.0.4; scope not updated to runtime) +[INFO] | | \- jakarta.activation:jakarta.activation-api:jar:2.1.4:runtime 
(version managed from 2.1.4) +[INFO] | +- net.minidev:json-smart:jar:2.6.0:test (version managed from 2.6.0) +[INFO] | | \- net.minidev:accessors-smart:jar:2.6.0:test +[INFO] | | \- org.ow2.asm:asm:jar:9.7.1:test +[INFO] | +- org.assertj:assertj-core:jar:3.27.7:test (version managed from 3.27.7) +[INFO] | | \- net.bytebuddy:byte-buddy:jar:1.17.8:runtime (version managed from 1.18.3; scope not updated to runtime) +[INFO] | +- (org.awaitility:awaitility:jar:4.3.0:test - version managed from 4.3.0; omitted for duplicate) +[INFO] | +- org.hamcrest:hamcrest:jar:3.0:test (version managed from 3.0) +[INFO] | +- org.junit.jupiter:junit-jupiter:jar:6.0.3:test (version managed from 6.0.3) +[INFO] | | +- org.junit.jupiter:junit-jupiter-api:jar:6.0.3:test (version managed from 6.0.3) +[INFO] | | | +- org.opentest4j:opentest4j:jar:1.3.0:test +[INFO] | | | +- org.junit.platform:junit-platform-commons:jar:6.0.3:test (version managed from 6.0.3) +[INFO] | | | | +- (org.apiguardian:apiguardian-api:jar:1.1.2:test - omitted for duplicate) +[INFO] | | | | \- (org.jspecify:jspecify:jar:1.0.0:test - version managed from 1.0.0; omitted for duplicate) +[INFO] | | | +- org.apiguardian:apiguardian-api:jar:1.1.2:test +[INFO] | | | \- (org.jspecify:jspecify:jar:1.0.0:test - version managed from 1.0.0; omitted for duplicate) +[INFO] | | +- org.junit.jupiter:junit-jupiter-params:jar:6.0.3:test (version managed from 6.0.3) +[INFO] | | | +- (org.junit.jupiter:junit-jupiter-api:jar:6.0.3:test - version managed from 6.0.3; omitted for duplicate) +[INFO] | | | +- (org.apiguardian:apiguardian-api:jar:1.1.2:test - omitted for duplicate) +[INFO] | | | \- (org.jspecify:jspecify:jar:1.0.0:test - version managed from 1.0.0; omitted for duplicate) +[INFO] | | \- org.junit.jupiter:junit-jupiter-engine:jar:6.0.3:test (version managed from 6.0.3) +[INFO] | | +- org.junit.platform:junit-platform-engine:jar:6.0.3:test (version managed from 6.0.3) +[INFO] | | | +- (org.opentest4j:opentest4j:jar:1.3.0:test - 
omitted for duplicate) +[INFO] | | | +- (org.junit.platform:junit-platform-commons:jar:6.0.3:test - version managed from 6.0.3; omitted for duplicate) +[INFO] | | | +- (org.apiguardian:apiguardian-api:jar:1.1.2:test - omitted for duplicate) +[INFO] | | | \- (org.jspecify:jspecify:jar:1.0.0:test - version managed from 1.0.0; omitted for duplicate) +[INFO] | | +- (org.junit.jupiter:junit-jupiter-api:jar:6.0.3:test - version managed from 6.0.3; omitted for duplicate) +[INFO] | | +- (org.apiguardian:apiguardian-api:jar:1.1.2:test - omitted for duplicate) +[INFO] | | \- (org.jspecify:jspecify:jar:1.0.0:test - version managed from 1.0.0; omitted for duplicate) +[INFO] | +- org.mockito:mockito-core:jar:5.20.0:test (version managed from 5.20.0) +[INFO] | | +- (net.bytebuddy:byte-buddy:jar:1.17.8:test - version managed from 1.17.7; omitted for duplicate) +[INFO] | | +- net.bytebuddy:byte-buddy-agent:jar:1.17.8:test (version managed from 1.17.7) +[INFO] | | \- org.objenesis:objenesis:jar:3.3:test +[INFO] | +- org.mockito:mockito-junit-jupiter:jar:5.20.0:test (version managed from 5.20.0) +[INFO] | | +- (org.mockito:mockito-core:jar:5.20.0:test - version managed from 5.20.0; omitted for duplicate) +[INFO] | | \- (org.junit.jupiter:junit-jupiter-api:jar:6.0.3:test - version managed from 5.13.4; omitted for duplicate) +[INFO] | +- org.skyscreamer:jsonassert:jar:1.5.3:test (version managed from 1.5.3) +[INFO] | | \- com.vaadin.external.google:android-json:jar:0.0.20131108.vaadin1:test +[INFO] | +- org.springframework:spring-core:jar:7.0.5:compile (version managed from 7.0.5; scope not updated to compile) +[INFO] | | +- (commons-logging:commons-logging:jar:1.3.5:compile - version managed from 1.3.5; omitted for duplicate) +[INFO] | | \- (org.jspecify:jspecify:jar:1.0.0:compile - version managed from 1.0.0; omitted for duplicate) +[INFO] | +- org.springframework:spring-test:jar:7.0.5:test (version managed from 7.0.5) +[INFO] | | \- (org.springframework:spring-core:jar:7.0.5:test - 
version managed from 7.0.5; omitted for duplicate) +[INFO] | \- org.xmlunit:xmlunit-core:jar:2.10.4:test (version managed from 2.10.4) +[INFO] | \- (jakarta.xml.bind:jakarta.xml.bind-api:jar:4.0.4:test - version managed from 2.3.3; omitted for duplicate) +[INFO] +- org.springframework.boot:spring-boot-starter-webmvc-test:jar:4.0.3:test +[INFO] | +- org.springframework.boot:spring-boot-starter-jackson-test:jar:4.0.3:test (version managed from 4.0.3) +[INFO] | | +- (org.springframework.boot:spring-boot-starter-jackson:jar:4.0.3:test - version managed from 4.0.3; omitted for duplicate) +[INFO] | | \- (org.springframework.boot:spring-boot-starter-test:jar:4.0.3:test - version managed from 4.0.3; omitted for duplicate) +[INFO] | +- (org.springframework.boot:spring-boot-starter-test:jar:4.0.3:test - version managed from 4.0.3; omitted for duplicate) +[INFO] | +- org.springframework.boot:spring-boot-starter-webmvc:jar:4.0.3:test (version managed from 4.0.3) +[INFO] | | +- (org.springframework.boot:spring-boot-starter:jar:4.0.3:test - version managed from 4.0.3; omitted for duplicate) +[INFO] | | +- (org.springframework.boot:spring-boot-starter-jackson:jar:4.0.3:test - version managed from 4.0.3; omitted for duplicate) +[INFO] | | +- (org.springframework.boot:spring-boot-starter-tomcat:jar:4.0.3:test - version managed from 4.0.3; omitted for duplicate) +[INFO] | | +- (org.springframework.boot:spring-boot-http-converter:jar:4.0.3:test - version managed from 4.0.3; omitted for duplicate) +[INFO] | | \- (org.springframework.boot:spring-boot-webmvc:jar:4.0.3:test - version managed from 4.0.3; omitted for duplicate) +[INFO] | +- org.springframework.boot:spring-boot-webmvc-test:jar:4.0.3:test (version managed from 4.0.3) +[INFO] | | +- (org.springframework.boot:spring-boot-test-autoconfigure:jar:4.0.3:test - version managed from 4.0.3; omitted for duplicate) +[INFO] | | +- (org.springframework.boot:spring-boot-webmvc:jar:4.0.3:test - version managed from 4.0.3; omitted for 
duplicate) +[INFO] | | +- (org.springframework.boot:spring-boot-http-converter:jar:4.0.3:test - version managed from 4.0.3; omitted for duplicate) +[INFO] | | \- (org.springframework.boot:spring-boot-web-server:jar:4.0.3:test - version managed from 4.0.3; omitted for duplicate) +[INFO] | \- org.springframework.boot:spring-boot-resttestclient:jar:4.0.3:test (version managed from 4.0.3) +[INFO] | +- (org.springframework.boot:spring-boot-test:jar:4.0.3:test - version managed from 4.0.3; omitted for duplicate) +[INFO] | +- (org.springframework.boot:spring-boot-http-converter:jar:4.0.3:test - version managed from 4.0.3; omitted for duplicate) +[INFO] | \- (org.springframework:spring-web:jar:7.0.5:test - version managed from 7.0.5; omitted for duplicate) +[INFO] +- org.springframework.security:spring-security-test:jar:7.0.3:test +[INFO] | +- org.springframework.security:spring-security-core:jar:7.0.3:compile (version managed from 7.0.3; scope not updated to compile) +[INFO] | | +- org.springframework.security:spring-security-crypto:jar:7.0.3:compile (version managed from 7.0.3) +[INFO] | | +- (org.springframework:spring-aop:jar:7.0.5:compile - version managed from 7.0.4; omitted for duplicate) +[INFO] | | +- (org.springframework:spring-beans:jar:7.0.5:compile - version managed from 7.0.4; omitted for duplicate) +[INFO] | | +- org.springframework:spring-context:jar:7.0.5:compile (version managed from 7.0.4) +[INFO] | | | +- (org.springframework:spring-aop:jar:7.0.5:compile - version managed from 7.0.5; omitted for duplicate) +[INFO] | | | +- (org.springframework:spring-beans:jar:7.0.5:compile - version managed from 7.0.5; omitted for duplicate) +[INFO] | | | +- (org.springframework:spring-core:jar:7.0.5:compile - version managed from 7.0.5; omitted for duplicate) +[INFO] | | | +- (org.springframework:spring-expression:jar:7.0.5:compile - version managed from 7.0.5; omitted for duplicate) +[INFO] | | | \- (io.micrometer:micrometer-observation:jar:1.16.3:compile - version 
managed from 1.16.3; omitted for duplicate) +[INFO] | | +- (org.springframework:spring-core:jar:7.0.5:compile - version managed from 7.0.4; omitted for duplicate) +[INFO] | | +- org.springframework:spring-expression:jar:7.0.5:compile (version managed from 7.0.4) +[INFO] | | | \- (org.springframework:spring-core:jar:7.0.5:compile - version managed from 7.0.5; omitted for duplicate) +[INFO] | | \- (io.micrometer:micrometer-observation:jar:1.16.3:compile - version managed from 1.16.3; omitted for duplicate) +[INFO] | +- org.springframework.security:spring-security-web:jar:7.0.3:compile (version managed from 7.0.3; scope not updated to compile) +[INFO] | | +- (org.springframework.security:spring-security-core:jar:7.0.3:compile - version managed from 7.0.3; omitted for duplicate) +[INFO] | | +- (org.springframework:spring-core:jar:7.0.5:compile - version managed from 7.0.4; omitted for duplicate) +[INFO] | | +- (org.springframework:spring-aop:jar:7.0.5:compile - version managed from 7.0.4; omitted for duplicate) +[INFO] | | +- (org.springframework:spring-beans:jar:7.0.5:compile - version managed from 7.0.4; omitted for duplicate) +[INFO] | | +- (org.springframework:spring-context:jar:7.0.5:compile - version managed from 7.0.4; omitted for duplicate) +[INFO] | | +- (org.springframework:spring-expression:jar:7.0.5:compile - version managed from 7.0.4; omitted for duplicate) +[INFO] | | \- (org.springframework:spring-web:jar:7.0.5:compile - version managed from 7.0.4; omitted for duplicate) +[INFO] | +- (org.springframework:spring-core:jar:7.0.5:test - version managed from 7.0.4; omitted for duplicate) +[INFO] | \- (org.springframework:spring-test:jar:7.0.5:test - version managed from 7.0.4; omitted for duplicate) +[INFO] +- org.springframework.boot:spring-boot-testcontainers:jar:4.0.3:test +[INFO] | +- org.springframework.boot:spring-boot-autoconfigure:jar:4.0.3:compile (version managed from 4.0.3; scope not updated to compile) +[INFO] | | \- 
(org.springframework.boot:spring-boot:jar:4.0.3:compile - version managed from 4.0.3; omitted for duplicate) +[INFO] | \- org.testcontainers:testcontainers:jar:2.0.3:test (version managed from 2.0.3) +[INFO] | +- (org.slf4j:slf4j-api:jar:2.0.17:test - version managed from 1.7.36; omitted for duplicate) +[INFO] | +- org.apache.commons:commons-compress:jar:1.28.0:test +[INFO] | | +- commons-codec:commons-codec:jar:1.19.0:test (version managed from 1.19.0) +[INFO] | | +- (commons-io:commons-io:jar:2.20.0:test - omitted for duplicate) +[INFO] | | \- (org.apache.commons:commons-lang3:jar:3.19.0:test - version managed from 3.18.0; omitted for duplicate) +[INFO] | +- org.rnorth.duct-tape:duct-tape:jar:1.0.8:test +[INFO] | | \- (org.jetbrains:annotations:jar:17.0.0:test - omitted for conflict with 13.0) +[INFO] | +- com.github.docker-java:docker-java-api:jar:3.7.0:test +[INFO] | | +- (com.fasterxml.jackson.core:jackson-annotations:jar:2.20:test - version managed from 2.20; omitted for duplicate) +[INFO] | | \- (org.slf4j:slf4j-api:jar:2.0.17:test - version managed from 1.7.30; omitted for duplicate) +[INFO] | \- com.github.docker-java:docker-java-transport-zerodep:jar:3.7.0:test +[INFO] | +- com.github.docker-java:docker-java-transport:jar:3.7.0:test +[INFO] | +- (org.slf4j:slf4j-api:jar:2.0.17:test - version managed from 1.7.36; omitted for duplicate) +[INFO] | \- net.java.dev.jna:jna:jar:5.18.1:test +[INFO] +- org.springframework.amqp:spring-rabbit-test:jar:4.0.2:test +[INFO] | +- org.springframework.amqp:spring-rabbit:jar:4.0.2:compile (version managed from 4.0.2; scope not updated to compile) +[INFO] | | +- org.springframework.amqp:spring-amqp:jar:4.0.2:compile (version managed from 4.0.2) +[INFO] | | | \- (org.springframework:spring-core:jar:7.0.5:compile - version managed from 7.0.3; omitted for duplicate) +[INFO] | | +- com.rabbitmq:amqp-client:jar:5.27.1:compile (version managed from 5.27.1; scope not updated to compile) +[INFO] | | | +- 
(org.slf4j:slf4j-api:jar:2.0.17:compile - version managed from 1.7.36; omitted for duplicate) +[INFO] | | | +- (io.netty:netty-transport:jar:4.2.10.Final:compile - version managed from 4.2.7.Final; omitted for duplicate) +[INFO] | | | +- io.netty:netty-codec:jar:4.2.10.Final:compile (version managed from 4.2.7.Final) +[INFO] | | | | +- (io.netty:netty-common:jar:4.2.10.Final:compile - version managed from 4.2.10.Final; omitted for duplicate) +[INFO] | | | | +- (io.netty:netty-buffer:jar:4.2.10.Final:compile - version managed from 4.2.10.Final; omitted for duplicate) +[INFO] | | | | +- (io.netty:netty-transport:jar:4.2.10.Final:compile - version managed from 4.2.10.Final; omitted for duplicate) +[INFO] | | | | +- (io.netty:netty-codec-base:jar:4.2.10.Final:compile - version managed from 4.2.10.Final; omitted for duplicate) +[INFO] | | | | +- io.netty:netty-codec-compression:jar:4.2.10.Final:compile (version managed from 4.2.10.Final) +[INFO] | | | | | +- (io.netty:netty-common:jar:4.2.10.Final:compile - version managed from 4.2.10.Final; omitted for duplicate) +[INFO] | | | | | +- (io.netty:netty-buffer:jar:4.2.10.Final:compile - version managed from 4.2.10.Final; omitted for duplicate) +[INFO] | | | | | +- (io.netty:netty-transport:jar:4.2.10.Final:compile - version managed from 4.2.10.Final; omitted for duplicate) +[INFO] | | | | | \- (io.netty:netty-codec-base:jar:4.2.10.Final:compile - version managed from 4.2.10.Final; omitted for duplicate) +[INFO] | | | | +- io.netty:netty-codec-protobuf:jar:4.2.10.Final:compile (version managed from 4.2.10.Final) +[INFO] | | | | | +- (io.netty:netty-common:jar:4.2.10.Final:compile - version managed from 4.2.10.Final; omitted for duplicate) +[INFO] | | | | | +- (io.netty:netty-buffer:jar:4.2.10.Final:compile - version managed from 4.2.10.Final; omitted for duplicate) +[INFO] | | | | | +- (io.netty:netty-transport:jar:4.2.10.Final:compile - version managed from 4.2.10.Final; omitted for duplicate) +[INFO] | | | | | \- 
(io.netty:netty-codec-base:jar:4.2.10.Final:compile - version managed from 4.2.10.Final; omitted for duplicate) +[INFO] | | | | \- io.netty:netty-codec-marshalling:jar:4.2.10.Final:compile (version managed from 4.2.10.Final) +[INFO] | | | | +- (io.netty:netty-common:jar:4.2.10.Final:compile - version managed from 4.2.10.Final; omitted for duplicate) +[INFO] | | | | +- (io.netty:netty-buffer:jar:4.2.10.Final:compile - version managed from 4.2.10.Final; omitted for duplicate) +[INFO] | | | | +- (io.netty:netty-transport:jar:4.2.10.Final:compile - version managed from 4.2.10.Final; omitted for duplicate) +[INFO] | | | | \- (io.netty:netty-codec-base:jar:4.2.10.Final:compile - version managed from 4.2.10.Final; omitted for duplicate) +[INFO] | | | \- (io.netty:netty-handler:jar:4.2.10.Final:compile - version managed from 4.2.7.Final; omitted for duplicate) +[INFO] | | +- (org.springframework:spring-context:jar:7.0.5:compile - version managed from 7.0.3; omitted for duplicate) +[INFO] | | +- (org.springframework:spring-messaging:jar:7.0.5:compile - version managed from 7.0.3; omitted for duplicate) +[INFO] | | +- org.springframework:spring-tx:jar:7.0.5:compile (version managed from 7.0.3) +[INFO] | | | +- (org.springframework:spring-beans:jar:7.0.5:compile - version managed from 7.0.5; omitted for duplicate) +[INFO] | | | \- (org.springframework:spring-core:jar:7.0.5:compile - version managed from 7.0.5; omitted for duplicate) +[INFO] | | \- (io.micrometer:micrometer-observation:jar:1.16.3:compile - version managed from 1.16.2; omitted for duplicate) +[INFO] | +- org.springframework.amqp:spring-rabbit-junit:jar:4.0.2:test (version managed from 4.0.2) +[INFO] | | +- (org.springframework:spring-core:jar:7.0.5:test - version managed from 7.0.3; omitted for duplicate) +[INFO] | | +- (org.springframework:spring-test:jar:7.0.5:test - version managed from 7.0.3; omitted for duplicate) +[INFO] | | +- (com.rabbitmq:amqp-client:jar:5.27.1:test - version managed from 5.27.1; 
omitted for duplicate) +[INFO] | | +- (org.springframework:spring-web:jar:7.0.5:test - version managed from 7.0.3; omitted for duplicate) +[INFO] | | +- (org.junit.jupiter:junit-jupiter-api:jar:6.0.3:test - version managed from 6.0.2; omitted for duplicate) +[INFO] | | \- (org.assertj:assertj-core:jar:3.27.7:test - version managed from 3.27.6; omitted for duplicate) +[INFO] | +- org.hamcrest:hamcrest-library:jar:3.0:test (version managed from 3.0) +[INFO] | | \- (org.hamcrest:hamcrest-core:jar:3.0:test - version managed from 3.0; omitted for duplicate) +[INFO] | +- org.hamcrest:hamcrest-core:jar:3.0:test (version managed from 3.0) +[INFO] | | \- (org.hamcrest:hamcrest:jar:3.0:test - version managed from 3.0; omitted for duplicate) +[INFO] | \- (org.mockito:mockito-core:jar:5.20.0:test - version managed from 5.20.0; omitted for duplicate) +[INFO] +- io.projectreactor:reactor-test:jar:3.8.3:test +[INFO] | +- io.projectreactor:reactor-core:jar:3.8.3:compile (version managed from 3.8.3; scope not updated to compile) +[INFO] | | +- (org.reactivestreams:reactive-streams:jar:1.0.4:compile - version managed from 1.0.4; omitted for duplicate) +[INFO] | | \- (org.jspecify:jspecify:jar:1.0.0:compile - version managed from 1.0.0; omitted for duplicate) +[INFO] | \- (org.jspecify:jspecify:jar:1.0.0:test - version managed from 1.0.0; omitted for duplicate) +[INFO] +- org.awaitility:awaitility:jar:4.3.0:test (scope not updated to test) +[INFO] | \- (org.hamcrest:hamcrest:jar:3.0:test - version managed from 2.1; omitted for duplicate) +[INFO] +- org.testcontainers:testcontainers-junit-jupiter:jar:2.0.3:test +[INFO] | \- (org.testcontainers:testcontainers:jar:2.0.3:test - version managed from 2.0.3; omitted for duplicate) +[INFO] +- org.testcontainers:testcontainers-postgresql:jar:2.0.3:test +[INFO] | \- org.testcontainers:testcontainers-jdbc:jar:2.0.3:test (version managed from 2.0.3) +[INFO] | \- org.testcontainers:testcontainers-database-commons:jar:2.0.3:test (version managed 
from 2.0.3) +[INFO] | \- (org.testcontainers:testcontainers:jar:2.0.3:test - version managed from 2.0.3; omitted for duplicate) +[INFO] \- org.testcontainers:testcontainers-rabbitmq:jar:2.0.3:test +[INFO] \- (org.testcontainers:testcontainers:jar:2.0.3:test - version managed from 2.0.3; omitted for duplicate) +[INFO] ------------------------------------------------------------------------ +[INFO] BUILD SUCCESS +[INFO] ------------------------------------------------------------------------ +[INFO] Total time: 0.561 s +[INFO] Finished at: 2026-03-09T20:37:27+01:00 +[INFO] ------------------------------------------------------------------------ diff --git a/tests/fixtures/mvn_dep_tree_conflicts.txt b/tests/fixtures/mvn_dep_tree_conflicts.txt new file mode 100644 index 000000000..72e8bd9ac --- /dev/null +++ b/tests/fixtures/mvn_dep_tree_conflicts.txt @@ -0,0 +1,16 @@ +[INFO] Scanning for projects... +[INFO] +[INFO] --- dependency:3.7.0:tree (default-cli) @ my-app --- +[INFO] com.example:my-app:jar:1.0.0 +[INFO] +- com.example:lib-a:jar:1.0.0:compile +[INFO] | +- com.fasterxml.jackson.core:jackson-databind:jar:2.18.3:compile +[INFO] | | +- com.fasterxml.jackson.core:jackson-core:jar:2.18.3:compile +[INFO] | | \- com.fasterxml.jackson.core:jackson-annotations:jar:2.18.3:compile +[INFO] | \- org.slf4j:slf4j-api:jar:2.0.9:compile +[INFO] +- com.example:lib-b:jar:2.0.0:compile +[INFO] | +- (com.fasterxml.jackson.core:jackson-databind:jar:2.17.0:compile - omitted for conflict with 2.18.3) +[INFO] | \- (org.slf4j:slf4j-api:jar:2.0.7:compile - omitted for conflict with 2.0.9) +[INFO] \- com.example:lib-c:jar:3.0.0:compile +[INFO] \- (com.fasterxml.jackson.core:jackson-databind:jar:2.19.0:compile - omitted for conflict with 2.18.3) +[INFO] ------------------------------------------------------------------------ +[INFO] BUILD SUCCESS diff --git a/tests/fixtures/mvn_dep_tree_large.txt b/tests/fixtures/mvn_dep_tree_large.txt new file mode 100644 index 000000000..203d81500 --- 
/dev/null +++ b/tests/fixtures/mvn_dep_tree_large.txt @@ -0,0 +1,142 @@ +[INFO] Loaded 22524 auto-discovered prefixes for remote repository central (prefixes-central.txt) +[INFO] Loaded 74 auto-discovered prefixes for remote repository apache.snapshots (prefixes-apache.snapshots.txt) +[INFO] Scanning for projects... +[WARNING] Could not transfer metadata /.meta/prefixes.txt from/to repo-releases (https://repo.example.com/releases/): Checksum validation failed +[INFO] Loaded 22524 auto-discovered prefixes for remote repository central (prefixes-central.txt) +[WARNING] +[WARNING] 1 problem was encountered while building the effective model for 'com.example.demo:webapp:jar:2.0-SNAPSHOT' (use -e to see details) +[WARNING] +[WARNING] It is highly recommended to fix these problems because they threaten the stability of your build. +[WARNING] +[INFO] +[INFO] -------------------------------------------------< com.example.demo:webapp >-------------------------------------------------- +[INFO] Building webapp 2.0-SNAPSHOT +[INFO] from pom.xml +[INFO] ---------------------------------------------------------[ jar ]---------------------------------------------------------- +[WARNING] 130 problems were encountered while building the effective model for 'org.redisson:redisson-spring-boot-starter:jar:4.0.0' during dependency collection step for project (use -X to see details) +[WARNING] 76 problems were encountered while building the effective model for 'com.playtika.testcontainers:embedded-redis:jar:3.1.11' during dependency collection step for project (use -X to see details) +[INFO] +[INFO] --- dependency:3.9.0:tree (default-cli) @ webapp --- +[INFO] com.example.demo:webapp:jar:2.0-SNAPSHOT +[INFO] +- org.springframework.boot:spring-boot-starter-actuator:jar:4.0.5:compile +[INFO] | +- org.springframework.boot:spring-boot-starter:jar:4.0.5:compile +[INFO] | | +- org.springframework.boot:spring-boot-starter-logging:jar:4.0.5:compile +[INFO] | | | +- 
ch.qos.logback:logback-classic:jar:1.5.32:compile +[INFO] | | | | \- ch.qos.logback:logback-core:jar:1.5.32:compile +[INFO] | | | +- org.apache.logging.log4j:log4j-to-slf4j:jar:2.25.3:compile +[INFO] | | | | \- org.apache.logging.log4j:log4j-api:jar:2.25.3:compile +[INFO] | | | \- org.slf4j:jul-to-slf4j:jar:2.0.17:compile +[INFO] | | \- org.springframework.boot:spring-boot-autoconfigure:jar:4.0.5:compile +[INFO] | +- org.springframework.boot:spring-boot-actuator-autoconfigure:jar:4.0.5:compile +[INFO] | | \- org.springframework.boot:spring-boot-actuator:jar:4.0.5:compile +[INFO] | +- io.micrometer:micrometer-observation:jar:1.16.4:compile +[INFO] | | \- io.micrometer:micrometer-commons:jar:1.16.4:compile +[INFO] | \- io.micrometer:micrometer-jakarta9:jar:1.16.4:compile +[INFO] +- org.springframework.cloud:spring-cloud-starter-circuitbreaker-resilience4j:jar:5.0.1:compile +[INFO] | +- org.springframework.cloud:spring-cloud-starter:jar:5.0.1:compile +[INFO] | | \- org.springframework.cloud:spring-cloud-context:jar:5.0.1:compile +[INFO] | +- org.springframework.cloud:spring-cloud-circuitbreaker-resilience4j:jar:5.0.1:compile +[INFO] | | +- com.fasterxml.jackson.core:jackson-core:jar:2.21.2:compile +[INFO] | | \- io.github.resilience4j:resilience4j-spring-boot3:jar:2.3.0:compile +[INFO] | | +- io.github.resilience4j:resilience4j-spring6:jar:2.3.0:compile +[INFO] | | | +- io.github.resilience4j:resilience4j-annotations:jar:2.3.0:compile +[INFO] | | | +- io.github.resilience4j:resilience4j-consumer:jar:2.3.0:compile +[INFO] | | | | \- io.github.resilience4j:resilience4j-circularbuffer:jar:2.3.0:runtime +[INFO] | | | \- io.github.resilience4j:resilience4j-framework-common:jar:2.3.0:compile +[INFO] | | \- io.github.resilience4j:resilience4j-micrometer:jar:2.3.0:compile +[INFO] | +- io.github.resilience4j:resilience4j-circuitbreaker:jar:2.3.0:compile +[INFO] | | \- io.github.resilience4j:resilience4j-core:jar:2.3.0:compile +[INFO] | \- 
io.github.resilience4j:resilience4j-timelimiter:jar:2.3.0:compile +[INFO] +- org.springframework.boot:spring-boot-starter-web:jar:4.0.5:compile +[INFO] | +- org.springframework.boot:spring-boot-starter-jackson:jar:4.0.5:compile +[INFO] | | \- org.springframework.boot:spring-boot-jackson:jar:4.0.5:compile +[INFO] | +- org.springframework.boot:spring-boot-starter-tomcat:jar:4.0.5:compile +[INFO] | | +- org.apache.tomcat.embed:tomcat-embed-core:jar:11.0.20:compile +[INFO] | | \- org.apache.tomcat.embed:tomcat-embed-websocket:jar:11.0.20:compile +[INFO] | +- org.springframework.boot:spring-boot-http-converter:jar:4.0.5:compile +[INFO] | \- org.springframework.boot:spring-boot-webmvc:jar:4.0.5:compile +[INFO] | \- org.springframework.boot:spring-boot-servlet:jar:4.0.5:compile +[INFO] +- org.springframework.boot:spring-boot-starter-security:jar:4.0.5:compile +[INFO] | +- org.springframework.boot:spring-boot-security:jar:4.0.5:compile +[INFO] | | \- org.springframework.security:spring-security-config:jar:7.0.4:compile +[INFO] | \- org.springframework:spring-aop:jar:7.0.6:compile +[INFO] +- org.springframework.boot:spring-boot-starter-jdbc:jar:4.0.5:compile +[INFO] | \- com.zaxxer:HikariCP:jar:7.0.2:compile +[INFO] +- org.springframework.retry:spring-retry:jar:2.0.12:compile +[INFO] +- org.springframework.cloud:spring-cloud-stream:jar:5.0.1:compile +[INFO] | +- org.springframework:spring-messaging:jar:7.0.6:compile +[INFO] | +- org.springframework.integration:spring-integration-core:jar:7.0.4:compile +[INFO] | | +- org.springframework:spring-context:jar:7.0.6:compile +[INFO] | | +- org.springframework:spring-tx:jar:7.0.6:compile +[INFO] | | \- io.projectreactor:reactor-core:jar:3.8.4:compile +[INFO] | +- org.springframework.cloud:spring-cloud-function-context:jar:5.0.1:compile +[INFO] | | +- org.springframework.boot:spring-boot-restclient:jar:4.0.5:compile +[INFO] | | | \- org.springframework.boot:spring-boot-http-client:jar:4.0.5:compile +[INFO] | | +- 
org.springframework.cloud:spring-cloud-function-core:jar:5.0.1:compile +[INFO] | | \- org.springframework.boot:spring-boot-web-server:jar:4.0.5:compile +[INFO] | +- org.springframework.cloud:spring-cloud-function-grpc:jar:5.0.1:compile +[INFO] | | \- io.grpc:grpc-netty-shaded:jar:1.71.0:compile +[INFO] | \- org.springframework.integration:spring-integration-jmx:jar:7.0.4:compile +[INFO] +- io.micrometer:micrometer-registry-statsd:jar:1.16.4:compile +[INFO] | +- io.micrometer:micrometer-core:jar:1.16.4:compile +[INFO] | | \- org.latencyutils:LatencyUtils:jar:2.0.3:runtime +[INFO] | \- io.netty:netty-transport-native-epoll:jar:4.2.1.Final:compile (version managed from 4.1.100.Final) +[INFO] | +- io.netty:netty-common:jar:4.2.1.Final:compile +[INFO] | +- io.netty:netty-buffer:jar:4.2.1.Final:compile +[INFO] | +- io.netty:netty-transport:jar:4.2.1.Final:compile +[INFO] | \- io.netty:netty-transport-native-unix-common:jar:4.2.1.Final:compile +[INFO] +- org.projectlombok:lombok:jar:1.18.44:compile (optional) +[INFO] +- org.jspecify:jspecify:jar:1.0.0:compile +[INFO] +- org.postgresql:postgresql:jar:42.7.10:runtime +[INFO] +- org.apache.commons:commons-lang3:jar:3.19.0:compile +[INFO] +- com.google.guava:guava:jar:33.4.8-jre:compile +[INFO] +- org.springframework.boot:spring-boot-starter-data-jpa:jar:4.0.5:compile +[INFO] | +- org.springframework.boot:spring-boot-starter-aop:jar:4.0.5:compile +[INFO] | +- org.springframework.boot:spring-boot-data-jpa:jar:4.0.5:compile +[INFO] | | +- org.hibernate.orm:hibernate-core:jar:7.2.7.Final:compile +[INFO] | | | +- jakarta.persistence:jakarta.persistence-api:jar:3.2.0:compile +[INFO] | | | +- jakarta.transaction:jakarta.transaction-api:jar:2.0.1:compile +[INFO] | | | \- org.jboss.logging:jboss-logging:jar:3.6.1.Final:compile +[INFO] | | \- org.springframework.data:spring-data-jpa:jar:4.0.4:compile +[INFO] | | \- org.springframework.data:spring-data-commons:jar:4.0.4:compile +[INFO] | \- 
org.springframework:spring-aspects:jar:7.0.6:compile +[INFO] +- com.example.demo:common-utils:jar:2.0-42:compile +[INFO] | +- com.example.demo:common-model:jar:2.0-42:compile +[INFO] | | +- com.fasterxml.jackson.core:jackson-annotations:jar:2.21.2:compile +[INFO] | | \- com.fasterxml.jackson.core:jackson-databind:jar:2.21.2:compile +[INFO] | \- com.example.demo:common-config:jar:2.0-42:compile +[INFO] +- net.logstash.logback:logstash-logback-encoder:jar:8.0:compile +[INFO] +- org.springframework.boot:spring-boot-starter-test:jar:4.0.5:test +[INFO] | +- org.springframework.boot:spring-boot-test:jar:4.0.5:test +[INFO] | +- org.springframework.boot:spring-boot-test-autoconfigure:jar:4.0.5:test +[INFO] | +- com.jayway.jsonpath:json-path:jar:2.9.0:test +[INFO] | +- org.assertj:assertj-core:jar:3.27.3:test +[INFO] | +- org.hamcrest:hamcrest:jar:3.0:test +[INFO] | +- org.junit.jupiter:junit-jupiter:jar:5.12.2:test +[INFO] | | +- org.junit.jupiter:junit-jupiter-api:jar:5.12.2:test +[INFO] | | +- org.junit.jupiter:junit-jupiter-params:jar:5.12.2:test +[INFO] | | \- org.junit.jupiter:junit-jupiter-engine:jar:5.12.2:test +[INFO] | +- org.mockito:mockito-core:jar:5.18.0:test +[INFO] | +- org.mockito:mockito-junit-jupiter:jar:5.18.0:test +[INFO] | \- org.skyscreamer:jsonassert:jar:2.0-rc1:test +[INFO] +- org.testcontainers:testcontainers:jar:2.0.4:test +[INFO] | +- com.github.docker-java:docker-java-api:jar:4.0.0:test +[INFO] | \- com.github.docker-java:docker-java-transport-zerodep:jar:4.0.0:test +[INFO] +- org.testcontainers:testcontainers-junit-jupiter:jar:2.0.4:test +[INFO] +- org.testcontainers:testcontainers-postgresql:jar:2.0.4:test (omitted for duplicate) +[INFO] +- org.springframework.security:spring-security-test:jar:7.0.4:test +[INFO] +- org.liquibase:liquibase-core:jar:5.0.2:test +[INFO] +- org.hibernate.orm:hibernate-processor:jar:7.2.7.Final:provided (optional) +[INFO] | +- io.smallrye:jandex:jar:3.3.2:provided (optional) +[INFO] | +- 
jakarta.validation:jakarta.validation-api:jar:3.1.1:compile +[INFO] | +- org.antlr:antlr4-runtime:jar:4.13.2:compile +[INFO] | \- net.bytebuddy:byte-buddy:jar:1.17.8:compile +[INFO] \- io.rest-assured:spring-mock-mvc:jar:6.0.0:test +[INFO] +- io.rest-assured:spring-commons:jar:6.0.0:test +[INFO] \- org.springframework:spring-webmvc:jar:7.0.6:compile +[INFO] -------------------------------------------------------------------------------------------------------------------------- +[INFO] BUILD SUCCESS +[INFO] -------------------------------------------------------------------------------------------------------------------------- +[INFO] Total time: 4.406 s +[INFO] Finished at: 2026-04-10T17:56:14+02:00 +[INFO] -------------------------------------------------------------------------------------------------------------------------- diff --git a/tests/fixtures/mvn_dep_tree_simple.txt b/tests/fixtures/mvn_dep_tree_simple.txt new file mode 100644 index 000000000..92a436d85 --- /dev/null +++ b/tests/fixtures/mvn_dep_tree_simple.txt @@ -0,0 +1,22 @@ +[INFO] Scanning for projects... 
+[INFO] +[INFO] ------------------< com.example:my-app >------------------- +[INFO] Building my-app 1.0.0 +[INFO] from pom.xml +[INFO] --------------------------------[ jar ]--------------------------------- +[INFO] +[INFO] --- dependency:3.7.0:tree (default-cli) @ my-app --- +[INFO] com.example:my-app:jar:1.0.0 +[INFO] +- org.slf4j:slf4j-api:jar:2.0.17:compile +[INFO] +- com.google.guava:guava:jar:33.0.0-jre:compile +[INFO] | +- com.google.guava:failureaccess:jar:1.0.2:compile +[INFO] | \- com.google.guava:listenablefuture:jar:9999.0-empty-to-avoid-conflict-with-guava:compile +[INFO] +- com.fasterxml.jackson.core:jackson-databind:jar:2.19.2:compile +[INFO] | +- com.fasterxml.jackson.core:jackson-annotations:jar:2.19.2:compile +[INFO] | \- com.fasterxml.jackson.core:jackson-core:jar:2.19.2:compile +[INFO] \- org.junit.jupiter:junit-jupiter:jar:5.11.4:test +[INFO] +- org.junit.jupiter:junit-jupiter-api:jar:5.11.4:test +[INFO] \- org.junit.jupiter:junit-jupiter-engine:jar:5.11.4:test +[INFO] ------------------------------------------------------------------------ +[INFO] BUILD SUCCESS +[INFO] ------------------------------------------------------------------------ diff --git a/tests/fixtures/mvn_test_fail_auth.txt b/tests/fixtures/mvn_test_fail_auth.txt new file mode 100644 index 000000000..018dad29e --- /dev/null +++ b/tests/fixtures/mvn_test_fail_auth.txt @@ -0,0 +1,95 @@ +[INFO] Scanning for projects... 
+[WARNING] Could not transfer metadata from/to central: Checksum validation failed +[WARNING] +[WARNING] 1 problem was encountered while building the effective model for 'com.example:webapp:jar:1.0-SNAPSHOT' +[WARNING] +[WARNING] Total model problems reported: 1 +[WARNING] +[INFO] +[INFO] -------------------------------------------------< com.example:webapp >-------------------------------------------------- +[INFO] Building webapp 1.0-SNAPSHOT +[INFO] from pom.xml +[INFO] ---------------------------------------------------------[ jar ]---------------------------------------------------------- +[WARNING] 176 problems were encountered while building the effective model for 'com.example:dep:jar:3.2.0' during n/a +[WARNING] 130 problems were encountered while building the effective model for 'org.redisson:redisson-spring-boot-starter:jar:4.0.0' +[WARNING] 76 problems were encountered while building the effective model for 'com.playtika.testcontainers:embedded-redis:jar:3.1.11' +[INFO] +[INFO] --- clean:3.5.0:clean (default-clean) @ webapp --- +[INFO] Deleting /home/user/project/target +[INFO] +[INFO] --- jacoco:0.8.14:prepare-agent (prepare-agent) @ webapp --- +[INFO] argLine set to -javaagent:/home/user/.m2/repository/org/jacoco/org.jacoco.agent/0.8.14/org.jacoco.agent-0.8.14-runtime.jar +[INFO] +[INFO] --- testcontainers-jooq-codegen:0.0.4:generate (generate-jooq-sources) @ webapp --- +[WARNING] Could not transfer metadata from/to flyway-repo: Connection refused +[INFO] Image pull policy will be performed by: DefaultPullPolicy() +[WARNING] [stderr] Apr 08, 2026 5:45:45 PM liquibase.changelog +[WARNING] [stderr] INFO: Reading resource: db/changelog/2025/db.changelog-main.yaml +[WARNING] [stderr] INFO: Reading resource: db/changelog/2025/db.changelog-001.yaml +[WARNING] [stderr] INFO: Reading resource: db/changelog/2025/db.changelog-002.yaml +[INFO] +[INFO] --- maven-compiler-plugin:3.14.0:compile (default-compile) @ webapp --- +[INFO] Compiling 42 source files with 
javac [debug target 25] to target/classes +[INFO] +[INFO] --- maven-compiler-plugin:3.14.0:testCompile (default-testCompile) @ webapp --- +[INFO] Compiling 18 source files with javac [debug target 25] to target/test-classes +[WARNING] /home/user/project/src/test/java/com/example/TestFactory.java:[44,27] deprecated +[WARNING] /home/user/project/src/test/java/com/example/TestFactory.java:[48,45] deprecated +[WARNING] /home/user/project/src/test/java/com/example/SpecificationsTest.java:[60,44] deprecated +[INFO] /home/user/project/src/test/java/com/example/ProcessorTest.java: uses unchecked or unsafe operations. +[INFO] +[INFO] --- surefire:3.5.5:test (default-test) @ webapp --- +[INFO] Using auto detected provider org.apache.maven.surefire.junitplatform.JUnitPlatformProvider +[WARNING] The system property java.util.logging.config.file is configured twice! +[INFO] +[INFO] ------------------------------------------------------- +[INFO] T E S T S +[INFO] ------------------------------------------------------- +[INFO] Running com.example.score.ScoreTypeTest +[INFO] Running com.example.company.EmailParserTest +[ERROR] Tests run: 2, Failures: 1, Errors: 0, Skipped: 0, Time elapsed: 0.050 s <<< FAILURE! -- in com.example.company.EmailParserTest +[ERROR] Tests run: 3, Failures: 1, Errors: 0, Skipped: 0, Time elapsed: 0.058 s <<< FAILURE! -- in com.example.score.ScoreTypeTest +[ERROR] com.example.company.EmailParserTest.should_extract_domain_from_email -- Time elapsed: 0.009 s <<< FAILURE! +org.opentest4j.AssertionFailedError: + +expected: "broken.example.com" + but was: "user.example.com" + at com.example.company.EmailParserTest.should_extract_domain_from_email(EmailParserTest.java:14) + +[ERROR] com.example.score.ScoreTypeTest.shouldMapToRole -- Time elapsed: 0.008 s <<< FAILURE! 
+org.opentest4j.AssertionFailedError: + +expected: "app:BROKEN" + but was: "app:all" + at com.example.score.ScoreTypeTest.shouldMapToRole(ScoreTypeTest.java:24) + +[INFO] +[INFO] Results: +[INFO] +[ERROR] Failures: +[ERROR] EmailParserTest.should_extract_domain_from_email:14 +expected: "broken.example.com" + but was: "user.example.com" +[ERROR] ScoreTypeTest.shouldMapToRole:24 +expected: "app:BROKEN" + but was: "app:all" +[INFO] +[ERROR] Tests run: 5, Failures: 2, Errors: 0, Skipped: 0 +[INFO] +[INFO] -------------------------------------------------------------------------------------------------------------------------- +[INFO] BUILD FAILURE +[INFO] -------------------------------------------------------------------------------------------------------------------------- +[INFO] Total time: 23.819 s +[INFO] Finished at: 2026-04-08T17:45:57+02:00 +[INFO] -------------------------------------------------------------------------------------------------------------------------- +[ERROR] Failed to execute goal org.apache.maven.plugins:maven-surefire-plugin:3.5.5:test (default-test) on project webapp: There are test failures. +[ERROR] +[ERROR] See /home/user/project/target/surefire-reports for the individual test results. +[ERROR] See dump files (if any exist) [date].dump, [date]-jvmRun[N].dump and [date].dumpstream. +[ERROR] -> [Help 1] +[ERROR] +[ERROR] To see the full stack trace of the errors, re-run Maven with the '-e' switch +[ERROR] Re-run Maven using the '-X' switch to enable verbose output +[ERROR] +[ERROR] For more information about the errors and possible solutions, please read the following articles: +[ERROR] [Help 1] http://cwiki.apache.org/confluence/display/MAVEN/MojoFailureException diff --git a/tests/fixtures/mvn_test_large_suite.txt b/tests/fixtures/mvn_test_large_suite.txt new file mode 100644 index 000000000..5ab6abb71 --- /dev/null +++ b/tests/fixtures/mvn_test_large_suite.txt @@ -0,0 +1,204 @@ +[INFO] Scanning for projects... 
+[WARNING] Could not transfer metadata from/to central: Checksum validation failed +[INFO] +[INFO] -----------------------< com.example:platform >------------------------ +[INFO] Building platform 2.0-SNAPSHOT +[INFO] from pom.xml +[INFO] --------------------------------[ jar ]--------------------------------- +[WARNING] 42 problems were encountered while building the effective model +[INFO] +[INFO] --- maven-compiler-plugin:3.14.0:compile (default-compile) @ platform --- +[INFO] Compiling 120 source files with javac +[INFO] +[INFO] --- maven-compiler-plugin:3.14.0:testCompile (default-testCompile) @ platform --- +[INFO] Compiling 85 source files with javac +[WARNING] /home/user/project/src/test/java/com/example/DeprecatedTest.java:[10,5] deprecated +[WARNING] /home/user/project/src/test/java/com/example/OtherTest.java:[20,8] deprecated +[INFO] +[INFO] --- surefire:3.5.4:test (default-test) @ platform --- +[INFO] Using auto detected provider org.apache.maven.surefire.junitplatform.JUnitPlatformProvider +[INFO] +[INFO] ------------------------------------------------------- +[INFO] T E S T S +[INFO] ------------------------------------------------------- +[INFO] Running com.example.registry.SearchModelTest +[INFO] Tests run: 50, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 5.123 s -- in com.example.registry.SearchModelTest +[INFO] Running com.example.registry.IndexServiceTest +[INFO] Tests run: 30, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 2.456 s -- in com.example.registry.IndexServiceTest +[INFO] Running com.example.organization.OrgServiceTest +[INFO] Tests run: 25, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 1.234 s -- in com.example.organization.OrgServiceTest +[INFO] Running com.example.organization.PatchableFieldTest +[ERROR] Tests run: 6, Failures: 0, Errors: 6, Skipped: 0, Time elapsed: 0.010 s <<< FAILURE! 
-- in com.example.organization.PatchableFieldTest +[ERROR] com.example.organization.PatchableFieldTest.fromStrings_returns_empty -- Time elapsed: 0 s <<< ERROR! +java.lang.Error: +Unresolved compilation problem: + log cannot be resolved + + at com.example.organization.PatchableFieldTest.fromStrings_returns_empty(PatchableFieldTest.java:12) + +[ERROR] com.example.organization.PatchableFieldTest.fromStrings_converts_names -- Time elapsed: 0 s <<< ERROR! +java.lang.Error: +Unresolved compilation problem: + log cannot be resolved + + at com.example.organization.PatchableFieldTest.fromStrings_converts_names(PatchableFieldTest.java:20) + +[ERROR] com.example.organization.PatchableFieldTest.shouldUpdate_false_when_empty -- Time elapsed: 0 s <<< ERROR! +java.lang.ClassCastException: class com.example.organization.PatchableField not an enum + at com.example.organization.PatchableFieldTest.shouldUpdate_false_when_empty(PatchableFieldTest.java:36) + +[ERROR] com.example.organization.PatchableFieldTest.shouldUpdate_true_when_null -- Time elapsed: 0 s <<< ERROR! +java.lang.Error: +Unresolved compilation problem: + + at com.example.organization.PatchableField.shouldUpdate(PatchableField.java:29) + +[ERROR] com.example.organization.PatchableFieldTest.shouldUpdateAny_true -- Time elapsed: 0 s <<< ERROR! +java.lang.NullPointerException: Cannot invoke "java.lang.Enum.getDeclaringClass()" because "e1" is null + at com.example.organization.PatchableFieldTest.shouldUpdateAny_true(PatchableFieldTest.java:49) + +[ERROR] com.example.organization.PatchableFieldTest.fromStrings_ignores_unknown -- Time elapsed: 0 s <<< ERROR! +java.lang.Error: +Unresolved compilation problem: + log cannot be resolved + + at com.example.organization.PatchableFieldTest.fromStrings_ignores_unknown(PatchableFieldTest.java:55) + +[INFO] Running com.example.search.SearchReadModelTest +[ERROR] Tests run: 3, Failures: 3, Errors: 0, Skipped: 0, Time elapsed: 4.567 s <<< FAILURE! 
-- in com.example.search.SearchReadModelTest +[ERROR] com.example.search.SearchReadModelTest.should_get_person_ratings -- Time elapsed: 1.234 s <<< FAILURE! +java.lang.AssertionError: +Expecting actual: + [("Group1", {SELF=50, SUPERVISOR=35}, 40, null, 3), + ("Group2", {SELF=80}, 80, null, 1)] +to have size: + <1> +but had size: + <2> + + at com.example.search.SearchReadModelTest.should_get_person_ratings(SearchReadModelTest.java:85) + +[ERROR] com.example.search.SearchReadModelTest.should_get_company_ratings -- Time elapsed: 0.987 s <<< FAILURE! +org.assertj.core.error.AssertJMultipleFailuresError: +[List check single element] (1 failure) +-- failure 1 -- +expected: 42 + but was: 38 + + at com.example.search.SearchReadModelTest.should_get_company_ratings(SearchReadModelTest.java:120) + +[ERROR] com.example.search.SearchReadModelTest.should_not_include_unknown_skills -- Time elapsed: 0.456 s <<< FAILURE! +org.assertj.core.error.AssertJMultipleFailuresError: +[List check single element] (1 failure) +-- failure 1 -- +expected size: 0 but was: 1 + + at com.example.search.SearchReadModelTest.should_not_include_unknown_skills(SearchReadModelTest.java:150) + +[INFO] Running com.example.payment.PaymentServiceTest +[INFO] Tests run: 100, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 8.901 s -- in com.example.payment.PaymentServiceTest +[INFO] Running com.example.report.ReportGeneratorTest +[INFO] Tests run: 45, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 3.210 s -- in com.example.report.ReportGeneratorTest +[INFO] Running com.example.integration.ApiIntegrationTest +[ERROR] Tests run: 14, Failures: 0, Errors: 14, Skipped: 0, Time elapsed: 0.015 s <<< FAILURE! -- in com.example.integration.ApiIntegrationTest +[ERROR] com.example.integration.ApiIntegrationTest.shouldCreateUser -- Time elapsed: 0 s <<< ERROR! 
+java.lang.IllegalStateException: ApplicationContext failure threshold (1) exceeded + at org.springframework.test.context.cache.DefaultCacheAwareContextLoaderDelegate.loadContext(DefaultCacheAwareContextLoaderDelegate.java:157) + +[ERROR] com.example.integration.ApiIntegrationTest.shouldDeleteUser -- Time elapsed: 0 s <<< ERROR! +java.lang.IllegalStateException: ApplicationContext failure threshold (1) exceeded + at org.springframework.test.context.cache.DefaultCacheAwareContextLoaderDelegate.loadContext(DefaultCacheAwareContextLoaderDelegate.java:157) + +[ERROR] com.example.integration.ApiIntegrationTest.shouldUpdateUser -- Time elapsed: 0 s <<< ERROR! +java.lang.IllegalStateException: ApplicationContext failure threshold (1) exceeded + at org.springframework.test.context.cache.DefaultCacheAwareContextLoaderDelegate.loadContext(DefaultCacheAwareContextLoaderDelegate.java:157) + +[ERROR] com.example.integration.ApiIntegrationTest.shouldListUsers -- Time elapsed: 0 s <<< ERROR! +java.lang.IllegalStateException: ApplicationContext failure threshold (1) exceeded + at org.springframework.test.context.cache.DefaultCacheAwareContextLoaderDelegate.loadContext(DefaultCacheAwareContextLoaderDelegate.java:157) + +[ERROR] com.example.integration.ApiIntegrationTest.shouldGetUser -- Time elapsed: 0 s <<< ERROR! +java.lang.IllegalStateException: ApplicationContext failure threshold (1) exceeded + at org.springframework.test.context.cache.DefaultCacheAwareContextLoaderDelegate.loadContext(DefaultCacheAwareContextLoaderDelegate.java:157) + +[ERROR] com.example.integration.ApiIntegrationTest.shouldSearchUsers -- Time elapsed: 0 s <<< ERROR! +java.lang.IllegalStateException: ApplicationContext failure threshold (1) exceeded + at org.springframework.test.context.cache.DefaultCacheAwareContextLoaderDelegate.loadContext(DefaultCacheAwareContextLoaderDelegate.java:157) + +[ERROR] com.example.integration.ApiIntegrationTest.shouldPaginateUsers -- Time elapsed: 0 s <<< ERROR! 
+java.lang.IllegalStateException: ApplicationContext failure threshold (1) exceeded + at org.springframework.test.context.cache.DefaultCacheAwareContextLoaderDelegate.loadContext(DefaultCacheAwareContextLoaderDelegate.java:157) + +[ERROR] com.example.integration.ApiIntegrationTest.shouldFilterUsers -- Time elapsed: 0 s <<< ERROR! +java.lang.IllegalStateException: ApplicationContext failure threshold (1) exceeded + at org.springframework.test.context.cache.DefaultCacheAwareContextLoaderDelegate.loadContext(DefaultCacheAwareContextLoaderDelegate.java:157) + +[ERROR] com.example.integration.ApiIntegrationTest.shouldSortUsers -- Time elapsed: 0 s <<< ERROR! +java.lang.IllegalStateException: ApplicationContext failure threshold (1) exceeded + at org.springframework.test.context.cache.DefaultCacheAwareContextLoaderDelegate.loadContext(DefaultCacheAwareContextLoaderDelegate.java:157) + +[ERROR] com.example.integration.ApiIntegrationTest.shouldExportUsers -- Time elapsed: 0 s <<< ERROR! +java.lang.IllegalStateException: ApplicationContext failure threshold (1) exceeded + at org.springframework.test.context.cache.DefaultCacheAwareContextLoaderDelegate.loadContext(DefaultCacheAwareContextLoaderDelegate.java:157) + +[ERROR] com.example.integration.ApiIntegrationTest.shouldImportUsers -- Time elapsed: 0 s <<< ERROR! +java.lang.IllegalStateException: ApplicationContext failure threshold (1) exceeded + at org.springframework.test.context.cache.DefaultCacheAwareContextLoaderDelegate.loadContext(DefaultCacheAwareContextLoaderDelegate.java:157) + +[ERROR] com.example.integration.ApiIntegrationTest.shouldValidateInput -- Time elapsed: 0 s <<< ERROR! +java.lang.IllegalStateException: ApplicationContext failure threshold (1) exceeded + at org.springframework.test.context.cache.DefaultCacheAwareContextLoaderDelegate.loadContext(DefaultCacheAwareContextLoaderDelegate.java:157) + +[ERROR] com.example.integration.ApiIntegrationTest.shouldHandleConcurrency -- Time elapsed: 0 s <<< ERROR! 
+java.lang.IllegalStateException: ApplicationContext failure threshold (1) exceeded + at org.springframework.test.context.cache.DefaultCacheAwareContextLoaderDelegate.loadContext(DefaultCacheAwareContextLoaderDelegate.java:157) + +[ERROR] com.example.integration.ApiIntegrationTest.shouldRetryOnFailure -- Time elapsed: 0 s <<< ERROR! +java.lang.IllegalStateException: ApplicationContext failure threshold (1) exceeded + at org.springframework.test.context.cache.DefaultCacheAwareContextLoaderDelegate.loadContext(DefaultCacheAwareContextLoaderDelegate.java:157) + +[INFO] +[INFO] Results: +[INFO] +[ERROR] Failures: +[ERROR] SearchReadModelTest.should_get_person_ratings:85 expected size: 1 but was: 2 +[ERROR] SearchReadModelTest.should_get_company_ratings:120 expected: 42 but was: 38 +[ERROR] SearchReadModelTest.should_not_include_unknown_skills:150 expected size: 0 but was: 1 +[ERROR] Errors: +[ERROR] PatchableFieldTest.fromStrings_returns_empty >> Error Unresolved compilation problem: log cannot be resolved +[ERROR] PatchableFieldTest.fromStrings_converts_names >> Error Unresolved compilation problem: log cannot be resolved +[ERROR] PatchableFieldTest.shouldUpdate_false_when_empty >> ClassCast not an enum +[ERROR] PatchableFieldTest.shouldUpdate_true_when_null >> Error Unresolved compilation problem +[ERROR] PatchableFieldTest.shouldUpdateAny_true >> NullPointer Cannot invoke "java.lang.Enum.getDeclaringClass()" +[ERROR] PatchableFieldTest.fromStrings_ignores_unknown >> Error Unresolved compilation problem: log cannot be resolved +[ERROR] ApiIntegrationTest.shouldCreateUser >> IllegalState ApplicationContext failure threshold (1) exceeded +[ERROR] ApiIntegrationTest.shouldDeleteUser >> IllegalState ApplicationContext failure threshold (1) exceeded +[ERROR] ApiIntegrationTest.shouldUpdateUser >> IllegalState ApplicationContext failure threshold (1) exceeded +[ERROR] ApiIntegrationTest.shouldListUsers >> IllegalState ApplicationContext failure threshold (1) exceeded 
+[ERROR] ApiIntegrationTest.shouldGetUser >> IllegalState ApplicationContext failure threshold (1) exceeded +[ERROR] ApiIntegrationTest.shouldSearchUsers >> IllegalState ApplicationContext failure threshold (1) exceeded +[ERROR] ApiIntegrationTest.shouldPaginateUsers >> IllegalState ApplicationContext failure threshold (1) exceeded +[ERROR] ApiIntegrationTest.shouldFilterUsers >> IllegalState ApplicationContext failure threshold (1) exceeded +[ERROR] ApiIntegrationTest.shouldSortUsers >> IllegalState ApplicationContext failure threshold (1) exceeded +[ERROR] ApiIntegrationTest.shouldExportUsers >> IllegalState ApplicationContext failure threshold (1) exceeded +[ERROR] ApiIntegrationTest.shouldImportUsers >> IllegalState ApplicationContext failure threshold (1) exceeded +[ERROR] ApiIntegrationTest.shouldValidateInput >> IllegalState ApplicationContext failure threshold (1) exceeded +[ERROR] ApiIntegrationTest.shouldHandleConcurrency >> IllegalState ApplicationContext failure threshold (1) exceeded +[ERROR] ApiIntegrationTest.shouldRetryOnFailure >> IllegalState ApplicationContext failure threshold (1) exceeded +[INFO] +[ERROR] Tests run: 3262, Failures: 3, Errors: 20, Skipped: 4 +[INFO] +[INFO] -------------------------------------------------------------------------------------------------------------------------- +[INFO] BUILD FAILURE +[INFO] -------------------------------------------------------------------------------------------------------------------------- +[INFO] Total time: 03:25 min +[INFO] Finished at: 2026-04-08T21:07:54+02:00 +[INFO] -------------------------------------------------------------------------------------------------------------------------- +[ERROR] Failed to execute goal org.apache.maven.plugins:maven-surefire-plugin:3.5.4:test (default-test) on project platform: There are test failures. +[ERROR] +[ERROR] See /home/user/project/target/surefire-reports for the individual test results. 
+[ERROR] -> [Help 1] +[ERROR] +[ERROR] Re-run Maven using the '-X' switch to enable full debug logging. +[ERROR] +[ERROR] [Help 1] http://cwiki.apache.org/confluence/display/MAVEN/MojoFailureException diff --git a/tests/fixtures/mvn_test_many_failures.txt b/tests/fixtures/mvn_test_many_failures.txt new file mode 100644 index 000000000..a3466fff9 --- /dev/null +++ b/tests/fixtures/mvn_test_many_failures.txt @@ -0,0 +1,115 @@ +[INFO] Scanning for projects... +[WARNING] The requested profile "unit-tests" could not be activated because it does not exist. +[INFO] +[INFO] -----------------------< com.example:myapp >------------------------ +[INFO] Building myapp 0.0.1-SNAPSHOT +[INFO] from pom.xml +[INFO] --------------------------------[ jar ]--------------------------------- +[INFO] +[INFO] --- resources:3.3.1:resources (default-resources) @ myapp --- +[INFO] Copying 2 resources from src/main/resources to target/classes +[INFO] +[INFO] --- surefire:3.5.5:test (default-test) @ myapp --- +[INFO] +[INFO] ------------------------------------------------------- +[INFO] T E S T S +[INFO] ------------------------------------------------------- +[INFO] Running com.example.controller.ItemControllerTest +[ERROR] Tests run: 4, Failures: 0, Errors: 4, Skipped: 0, Time elapsed: 8.737 s <<< FAILURE! -- in com.example.controller.ItemControllerTest +[ERROR] com.example.controller.ItemControllerTest.shouldReturn404ForNonExistent -- Time elapsed: 0.003 s <<< ERROR! 
+java.lang.IllegalStateException: Failed to load ApplicationContext for [WebMergedContextConfiguration@7b05dab testClass = com.example.controller.ItemControllerTest, locations = [], classes = [com.example.MyApplication], contextInitializerClasses = [], activeProfiles = [], propertySourceDescriptors = [], propertySourceProperties = ["org.springframework.boot.test.context.SpringBootTestContextBootstrapper=true"]] + at org.springframework.test.context.cache.DefaultCacheAwareContextLoaderDelegate.lambda$loadContext$0(DefaultCacheAwareContextLoaderDelegate.java:195) + at org.springframework.test.context.cache.DefaultContextCache.put(DefaultContextCache.java:214) + at java.base/java.lang.reflect.Method.invoke(Method.java:565) + +[ERROR] com.example.controller.ItemControllerTest.shouldCreate -- Time elapsed: 0 s <<< ERROR! +java.lang.IllegalStateException: ApplicationContext failure threshold (1) exceeded: skipping repeated attempt to load context + at org.springframework.test.context.cache.DefaultCacheAwareContextLoaderDelegate.loadContext(DefaultCacheAwareContextLoaderDelegate.java:157) + +[ERROR] com.example.controller.ItemControllerTest.shouldUpdate -- Time elapsed: 0 s <<< ERROR! +java.lang.IllegalStateException: ApplicationContext failure threshold (1) exceeded: skipping repeated attempt to load context + at org.springframework.test.context.cache.DefaultCacheAwareContextLoaderDelegate.loadContext(DefaultCacheAwareContextLoaderDelegate.java:157) + +[ERROR] com.example.controller.ItemControllerTest.shouldDelete -- Time elapsed: 0 s <<< ERROR! +java.lang.IllegalStateException: ApplicationContext failure threshold (1) exceeded: skipping repeated attempt to load context + at org.springframework.test.context.cache.DefaultCacheAwareContextLoaderDelegate.loadContext(DefaultCacheAwareContextLoaderDelegate.java:157) + +[INFO] Running com.example.controller.OrderControllerTest +[ERROR] Tests run: 7, Failures: 0, Errors: 7, Skipped: 0, Time elapsed: 0.012 s <<< FAILURE! 
-- in com.example.controller.OrderControllerTest +[ERROR] com.example.controller.OrderControllerTest.shouldReturn404 -- Time elapsed: 0 s <<< ERROR! +java.lang.IllegalStateException: ApplicationContext failure threshold (1) exceeded: skipping repeated attempt to load context + at org.springframework.test.context.cache.DefaultCacheAwareContextLoaderDelegate.loadContext(DefaultCacheAwareContextLoaderDelegate.java:157) + +[ERROR] com.example.controller.OrderControllerTest.shouldUpdate -- Time elapsed: 0 s <<< ERROR! +java.lang.IllegalStateException: ApplicationContext failure threshold (1) exceeded: skipping repeated attempt to load context + at org.springframework.test.context.cache.DefaultCacheAwareContextLoaderDelegate.loadContext(DefaultCacheAwareContextLoaderDelegate.java:157) + +[ERROR] com.example.controller.OrderControllerTest.shouldCreate -- Time elapsed: 0 s <<< ERROR! +java.lang.IllegalStateException: ApplicationContext failure threshold (1) exceeded: skipping repeated attempt to load context + at org.springframework.test.context.cache.DefaultCacheAwareContextLoaderDelegate.loadContext(DefaultCacheAwareContextLoaderDelegate.java:157) + +[ERROR] com.example.controller.OrderControllerTest.shouldDelete -- Time elapsed: 0 s <<< ERROR! +java.lang.IllegalStateException: ApplicationContext failure threshold (1) exceeded: skipping repeated attempt to load context + at org.springframework.test.context.cache.DefaultCacheAwareContextLoaderDelegate.loadContext(DefaultCacheAwareContextLoaderDelegate.java:157) + +[ERROR] com.example.controller.OrderControllerTest.shouldList -- Time elapsed: 0 s <<< ERROR! 
+java.lang.IllegalStateException: ApplicationContext failure threshold (1) exceeded: skipping repeated attempt to load context + at org.springframework.test.context.cache.DefaultCacheAwareContextLoaderDelegate.loadContext(DefaultCacheAwareContextLoaderDelegate.java:157) + +[ERROR] com.example.controller.OrderControllerTest.shouldRejectDuplicate -- Time elapsed: 0 s <<< ERROR! +java.lang.IllegalStateException: ApplicationContext failure threshold (1) exceeded: skipping repeated attempt to load context + at org.springframework.test.context.cache.DefaultCacheAwareContextLoaderDelegate.loadContext(DefaultCacheAwareContextLoaderDelegate.java:157) + +[ERROR] com.example.controller.OrderControllerTest.shouldGetById -- Time elapsed: 0.001 s <<< ERROR! +java.lang.IllegalStateException: ApplicationContext failure threshold (1) exceeded: skipping repeated attempt to load context + at org.springframework.test.context.cache.DefaultCacheAwareContextLoaderDelegate.loadContext(DefaultCacheAwareContextLoaderDelegate.java:157) + +[INFO] Running com.example.controller.LeaseControllerTest +[ERROR] Tests run: 14, Failures: 0, Errors: 14, Skipped: 0, Time elapsed: 0.019 s <<< FAILURE! -- in com.example.controller.LeaseControllerTest +[ERROR] com.example.controller.LeaseControllerTest.shouldReturn400 -- Time elapsed: 0 s <<< ERROR! +java.lang.IllegalStateException: ApplicationContext failure threshold (1) exceeded + at org.springframework.test.context.cache.DefaultCacheAwareContextLoaderDelegate.loadContext(DefaultCacheAwareContextLoaderDelegate.java:157) + +[ERROR] com.example.controller.LeaseControllerTest.shouldList -- Time elapsed: 0 s <<< ERROR! +java.lang.IllegalStateException: ApplicationContext failure threshold (1) exceeded + at org.springframework.test.context.cache.DefaultCacheAwareContextLoaderDelegate.loadContext(DefaultCacheAwareContextLoaderDelegate.java:157) + +[ERROR] com.example.controller.LeaseControllerTest.shouldGetById -- Time elapsed: 0 s <<< ERROR! 
+java.lang.IllegalStateException: ApplicationContext failure threshold (1) exceeded + at org.springframework.test.context.cache.DefaultCacheAwareContextLoaderDelegate.loadContext(DefaultCacheAwareContextLoaderDelegate.java:157) + +[INFO] +[INFO] Results: +[INFO] +[ERROR] Errors: +[ERROR] ItemControllerTest.shouldReturn404ForNonExistent >> IllegalState Failed to load ApplicationContext +[ERROR] ItemControllerTest.shouldCreate >> IllegalState ApplicationContext failure threshold (1) exceeded +[ERROR] ItemControllerTest.shouldUpdate >> IllegalState ApplicationContext failure threshold (1) exceeded +[ERROR] ItemControllerTest.shouldDelete >> IllegalState ApplicationContext failure threshold (1) exceeded +[ERROR] OrderControllerTest.shouldReturn404 >> IllegalState ApplicationContext failure threshold (1) exceeded +[ERROR] OrderControllerTest.shouldUpdate >> IllegalState ApplicationContext failure threshold (1) exceeded +[ERROR] OrderControllerTest.shouldCreate >> IllegalState ApplicationContext failure threshold (1) exceeded +[ERROR] OrderControllerTest.shouldDelete >> IllegalState ApplicationContext failure threshold (1) exceeded +[ERROR] OrderControllerTest.shouldList >> IllegalState ApplicationContext failure threshold (1) exceeded +[ERROR] OrderControllerTest.shouldRejectDuplicate >> IllegalState ApplicationContext failure threshold (1) exceeded +[ERROR] OrderControllerTest.shouldGetById >> IllegalState ApplicationContext failure threshold (1) exceeded +[ERROR] LeaseControllerTest.shouldReturn400 >> IllegalState ApplicationContext failure threshold (1) exceeded +[ERROR] LeaseControllerTest.shouldList >> IllegalState ApplicationContext failure threshold (1) exceeded +[ERROR] LeaseControllerTest.shouldGetById >> IllegalState ApplicationContext failure threshold (1) exceeded +[INFO] +[ERROR] Tests run: 28, Failures: 0, Errors: 28, Skipped: 0 +[INFO] +[INFO] ------------------------------------------------------------------------ +[INFO] BUILD FAILURE +[INFO] 
------------------------------------------------------------------------ +[INFO] Total time: 22.379 s +[INFO] Finished at: 2026-04-08T20:58:01+02:00 +[INFO] ------------------------------------------------------------------------ +[ERROR] Failed to execute goal org.apache.maven.plugins:maven-surefire-plugin:3.5.5:test (default-test) on project myapp: There are test failures. +[ERROR] +[ERROR] See /home/user/project/target/surefire-reports for the individual test results. +[ERROR] -> [Help 1] +[ERROR] +[ERROR] Re-run Maven using the '-X' switch to enable full debug logging. +[ERROR] +[ERROR] [Help 1] http://cwiki.apache.org/confluence/display/MAVEN/MojoFailureException diff --git a/tests/fixtures/mvn_test_multimodule.txt b/tests/fixtures/mvn_test_multimodule.txt new file mode 100644 index 000000000..ab3ea3404 --- /dev/null +++ b/tests/fixtures/mvn_test_multimodule.txt @@ -0,0 +1,118 @@ +[INFO] Scanning for projects... +[INFO] Building parent 1.0-SNAPSHOT +[INFO] --------------------------------[ pom ]--------------------------------- +[INFO] +[INFO] --- maven-surefire-plugin:3.1.2:test (default-test) @ common --- +[INFO] +[INFO] ------------------------------------------------------- +[INFO] T E S T S +[INFO] ------------------------------------------------------- +[INFO] Running com.example.common.PathUtilsTest +[INFO] Tests run: 2, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 0.056 s -- in com.example.common.PathUtilsTest +[INFO] Tests run: 10, Failures: 0, Errors: 0, Skipped: 0 +[INFO] +[INFO] Results: +[INFO] +[INFO] Tests run: 10, Failures: 0, Errors: 0, Skipped: 0 +[INFO] +[INFO] --- maven-surefire-plugin:3.1.2:test (default-test) @ data --- +[INFO] +[INFO] ------------------------------------------------------- +[INFO] T E S T S +[INFO] ------------------------------------------------------- +[INFO] Running com.example.data.RepositoryTest +[INFO] Tests run: 34, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 0.036 s -- in 
com.example.data.RepositoryTest +[INFO] Running com.example.data.ModelTest +[INFO] Tests run: 25, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 0.370 s -- in com.example.data.ModelTest +[INFO] Tests run: 194, Failures: 0, Errors: 0, Skipped: 0 +[INFO] +[INFO] Results: +[INFO] +[INFO] Tests run: 194, Failures: 0, Errors: 0, Skipped: 0 +[INFO] +[INFO] --- maven-surefire-plugin:3.1.2:test (default-test) @ ml --- +[INFO] +[INFO] ------------------------------------------------------- +[INFO] T E S T S +[INFO] ------------------------------------------------------- +[INFO] Running com.example.ml.BenchmarkTest +[INFO] Tests run: 5, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 0.632 s -- in com.example.ml.BenchmarkTest +[INFO] Tests run: 5, Failures: 0, Errors: 0, Skipped: 0 +[INFO] +[INFO] Results: +[INFO] +[INFO] Tests run: 5, Failures: 0, Errors: 0, Skipped: 0 +[INFO] +[INFO] --- maven-surefire-plugin:3.1.2:test (default-test) @ services --- +[INFO] +[INFO] ------------------------------------------------------- +[INFO] T E S T S +[INFO] ------------------------------------------------------- +[INFO] Running com.example.services.FileHasherTest +[INFO] Tests run: 1, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 0 s -- in com.example.services.FileHasherTest +[INFO] Running com.example.services.GitDiffReaderTest +[ERROR] Tests run: 4, Failures: 0, Errors: 4, Skipped: 0, Time elapsed: 3.167 s <<< FAILURE! -- in com.example.services.GitDiffReaderTest +[ERROR] com.example.services.GitDiffReaderTest.shouldBuildDiffWithFiltering -- Time elapsed: 3.109 s <<< ERROR! 
+org.eclipse.jgit.api.errors.ServiceUnavailableException: Signing service is not available + at org.eclipse.jgit.api.CommitCommand.sign(CommitCommand.java:328) + at org.eclipse.jgit.api.CommitCommand.call(CommitCommand.java:283) + at com.example.services.GitDiffReaderTest.shouldBuildDiffWithFiltering(GitDiffReaderTest.java:116) + at java.base/java.lang.reflect.Method.invoke(Method.java:565) + +[ERROR] com.example.services.GitDiffReaderTest.shouldReturnEmptyDiff -- Time elapsed: 0.013 s <<< ERROR! +org.eclipse.jgit.api.errors.ServiceUnavailableException: Signing service is not available + at org.eclipse.jgit.api.CommitCommand.sign(CommitCommand.java:328) + at org.eclipse.jgit.api.CommitCommand.call(CommitCommand.java:283) + at com.example.services.GitDiffReaderTest.shouldReturnEmptyDiff(GitDiffReaderTest.java:67) + at java.base/java.lang.reflect.Method.invoke(Method.java:565) + +[ERROR] com.example.services.GitDiffReaderTest.shouldBuildDiff -- Time elapsed: 0.012 s <<< ERROR! +org.eclipse.jgit.api.errors.ServiceUnavailableException: Signing service is not available + at org.eclipse.jgit.api.CommitCommand.sign(CommitCommand.java:328) + at com.example.services.GitDiffReaderTest.shouldBuildDiff(GitDiffReaderTest.java:35) + +[ERROR] com.example.services.GitDiffReaderTest.shouldBuildDiffForDevops -- Time elapsed: 0.011 s <<< ERROR! 
+org.eclipse.jgit.api.errors.ServiceUnavailableException: Signing service is not available + at org.eclipse.jgit.api.CommitCommand.sign(CommitCommand.java:328) + at com.example.services.GitDiffReaderTest.shouldBuildDiffForDevops(GitDiffReaderTest.java:88) + +[INFO] Running com.example.services.IoHelperTest +[INFO] Tests run: 2, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 0 s -- in com.example.services.IoHelperTest +[INFO] Tests run: 651, Failures: 0, Errors: 4, Skipped: 4 +[INFO] +[INFO] Results: +[INFO] +[ERROR] Errors: +[ERROR] GitDiffReaderTest.shouldBuildDiff:35 >> ServiceUnavailable Signing service is not available +[ERROR] GitDiffReaderTest.shouldBuildDiffForDevops:88 >> ServiceUnavailable Signing service is not available +[ERROR] GitDiffReaderTest.shouldBuildDiffWithFiltering:116 >> ServiceUnavailable Signing service is not available +[ERROR] GitDiffReaderTest.shouldReturnEmptyDiff:67 >> ServiceUnavailable Signing service is not available +[INFO] +[ERROR] Tests run: 860, Failures: 0, Errors: 4, Skipped: 4 +[INFO] +[INFO] -------------------------------------------------------------------------------------------------------------------------- +[INFO] Reactor Summary for parent 1.0-SNAPSHOT: +[INFO] +[INFO] parent ............................................................................................... SUCCESS [ 0.189 s] +[INFO] common ............................................................................................... SUCCESS [ 0.959 s] +[INFO] data ................................................................................................. SUCCESS [ 28.127 s] +[INFO] ml ................................................................................................... SUCCESS [ 2.214 s] +[INFO] services ............................................................................................. FAILURE [ 58.575 s] +[INFO] integrations ......................................................................................... 
SKIPPED +[INFO] webapp ............................................................................................... SKIPPED +[INFO] -------------------------------------------------------------------------------------------------------------------------- +[INFO] BUILD FAILURE +[INFO] -------------------------------------------------------------------------------------------------------------------------- +[INFO] Total time: 01:31 min +[INFO] Finished at: 2026-04-08T20:45:10+02:00 +[INFO] -------------------------------------------------------------------------------------------------------------------------- +[ERROR] Failed to execute goal org.apache.maven.plugins:maven-surefire-plugin:3.1.2:test (default-test) on project services: There are test failures. +[ERROR] +[ERROR] See /home/user/project/services/target/surefire-reports for the individual test results. +[ERROR] -> [Help 1] +[ERROR] +[ERROR] To see the full stack trace of the errors, re-run Maven with the '-e' switch +[ERROR] Re-run Maven using the '-X' switch to enable verbose output +[ERROR] +[ERROR] For more information about the errors and possible solutions, please read the following articles: +[ERROR] [Help 1] http://cwiki.apache.org/confluence/display/MAVEN/MojoFailureException diff --git a/tests/fixtures/mvn_test_pass_large_ansi.txt b/tests/fixtures/mvn_test_pass_large_ansi.txt new file mode 100644 index 000000000..93e3ac76e --- /dev/null +++ b/tests/fixtures/mvn_test_pass_large_ansi.txt @@ -0,0 +1,53 @@ +[INFO] Loaded 22505 auto-discovered prefixes for remote repository central (prefixes-central.txt) +[INFO] Scanning for projects... 
+[WARNING] Could not transfer metadata /.meta/prefixes.txt from/to shibboleth-releases-5d99c0486312ae43e4bc39d107c6522d62203c00 (https://build.shibboleth.net/nexus/content/repositories/releases/): Checksum validation failed, no checksums available +[INFO] -------------------------------------------------< com.example:auth >-------------------------------------------------- +[INFO] Building auth 1.3-SNAPSHOT +[INFO] from pom.xml +[INFO] ---------------------------------------------------------[ jar ]---------------------------------------------------------- +[INFO] Loaded 22505 auto-discovered prefixes for remote repository central (prefixes-central.txt) +[INFO] Image name substitution will be performed by: DefaultImageNameSubstitutor (composite of 'ConfigurationFileImageNameSubstitutor' and 'PrefixingImageNameSubstitutor') +[INFO] Found Docker environment with local Unix socket (unix:///var/run/docker.sock) +[INFO] Docker host IP address is localhost +[INFO] Creating container for image: testcontainers/ryuk:0.14.0 +[INFO] Container testcontainers/ryuk:0.14.0 started in PT0.44146219S +[INFO] ✔︎ Docker server version should be at least 1.6.0 +[INFO] Container postgres:17 started in PT5.716666928S +[WARNING] [stderr] Apr 08, 2026 9:25:10 PM liquibase.database +[WARNING] [stderr] Apr 08, 2026 9:25:10 PM liquibase.changelog +[WARNING] [stderr] INFO: Reading resource: db/changelog/2024/db.changelog.2024-1719408042.sql +[WARNING] [stderr] Apr 08, 2026 9:25:10 PM liquibase.changelog +[INFO] ------------------------------------------------------- +[INFO] T E S T S +[INFO] ------------------------------------------------------- +[INFO] Running com.example.webapp.user.PermissionCompareTest +[INFO] Running com.example.webapp.user.ModifyTeamTest +[INFO] Tests run: 4, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 0.044 s -- in com.example.webapp.user.PermissionCompareTest +[INFO] Tests run: 8, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 0.065 s -- in 
com.example.webapp.user.PermissionGrouperTest +[INFO] Running com.example.webapp.user.PermissionProcessorTest +[INFO] Tests run: 2, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 0.004 s -- in com.example.webapp.user.PermissionProcessorTest +[INFO] Running com.example.webapp.user.ProductHelperTest +[INFO] Tests run: 3, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 0.003 s -- in com.example.webapp.user.ProductHelperTest +[INFO] Running com.example.webapp.user.RelayStateUtilTest +[INFO] Tests run: 5, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 0.015 s -- in com.example.webapp.user.RelayStateUtilTest +[INFO] [stdout] Running Changeset: db/changelog/2024/db.changelog.2024-1719408042.sql::dev2:1719408042-1::dev2 +[INFO] [stdout] Running Changeset: db/changelog/2024/db.changelog.2024-1719478245.yaml::1719478245-1::dev1 +[INFO] [stdout] Running Changeset: db/changelog/2024/db.changelog.2024-1719571809.yaml::1719571809-1::dev1 +[INFO] [stdout] Running Changeset: db/changelog/2024/db.changelog.2024-1721123826.yaml::1721123826-1::dev1 +[INFO] [stdout] Running Changeset: db/changelog/2024/db.changelog.2024-1726125249.yaml::1726125249-1::dev1 +[INFO] [stdout] Running Changeset: db/changelog/2024/db.changelog.2024-1726133977.yaml::1726133977-1::dev2 +[INFO] [stdout] Running Changeset: db/changelog/2024/db.changelog.2024-1726647573.yaml::1726647573-1::dev1 +[INFO] [stdout] Running Changeset: db/changelog/2024/db.changelog.2024-1727864000.sql::1727864000-1::dev3 +[INFO] [stdout] Running Changeset: db/changelog/2024/db.changelog.2024-1727939482.yaml::1727939482-1::dev1 +[INFO] [stdout] Running Changeset: db/changelog/2024/db.changelog.2024-1728049660.yaml::1728049660-1::dev1 +[INFO] Tests run: 61, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 18.44 s -- in com.example.webapp.scim.ScimUserControllerIntegrationTest +[INFO] Results: +[INFO] +[INFO] Tests run: 959, Failures: 0, Errors: 0, Skipped: 9 +[INFO] +[INFO] 
-------------------------------------------------------------------------------------------------------------------------- +[INFO] BUILD SUCCESS +[INFO] -------------------------------------------------------------------------------------------------------------------------- +[INFO] Total time: 01:32 min +[INFO] Finished at: 2026-04-08T21:26:30+02:00 +[INFO] -------------------------------------------------------------------------------------------------------------------------- diff --git a/tests/fixtures/mvn_test_pass_mavenmcp.txt b/tests/fixtures/mvn_test_pass_mavenmcp.txt new file mode 100644 index 000000000..beb535770 --- /dev/null +++ b/tests/fixtures/mvn_test_pass_mavenmcp.txt @@ -0,0 +1,35 @@ +[INFO] Scanning for projects... +[INFO] +[INFO] -------------------< com.example:my-app >-------------------- +[INFO] Building my-app 1.0-SNAPSHOT +[INFO] from pom.xml +[INFO] --------------------------------[ jar ]--------------------------------- +[INFO] +[INFO] --- maven-compiler-plugin:3.13.0:compile (default-compile) --- +[INFO] Nothing to compile - all classes are up to date. 
+[INFO] +[INFO] --- maven-surefire-plugin:3.5.4:test (default-test) --- +[INFO] Using auto detected provider org.apache.maven.surefire.junitplatform.JUnitPlatformProvider +[INFO] +[INFO] ------------------------------------------------------- +[INFO] T E S T S +[INFO] ------------------------------------------------------- +[INFO] Running com.example.config.AppConfigTest +[INFO] Tests run: 3, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 0.047 s -- in com.example.config.AppConfigTest +[INFO] Running com.example.service.UserServiceTest +[INFO] Tests run: 12, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 0.009 s -- in com.example.service.UserServiceTest +[INFO] Running com.example.controller.ApiControllerTest +[INFO] Tests run: 8, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 1.325 s -- in com.example.controller.ApiControllerTest +[INFO] Running com.example.repository.ItemRepositoryTest +[INFO] Tests run: 5, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 2.060 s -- in com.example.repository.ItemRepositoryTest +[INFO] +[INFO] Results: +[INFO] +[INFO] Tests run: 183, Failures: 0, Errors: 0, Skipped: 0 +[INFO] +[INFO] ------------------------------------------------------------------------ +[INFO] BUILD SUCCESS +[INFO] ------------------------------------------------------------------------ +[INFO] Total time: 4.748 s +[INFO] Finished at: 2026-04-08T18:26:55+02:00 +[INFO] ------------------------------------------------------------------------ diff --git a/tests/fixtures/mvn_verify_auth.txt b/tests/fixtures/mvn_verify_auth.txt new file mode 100644 index 000000000..5d44b1539 --- /dev/null +++ b/tests/fixtures/mvn_verify_auth.txt @@ -0,0 +1,50 @@ +[INFO] Scanning for projects... 
+[INFO] +[INFO] --- surefire:3.5.5:test (default-test) @ app --- +[INFO] +[INFO] ------------------------------------------------------- +[INFO] T E S T S +[INFO] ------------------------------------------------------- +[INFO] Running com.example.app.AlphaTest +[INFO] Tests run: 4, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 0.051 s -- in com.example.app.AlphaTest +[INFO] Running com.example.app.BetaTest +[INFO] Tests run: 12, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 0.020 s -- in com.example.app.BetaTest +[INFO] Running com.example.app.GammaTest +[INFO] Tests run: 8, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 0.595 s -- in com.example.app.GammaTest +[WARNING] Tests run: 8, Failures: 0, Errors: 0, Skipped: 8, Time elapsed: 0 s -- in com.example.app.SkippedSuiteTest +[INFO] Running com.example.app.DeltaTest +[INFO] Tests run: 656, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 0.221 s -- in com.example.app.DeltaTest +[INFO] +[INFO] Results: +[INFO] +[INFO] Tests run: 688, Failures: 0, Errors: 0, Skipped: 8 +[INFO] +[INFO] --- spring-boot:4.0.5:repackage (repackage) @ app --- +[INFO] Replacing main artifact /build/app.jar with repackaged archive +[INFO] +[INFO] --- failsafe:3.5.5:integration-test (default) @ app --- +[INFO] Using auto detected provider org.apache.maven.surefire.junitplatform.JUnitPlatformProvider +[INFO] +[INFO] ------------------------------------------------------- +[INFO] T E S T S +[INFO] ------------------------------------------------------- +[INFO] Running com.example.app.AuthenticationTest +[INFO] Tests run: 9, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 17.19 s -- in com.example.app.AuthenticationTest +[INFO] Running com.example.app.ProvisioningTest +[INFO] Tests run: 13, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 4.23 s -- in com.example.app.ProvisioningTest +[INFO] Running com.example.app.ScimTest +[WARNING] Tests run: 1, Failures: 0, Errors: 0, Skipped: 1, Time elapsed: 0 s -- in 
com.example.app.ScimTest +[INFO] Running com.example.app.E2EFlowTest +[INFO] Tests run: 239, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 31.44 s -- in com.example.app.E2EFlowTest +[INFO] +[INFO] Results: +[INFO] +[INFO] Tests run: 262, Failures: 0, Errors: 0, Skipped: 1 +[INFO] +[INFO] --- failsafe:3.5.5:verify (default) @ app --- +[INFO] -------------------------------------------------------------------------------------------------------------------------- +[INFO] BUILD SUCCESS +[INFO] -------------------------------------------------------------------------------------------------------------------------- +[INFO] Total time: 02:11 min +[INFO] Finished at: 2026-04-14T18:32:59+02:00 +[INFO] -------------------------------------------------------------------------------------------------------------------------- From a8aa44b7166a2515d711a2a1648e261315502e92 Mon Sep 17 00:00:00 2001 From: mariuszs Date: Wed, 15 Apr 2026 20:48:30 +0200 Subject: [PATCH 21/44] =?UTF-8?q?docs(mvn):=20spec=20=E2=80=94=20surefire/?= =?UTF-8?q?failsafe=20XML=20enrichment=20design?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Ports maven-mcp's SurefireReportParser and StackTraceProcessor to Rust as post-text-filter enrichment layer. Adds appPackage autodetect from pom.xml groupId and no-tests red-flag heuristic. Time-gated XML reads prevent stale fixture pollution. Targets our fork only; stacks on feat/mvn-rust-module. 
--- ...4-15-mvn-surefire-xml-enrichment-design.md | 517 ++++++++++++++++++ 1 file changed, 517 insertions(+) create mode 100644 docs/superpowers/specs/2026-04-15-mvn-surefire-xml-enrichment-design.md diff --git a/docs/superpowers/specs/2026-04-15-mvn-surefire-xml-enrichment-design.md b/docs/superpowers/specs/2026-04-15-mvn-surefire-xml-enrichment-design.md new file mode 100644 index 000000000..715a0a8c0 --- /dev/null +++ b/docs/superpowers/specs/2026-04-15-mvn-surefire-xml-enrichment-design.md @@ -0,0 +1,517 @@ +# mvn Surefire/Failsafe XML Enrichment — Design + +**Status:** Draft → ready for implementation planning +**Branch:** `feat/mvn-surefire-xml` (stacked on `feat/mvn-rust-module`) +**PR target:** `master` of fork `mariuszs/rtk-java` +**Related:** upstream PR rtk-ai/rtk#1089 (existing mvn filter), maven-mcp project (Java prior art) + +## Context + +The current `mvn test` filter (landed in PR #1089 on upstream `rtk-ai/rtk`, also present on our fork's `feat/mvn-rust-module`) runs a text state machine on stdout. It compresses 60–99% of tokens on happy paths but **loses diagnostic signal** in four concrete failure modes: + +1. **Aggregate-only failures.** Only test names + up to 3 detail lines per failure survive. Stack traces, assertion messages, and root causes are dropped. Agents fall back to `rtk proxy mvn test` or manually `cat target/surefire-reports/*.txt`. +2. **No-tests false happy path.** `BUILD SUCCESS` with `Tests run: 0` renders as `"mvn test: no tests run"`, indistinguishable from a healthy but empty run. Real causes (broken surefire plugin config, wrong `-Dtest=` selector) slip through. +3. **Preamble-dropped plugin errors.** `[ERROR]` lines before the `T E S T S` marker (plugin misconfiguration, validation errors) are fully discarded. +4. **Integration-test failures lost.** `ApplicationContext` load failures, Hibernate connection errors, etc. live in `target/failsafe-reports/*.xml` — the text filter never touches them. 
+ +The `maven-mcp` project (`/home/mariusz/projects/maven-mcp`) has a production-grade Java implementation that solves exactly this: a `SurefireReportParser` (JAXP DOM) plus a `StackTraceProcessor` with segment-aware, application-vs-framework-aware, root-cause-preserving truncation. We port that design to Rust and integrate it as a post-text-filter enrichment layer. + +The dependency `quick-xml = "0.37"` is already present (used by `dotnet_trx.rs` for `.trx` parsing). The pattern of "parse artifact files after command execution, with time-gate to skip stale reports" is already established by `dotnet_trx.rs::parse_trx_file_since`. + +## Goals + +- Port `SurefireReportParser` to Rust as `src/cmds/java/surefire_reports.rs` (~500 LoC incl. tests). +- Port `StackTraceProcessor` to Rust as `src/cmds/java/stack_trace.rs` (~400 LoC incl. tests). 1:1 semantic fidelity with the Java original. +- Add `src/cmds/java/pom_groupid.rs` — autodetect `appPackage` from `pom.xml ` (with parent fallback and `RTK_MVN_APP_PACKAGE` override). +- Extend `filter_mvn_test` flow with a new pure-I/O layer `enrich_with_reports(text, cwd, since, app_pkg)` that: + - reads `target/surefire-reports/TEST-*.xml` and `target/failsafe-reports/*.xml` only when the text summary suggests a failure or zero-test situation, + - time-gates files via `mtime >= started_at` captured before `mvn` execution, + - emits a structured failures section appended to the existing summary line. +- Introduce a no-tests red-flag heuristic: distinguish "clean run with no sources" from "suspicious zero tests (possibly misconfigured surefire)". +- Preserve current token-savings targets on the happy path (≥97%) and maintain ≥85% on enriched-failure paths. +- Ship with reusable fixtures ported from `maven-mcp/src/test/resources/surefire-reports/` plus three synthetic `pom.xml` fixtures for groupId detection. + +## Non-Goals + +- Rewriting the existing state-machine text filter. 
The text filter stays pure and its snapshot tests remain untouched. +- Parsing Surefire `.txt` reports. XML is the canonical structured format; `.txt` is a stdout-redirect and duplicates information we already have. +- Flaky test detection, test re-run logic, coverage integration. +- Live streaming of enriched output. Enrichment runs once, after `mvn` exits. +- Parsing arbitrary non-Maven test reports. Scope is Surefire + Failsafe XML only. +- Changing the upstream PR #1089. That PR lands the text filter as-is; this work stacks on top and ships to our fork only. + +## Architecture + +### Module map + +``` +src/cmds/java/ +├── mod.rs (modified — export new modules) +├── mvn_cmd.rs (modified — integrate enrichment in run_test) +├── surefire_reports.rs (NEW — XML parser, TEST-*.xml iteration) +├── stack_trace.rs (NEW — port of StackTraceProcessor) +└── pom_groupid.rs (NEW — appPackage autodetect) + +tests/fixtures/java/ +├── surefire-reports/ +│ ├── TEST-com.example.PassingTest.xml (copied from maven-mcp) +│ ├── TEST-com.example.FailingTest.xml (copied from maven-mcp) +│ ├── TEST-com.example.FailingTestWithLogs.xml (copied from maven-mcp) +│ ├── TEST-com.example.SkippedTest.xml (copied from maven-mcp) +│ └── TEST-com.example.ErrorTest.xml (copied from maven-mcp) +├── failsafe-reports/ +│ ├── TEST-com.example.DbIntegrationIT.xml (synthesized — ApplicationContext failure) +│ └── TEST-com.example.PortConflictIT.xml (synthesized — socket-bind failure) +├── poms/ +│ ├── single-module-pom.xml (explicit ) +│ ├── multi-module-parent-pom.xml (parent POM, has ) +│ ├── child-pom.xml (no , has ) +│ └── no-groupid-pom.xml (edge case — returns None) +└── stack-traces/ + ├── simple-assertion.txt (1-segment, short) + ├── caused-by-chain.txt (3-segment, framework-heavy) + └── suppressed-nested.txt (Suppressed + indented Caused by) +``` + +### Data flow + +``` +run_test(args): + started_at = SystemTime::now() # captured BEFORE exec + cwd = std::env::current_dir()? 
+ app_pkg = pom_groupid::detect(&cwd) # cached per-cwd + + output = execute_command("mvn", ...) + + text_summary = filter_mvn_test(&output.stdout) # PURE — existing tests unchanged + enriched = enrich_with_reports( # NEW I/O layer + &text_summary, &cwd, started_at, app_pkg.as_deref(), + ) + + tracking::record(...) + print!("{enriched}") + exit(output.status.code().unwrap_or(1)) +``` + +`filter_mvn_test(output: &str) -> String` remains unchanged. `enrich_with_reports` is the only new I/O surface in the test path. + +### Time-gate rationale + +Developers and CI rerun builds frequently; stale XML reports from previous runs would pollute diagnostic output with false failures and inflate counts. `started_at` is captured in `run_test` just before process spawn. `surefire_reports::parse_dir` compares each file's `mtime` against `since` and skips older files. This mirrors `dotnet_trx::parse_trx_file_since`. + +## Components + +### `surefire_reports.rs` + +**Constants (1:1 with maven-mcp):** + +```rust +pub const DEFAULT_STACK_TRACE_LINES: usize = 50; +pub const DEFAULT_PER_TEST_OUTPUT_LIMIT: usize = 2000; +pub const DEFAULT_TOTAL_OUTPUT_LIMIT: usize = 10000; +``` + +**Public types:** + +```rust +pub struct TestSummary { + pub run: u32, + pub failures: u32, + pub errors: u32, + pub skipped: u32, +} + +pub enum FailureKind { Failure, Error } + +pub struct TestFailure { + pub test_class: String, + pub test_method: String, + pub kind: FailureKind, + pub message: Option, // from message="..." attribute + pub failure_type: Option, // from type="..." 
attribute
+    pub stack_trace: Option<String>,   // processed via stack_trace::process
+    pub test_output: Option<String>,   // combined system-out + system-err, truncated to per-test limit
+}
+
+pub struct SurefireResult {
+    pub summary: TestSummary,
+    pub failures: Vec<TestFailure>,
+    pub files_read: usize,
+    pub files_skipped_stale: usize,
+    pub files_malformed: usize,
+}
+```
+
+**Public API:**
+
+```rust
+pub fn parse_dir(
+    dir: &Path,
+    since: Option<SystemTime>,
+    app_package: Option<&str>,
+) -> Option<SurefireResult>;
+
+pub fn parse_dir_with_limits(
+    dir: &Path,
+    since: Option<SystemTime>,
+    app_package: Option<&str>,
+    per_test_output_limit: usize,
+    total_output_limit: usize,
+    stack_trace_lines: usize,
+) -> Option<SurefireResult>;
+```
+
+Returns `None` when the directory does not exist or contains no `TEST-*.xml` files that pass the time-gate. Returns `Some(SurefireResult { summary: zero, failures: empty, … })` when fresh files exist but none contain failures.
+
+**Parsing strategy (quick-xml streaming):**
+
+Event loop with enum state `{Idle, InTestsuite, InTestcase, InFailureText, InErrorText, InSystemOut, InSystemErr}`. Transitions:
+
+- `Start("testsuite")`: read attributes `tests`, `failures`, `errors`, `skipped`; add to per-file totals. Push state `InTestsuite`.
+- `Start("testcase")` inside `InTestsuite`: read `classname`, `name`. Save as `current_testcase`. Push state `InTestcase`.
+- `Start("failure")` / `Start("error")` inside `InTestcase`: read `message`, `type` attrs. Push state `InFailureText` / `InErrorText`. Begin text accumulator.
+- `Text` inside `InFailureText` / `InErrorText`: append to stack_trace buffer.
+- `End("failure")` / `End("error")`: finalize stack trace via `stack_trace::process(raw, app_package, DEFAULT_STACK_TRACE_LINES)`. Record failure. Pop state.
+- `Start("system-out")` / `Start("system-err")` inside `InTestcase`: if current testcase has a failure/error already recorded, push state and begin buffer. Otherwise ignore (we don't extract logs from passing tests — matches maven-mcp behavior). 
+- `End("system-out")` / `End("system-err")`: append buffered text to `TestFailure::test_output` (with `[STDERR]` separator if both present), pop state.
+- `End("testcase")`: pop state. Clear `current_testcase`.
+- `End("testsuite")`: pop state. Accumulate this file's summary into per-dir totals.
+
+After all files processed, run `apply_total_output_limit` (iterate failures, cumulative length of `test_output`; once exceeds 10000, null out remaining `test_output` fields).
+
+**File selection:**
+
+- `read_dir(dir)` → filter entries where `file_name().starts_with("TEST-") && file_name().ends_with(".xml")`.
+- For each, check `metadata().modified()?` against `since`. Increment `files_skipped_stale` on skip.
+- On parse failure (malformed XML, IO error), increment `files_malformed`, emit `eprintln!("rtk mvn: skipping malformed {}", name)`, continue. Never panic.
+
+**Error handling:** `anyhow::Result<Option<SurefireResult>>` internally; public wrapper swallows the Err variant and returns `None` after logging. The enrichment layer must never crash `mvn_cmd` — mvn already ran, output must flow.
+
+### `stack_trace.rs`
+
+**Constants (1:1 with maven-mcp):**
+
+```rust
+const DEFAULT_ROOT_CAUSE_APP_FRAMES: usize = 10;
+const MAX_HEADER_LENGTH: usize = 200;
+```
+
+**Types:**
+
+```rust
+struct Segment {
+    header: String,
+    frames: Vec<String>,
+}
+```
+
+**Public API:**
+
+```rust
+pub fn process(
+    raw: &str,
+    app_package: Option<&str>, // None = keep all frames (no classification)
+    max_lines: usize,          // 0 = no hard cap
+) -> Option<String>;
+```
+
+Returns `None` iff `raw` is empty or whitespace-only.
+
+**Algorithm (1:1 port):**
+
+1. **`parse_segments(trace)`**: split lines. First non-empty line is top-level header. Each subsequent line starting with `"Caused by:"` (exact prefix match, no leading whitespace — critical: indented `"\tCaused by:"` stays as a frame inside Suppressed blocks) closes the current segment and opens a new one. All other lines append to current segment's frames.
+
+2. 
**`is_structural_line(line)`**: returns `true` if: + - `line.trim_start().starts_with("Suppressed:")`, OR + - `line.starts_with(char::is_whitespace)` AND `line.trim_start().starts_with("Caused by:")` (nested in Suppressed). + +3. **`is_application_frame(line, app_package)`**: if `app_package.is_none()`, return `true`. Otherwise strip leading whitespace, strip `"at "` prefix. If remainder starts with `app_package`, return `true`. Lines like `"\t... 42 more"` return `false`. + +4. **`add_collapsed_frames(output, frames, app_package)`** (top-level + intermediate segments): + - Iterate frames. Count consecutive framework frames. + - When hitting app or structural frame: if counter > 0, push `"\t... N framework frames omitted"` and reset. Push the app/structural frame (structural goes through `truncate_header`). + - At end of loop: flush remaining framework-frame counter. + +5. **`add_root_cause_frames(output, frames, app_package)`**: + - Same as above, but also count `app_frames_emitted`. Structural frames bypass the cap; non-structural app frames stop being emitted once `app_frames_emitted >= DEFAULT_ROOT_CAUSE_APP_FRAMES`. + +6. **`apply_hard_cap(output_lines, segments, max_lines)`**: + - Segment count ≤ 1: `output[..max_lines]`. + - Multi-segment: find root-cause header (last segment's truncated header) in output. If its index ≥ max_lines − 1, build synthetic: `[top_header, "\t... (intermediate frames truncated)", root_header, …root-cause frames until cap]`. Otherwise truncate at max_lines. + +7. **`truncate_header(line)`**: if `line.chars().count() > MAX_HEADER_LENGTH`, return first `MAX_HEADER_LENGTH` chars + `"..."`. UTF-8 safe via `utils::truncate`. 
+
+**`process()` orchestration:**
+
+```
+segments = parse_segments(raw.trim())
+if segments.empty: return Some(raw.trim())
+
+let filter = app_package.is_some_and(|p| !p.is_empty())
+let mut out = Vec::new()
+out.push(truncate_header(&segments[0].header))
+
+if segments.len() == 1:
+    add_collapsed_frames(&mut out, &segments[0].frames, app_package, filter)
+else:
+    add_collapsed_frames(&mut out, &segments[0].frames, app_package, filter)
+    for seg in &segments[1..segments.len()-1]:
+        out.push(truncate_header(&seg.header))
+        add_collapsed_frames(&mut out, &seg.frames, app_package, filter)
+    let root = segments.last().unwrap()
+    out.push(truncate_header(&root.header))
+    add_root_cause_frames(&mut out, &root.frames, app_package, filter)
+
+if max_lines > 0 && out.len() > max_lines:
+    out = apply_hard_cap(out, segments, max_lines)
+
+Some(out.join("\n"))
+```
+
+### `pom_groupid.rs`
+
+**Public API:**
+
+```rust
+pub fn detect(cwd: &Path) -> Option<String>;
+```
+
+**Algorithm:**
+
+1. If env var `RTK_MVN_APP_PACKAGE` is set and non-empty, return its value (override always wins).
+2. Check thread-local cache `(PathBuf, Option<String>)` keyed by `cwd`. If hit, return cached value.
+3. Resolve `cwd.join("pom.xml")`. If missing, cache `None` and return.
+4. Stream-parse pom.xml with quick-xml, tracking element stack depth:
+   - When inside a top-level `<groupId>` (depth 1 under `<project>`), capture the text, return after `</groupId>`.
+   - If no top-level `<groupId>` found during first pass, fall back: look for `<parent>`/`<groupId>` (depth 2).
+   - Return first match, or `None`.
+5. Cache result. Return.
+
+**Streaming impl notes:**
+
+- Single pass with a small state machine: track stack of tag names.
+- Short-circuit: once first groupId found at valid depth, close reader and return.
+- Malformed XML: return `None` silently (never panic).
+
+### `mvn_cmd.rs` integration
+
+**New helper:**
+
+```rust
+pub(crate) fn enrich_with_reports(
+    text_summary: &str,
+    cwd: &Path,
+    since: SystemTime,
+    app_package: Option<&str>,
+) -> String;
+```
+
+**Logic:**
+
+```
+if !text_summary.starts_with("mvn "):
+    return text_summary.to_owned()  // defensive — shouldn't happen
+
+let looks_clean = contains("passed (") && !contains("failed") && !contains("BUILD FAILURE")
+let zero_tests = text_summary == "mvn test: no tests run" || contains("0 passed")
+let has_failures = contains(" failed") || contains("BUILD FAILURE")
+
+if looks_clean && !zero_tests:
+    return text_summary.to_owned()  // optimization — happy path, no I/O
+
+let sf = surefire_reports::parse_dir(&cwd.join("target/surefire-reports"), Some(since), app_package)
+let fs = surefire_reports::parse_dir(&cwd.join("target/failsafe-reports"), Some(since), app_package)
+
+match (zero_tests, has_failures, &sf, &fs):
+    (true, _, None, None) =>
+        "mvn test: 0 tests executed — surefire did not detect any tests. \
+         Check pom.xml (surefire plugin configuration) or run: rtk proxy mvn test"
+    (true, _, Some(r), _) if r.summary.run > 0 =>
+        // reports show tests ran; text said zero — trust reports
+        render_enriched(text_summary, sf.as_ref(), fs.as_ref(), zero_tests)
+    (_, true, None, None) =>
+        format!("{text_summary}\n(no XML reports found — check target/surefire-reports/ \
+         or run: rtk proxy mvn test)")
+    _ =>
+        render_enriched(text_summary, sf.as_ref(), fs.as_ref(), zero_tests)
+```
+
+**Rendering format (`render_enriched`):**
+
+```
+<existing text summary line>
+
+Failures (from surefire-reports/):
+1. com.example.UserServiceTest.shouldReturnUser
+   AssertionFailedError: expected:<200> but was:<404>
+   org.opentest4j.AssertionFailedError: expected:<200> but was:<404>
+       at com.example.UserServiceTest.shouldReturnUser(UserServiceTest.java:42)
+       ... 8 framework frames omitted
+
+2. 
com.example.OrderServiceTest.shouldHandleNull
+   AssertionError: Unexpected exception
+   java.lang.AssertionError: Unexpected exception: NullPointerException
+       at com.example.OrderServiceTest.shouldHandleNull(OrderServiceTest.java:55)
+
+Integration failures (from failsafe-reports/):
+1. com.example.DbIntegrationIT.shouldConnect
+   Caused by: HibernateException
+   Caused by: org.hibernate.HibernateException: Unable to acquire JDBC Connection
+       at com.example.DbIntegrationIT.shouldConnect(DbIntegrationIT.java:88)
+       ... 14 framework frames omitted
+
+   captured stderr:
+   Connection refused (Connection refused)
+
+(reports: 12 surefire, 1 failsafe, 3 stale files skipped)
+```
+
+Key rendering rules:
+
+- Cap at **10 failures per source** (10 surefire + 10 failsafe, each independently). Append `"\n... +N more failures"` under the relevant section heading when truncated. Matches the text filter's existing `MAX_FAILURES_SHOWN = 10` convention.
+- `message` field goes on the second line (short summary from `<failure>`/`<error>` attributes; falls back to the first line of the stack trace if `message` is empty). Full stack trace on subsequent lines, indented 5 spaces.
+- `test_output` (combined `<system-out>` + `<system-err>` as stored in `TestFailure::test_output`), when present and non-empty, renders as a single block labeled `captured output:` (labeling distinguishes stdout-only, stderr-only, and combined via the `[STDERR]` separator already embedded in the buffer).
+- Footer line `(reports: …)` only when at least one file was read or skipped. Format: `(reports: N surefire, M failsafe, K stale files skipped, J malformed)` — omit count-components that are zero.
+
+## Data / Models
+
+Fixtures from maven-mcp are copied verbatim to `tests/fixtures/java/surefire-reports/`. Synthesized failsafe fixtures:
+
+- `TEST-com.example.DbIntegrationIT.xml`: 3-segment Caused-by chain (wrapper → SpringContextException → HibernateException), 40+ frames, system-err with JDBC error. 
+- `TEST-com.example.PortConflictIT.xml`: 1-segment, short, SocketException, system-err with "address already in use".
+
+POM fixtures are minimal:
+
+```xml
+<!-- single-module-pom.xml -->
+<project><groupId>com.example.app</groupId><artifactId>app</artifactId><version>1.0</version></project>
+
+<!-- child-pom.xml -->
+<project>
+  <parent><groupId>com.example.app</groupId><artifactId>parent</artifactId><version>1.0</version></parent>
+  <artifactId>child</artifactId>
+</project>
+```
+
+## Tests
+
+### Unit
+
+**`surefire_reports::tests`:**
+
+- `parse_dir_happy` — single passing XML → `summary{run=3, failures=0}`, `failures.is_empty()`.
+- `parse_dir_with_failures` — FailingTest → 2 `TestFailure` entries with stack traces and messages.
+- `parse_dir_with_logs` — FailingTestWithLogs → `test_output` contains stdout + `[STDERR]` + stderr; passing test's `<system-out>` NOT extracted.
+- `parse_dir_multi_file` — 5 XMLs in dir → summary aggregates across all files.
+- `parse_dir_time_gate` — fixtures copied into a `tempfile::TempDir` with `filetime::set_file_mtime` set before `since` → `files_skipped_stale == n`. Requires adding `filetime = "0.2"` to `[dev-dependencies]` (not present today; runtime code uses `std::fs::Metadata::modified()` only).
+- `parse_dir_malformed_graceful` — corrupt XML in dir → `files_malformed == 1`, other files still parsed.
+- `parse_dir_missing_returns_none` — non-existent dir → `None`.
+- `parse_dir_empty_returns_none` — dir exists but no `TEST-*.xml` → `None`.
+- `total_output_limit_applied` — 10+ failures with large test_output → later entries have `test_output == None`.
+
+**`stack_trace::tests`** (port tests from `StackTraceProcessorTest.java`; enumerate during implementation):
+
+- Single segment, no filter → returns verbatim.
+- Single segment, with app_package → framework frames collapsed.
+- Three-segment Caused-by chain → top/intermediate collapsed, root-cause preserved.
+- Suppressed block with indented `Caused by:` → structural lines preserved, not parsed as segments.
+- Hard cap with root cause beyond limit → synthetic intermediate-truncated output.
+- Hard cap with root cause within limit → straight truncate.
+- Header >200 chars → truncated with `"..."`. 
+- UTF-8 in frames (Japanese class names) → no panic, char-boundary-safe truncation. + +**`pom_groupid::tests`:** + +- single-module POM → `Some("com.example.app")`. +- child POM (no groupId, has parent.groupId) → `Some("com.example.app")`. +- no-groupid POM → `None`. +- missing pom.xml → `None`. +- env var `RTK_MVN_APP_PACKAGE=com.override` set → returns `"com.override"` regardless of pom content. +- malformed pom.xml → `None`, no panic. + +### Integration (in `mvn_cmd.rs`) + +- `enrich_happy_path_no_io` — text `"mvn test: 32 passed (11.6s)"` → returns verbatim, no directory reads (verify via `tempdir` with no `target/` present). +- `enrich_with_failures_snapshot` — copy surefire fixtures to `tempdir/target/surefire-reports/`, set mtime to `now`, invoke `enrich_with_reports` → insta snapshot. +- `enrich_with_both_reports_snapshot` — surefire + failsafe fixtures → snapshot with both sections. +- `enrich_red_flag_no_tests` — text `"mvn test: no tests run"`, empty `target/` → returns red-flag message. +- `enrich_stale_reports_skipped` — fixtures with `mtime` before `since` → `files_skipped_stale > 0`, no failures in output. +- `enrich_malformed_xml_does_not_crash` — one malformed XML in fixture dir → output still produced, `files_malformed == 1`. + +### Token savings + +- `savings_enriched_failures` — real multi-module `mvn verify` log (~2000 lines) with synthesized surefire + failsafe XMLs → enriched output ≤ 400 lines. Assert `savings >= 85%`. +- `savings_happy_path_unchanged` — happy path fixture → assert `savings >= 97%`. No enrichment, no I/O. + +### Snapshots (insta) + +- `snap_enriched_surefire_only.snap` +- `snap_enriched_surefire_and_failsafe.snap` +- `snap_enriched_truncated_stack.snap` — 200-frame trace, cap at 50 lines. +- `snap_enriched_multi_caused_by.snap` — 3-segment chain with root-cause preserved. 
+- `snap_red_flag_no_tests.snap` +- `snap_fallback_no_xml_reports.snap` + +### Performance + +`hyperfine` check: `target/release/rtk mvn test` on a project with 50 `TEST-*.xml` files must complete within +5ms vs. text-only path. I/O is the only new cost; budget is generous to account for disk cache cold-start. + +## Implementation Plan + +Commits, in order, each reviewable in isolation: + +1. **`feat(mvn): port StackTraceProcessor to Rust`** (~400 LoC) + - `src/cmds/java/stack_trace.rs` + tests + - Fixtures: `tests/fixtures/java/stack-traces/*.txt` + - No integration yet. Module compiles standalone with full test coverage. + +2. **`feat(mvn): add SurefireReportParser`** (~500 LoC) + - `src/cmds/java/surefire_reports.rs` + tests + - Fixtures: 5 XMLs from maven-mcp + 2 synthesized failsafe XMLs + - Depends on `stack_trace.rs` for stack processing. + +3. **`feat(mvn): autodetect appPackage from pom.xml`** (~150 LoC) + - `src/cmds/java/pom_groupid.rs` + tests + - Fixtures: 4 POM files + - Independent of parsers above. + +4. **`feat(mvn): enrich test output with XML reports`** (~250 LoC) + - Modify `mvn_cmd.rs`: capture `started_at`, call `detect`, call `enrich_with_reports`. + - Implement `enrich_with_reports` + `render_enriched`. + - Snapshot tests + integration tests. + - No-tests red-flag heuristic. + - Existing `filter_mvn_test` tests untouched. + +5. **`docs(mvn): document surefire/failsafe XML enrichment`** + - Update `src/cmds/java/README.md`. + - Add section to `CHANGELOG.md`. + - Note `RTK_MVN_APP_PACKAGE` env var. + +## Risks / Trade-offs + +**[Risk] Surefire XML format drift.** Surefire 3.x has been stable and the format is widely consumed (IntelliJ, CI, Maven itself). Any drift would break our snapshot tests first and fixes would be local. Mitigation: snapshot tests on real-world fixtures, port maven-mcp's proven parser. + +**[Risk] File I/O on every failing `mvn test`.** Directory read + N file opens + N XML parses. 
On a 50-file repo this is ~5ms cold cache, <1ms warm. Acceptable for diagnostic path; skipped entirely on happy path via `looks_clean` short-circuit. + +**[Risk] `appPackage` autodetect picks wrong package in polyglot monorepos.** A repo with `pom.xml` at root but mixed Java/Kotlin/Scala modules might have a groupId that doesn't match the failing test's package. Consequence: framework-frame collapsing disabled (all frames kept). This degrades gracefully — output is slightly noisier but never wrong. Override via `RTK_MVN_APP_PACKAGE`. + +**[Risk] Stale reports from previous runs slip through time-gate.** Only if a previous `mvn` wrote XML, the user then `touch`ed the files, then ran `rtk mvn test` without the filter re-writing them (e.g., compilation failed before surefire ran). Result: yesterday's failures shown as today's. Mitigation: the time-gate uses `mtime`; surefire always rewrites. Only a manual `touch` defeats this, which is user error. Worst-case impact is bounded to diagnostic noise, never incorrect exit codes. + +**[Trade-off] Enrichment increases output size on failures.** Savings drop from 90%+ to ~85% on enriched paths. This is the whole point: we trade compression for signal exactly when signal matters. Happy path remains maximally compressed. + +**[Trade-off] Port duplicates maven-mcp's Java code.** Future fixes require updates in two places. Mitigated by the algorithms being small, well-defined, and stable. No live sync needed. + +**[Trade-off] We read XML even when text filter correctly identified all failures.** Accepted: the XML is the source of truth. Text parsing of stderr `[ERROR]` messages is fragile; XML is canonical. If counts disagree between text and XML, XML wins in the enriched rendering. + +## Open Questions + +None blocking. The design is a direct port with explicit decisions above. 
+ +## References + +- Java original: `/home/mariusz/projects/maven-mcp/src/main/java/io/github/mavenmcp/parser/SurefireReportParser.java` +- Stack trace original: `/home/mariusz/projects/maven-mcp/src/main/java/io/github/mavenmcp/parser/StackTraceProcessor.java` +- Design doc (Java): `/home/mariusz/projects/maven-mcp/openspec/changes/archive/2026-02-15-surefire-parser-and-test-tool/design.md` +- Rust prior art for XML-report parsing: `src/cmds/dotnet/dotnet_trx.rs` +- Related PR (upstream base): `rtk-ai/rtk#1089` (feat(mvn): add Maven (Java) filter module) +- Fork: `mariuszs/rtk-java` (PR target: `master`) From aa7d77af79ee7acb082820b943a31901a1c37ebe Mon Sep 17 00:00:00 2001 From: mariuszs Date: Wed, 15 Apr 2026 20:57:00 +0200 Subject: [PATCH 22/44] =?UTF-8?q?docs(mvn):=20implementation=20plan=20?= =?UTF-8?q?=E2=80=94=20surefire/failsafe=20XML=20enrichment?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit 21 tasks, TDD red-green-commit per step, targets our fork's master via feat/mvn-surefire-xml stacked on feat/mvn-rust-module. Covers the full spec: stack_trace port, surefire_reports parser, pom_groupid autodetect, mvn_cmd integration, snapshot + savings tests, docs, PR. --- .../2026-04-15-mvn-surefire-xml-enrichment.md | 3044 +++++++++++++++++ 1 file changed, 3044 insertions(+) create mode 100644 docs/superpowers/plans/2026-04-15-mvn-surefire-xml-enrichment.md diff --git a/docs/superpowers/plans/2026-04-15-mvn-surefire-xml-enrichment.md b/docs/superpowers/plans/2026-04-15-mvn-surefire-xml-enrichment.md new file mode 100644 index 000000000..c1cbb595c --- /dev/null +++ b/docs/superpowers/plans/2026-04-15-mvn-surefire-xml-enrichment.md @@ -0,0 +1,3044 @@ +# mvn Surefire/Failsafe XML Enrichment Implementation Plan + +> **For agentic workers:** REQUIRED SUB-SKILL: Use superpowers:subagent-driven-development (recommended) or superpowers:executing-plans to implement this plan task-by-task. 
Steps use checkbox (`- [ ]`) syntax for tracking. + +**Goal:** Port maven-mcp's `SurefireReportParser` + `StackTraceProcessor` to Rust, plus a pom.xml groupId autodetect and an integration layer that enriches `rtk mvn test` output with structured failure details read from `target/surefire-reports/` and `target/failsafe-reports/` XML. + +**Architecture:** Three pure new modules under `src/cmds/java/` (`stack_trace.rs`, `surefire_reports.rs`, `pom_groupid.rs`) plus a post-text-filter I/O layer in `mvn_cmd.rs`. The existing `filter_mvn_test` string transformer stays untouched; a new `enrich_with_reports(text, cwd, since, app_pkg)` function reads XML reports (time-gated by `started_at`) and appends a structured failures section. Auto-detected `appPackage` feeds `stack_trace::process` for framework-frame collapsing with root-cause preservation. + +**Tech Stack:** Rust, `quick-xml = "0.37"` (already in deps, used by `dotnet_trx.rs`), `anyhow`, `lazy_static`, `insta` for snapshots, `tempfile` for integration tests, `filetime = "0.2"` added as dev-dep for mtime-based time-gate tests. + +**Spec:** `docs/superpowers/specs/2026-04-15-mvn-surefire-xml-enrichment-design.md` + +**Fork / PR target:** `mariuszs/rtk-java`, branch `feat/mvn-surefire-xml` stacked on `feat/mvn-rust-module`, PR into fork's `master`. + +--- + +## Task 0: Branch, scaffolding, dev-dep + +**Files:** +- Modify: `Cargo.toml` (add `filetime` dev-dep) +- Create: `src/cmds/java/stack_trace.rs` (empty stub) +- Create: `src/cmds/java/surefire_reports.rs` (empty stub) +- Create: `src/cmds/java/pom_groupid.rs` (empty stub) +- Create: `tests/fixtures/java/surefire-reports/.gitkeep` +- Create: `tests/fixtures/java/failsafe-reports/.gitkeep` +- Create: `tests/fixtures/java/poms/.gitkeep` +- Create: `tests/fixtures/java/stack-traces/.gitkeep` + +Note: `src/cmds/java/mod.rs` is `automod::dir!(pub "src/cmds/java");` — it auto-exports every `.rs` file in the directory. 
No manual module wiring needed; the stubs will be picked up automatically once they compile. + +- [ ] **Step 0.1: Create and switch to branch** + +Run: +```bash +git checkout feat/mvn-rust-module +git checkout -b feat/mvn-surefire-xml +git status +``` +Expected: `On branch feat/mvn-surefire-xml`, clean working tree. + +- [ ] **Step 0.2: Add `filetime` dev-dep** + +Edit `Cargo.toml`, change the `[dev-dependencies]` block from: +```toml +[dev-dependencies] +``` +to: +```toml +[dev-dependencies] +filetime = "0.2" +insta = "1" +``` + +Note: verify `insta` is not already declared elsewhere in `Cargo.toml` before adding. If `insta` is already in `[dependencies]`, only add `filetime`. + +- [ ] **Step 0.3: Check `insta` availability** + +Run: +```bash +grep -n '^insta' Cargo.toml +``` +If `insta = ...` is already listed under `[dependencies]`, remove the `insta = "1"` line you added in step 0.2 (keep only `filetime = "0.2"`). + +- [ ] **Step 0.4: Create module stubs** + +Create `src/cmds/java/stack_trace.rs`: +```rust +//! Port of maven-mcp's StackTraceProcessor. +//! +//! Parses Java stack traces into segments (top-level exception + Caused by +//! chains), classifies frames as application or framework by package prefix, +//! collapses framework noise, and preserves root-cause frames. +``` + +Create `src/cmds/java/surefire_reports.rs`: +```rust +//! Parses Maven Surefire/Failsafe XML test reports from +//! `target/surefire-reports/TEST-*.xml` and `target/failsafe-reports/*.xml`. +//! Uses quick-xml streaming parser. Time-gated by `started_at` to skip stale +//! reports from previous runs. +``` + +Create `src/cmds/java/pom_groupid.rs`: +```rust +//! Autodetects the application Java package from `pom.xml `. +//! Used by `surefire_reports` / `stack_trace` to classify application frames. +//! Can be overridden by `RTK_MVN_APP_PACKAGE` env var. 
+``` + +Create empty fixture directories: +```bash +mkdir -p tests/fixtures/java/{surefire-reports,failsafe-reports,poms,stack-traces} +touch tests/fixtures/java/{surefire-reports,failsafe-reports,poms,stack-traces}/.gitkeep +``` + +- [ ] **Step 0.5: Verify build** + +Run: +```bash +cargo build +``` +Expected: PASS. `automod` auto-discovers the new modules; they're empty and harmless. + +- [ ] **Step 0.6: Commit scaffolding** + +```bash +git add Cargo.toml src/cmds/java/stack_trace.rs src/cmds/java/surefire_reports.rs src/cmds/java/pom_groupid.rs tests/fixtures/java/ +git commit -m "chore(mvn): scaffold surefire-xml modules and fixture dirs + +Empty stubs for stack_trace, surefire_reports, pom_groupid. Adds +filetime dev-dep for mtime-based time-gate tests in later tasks." +``` + +--- + +## Task 1: `stack_trace::Segment` + `parse_segments` + +**Files:** +- Modify: `src/cmds/java/stack_trace.rs` + +- [ ] **Step 1.1: Write failing tests** + +Append to `src/cmds/java/stack_trace.rs`: +```rust +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn parse_segments_empty_input_returns_empty() { + assert!(parse_segments("").is_empty()); + } + + #[test] + fn parse_segments_single_header_no_frames() { + let trace = "java.lang.RuntimeException: boom"; + let segs = parse_segments(trace); + assert_eq!(segs.len(), 1); + assert_eq!(segs[0].header, "java.lang.RuntimeException: boom"); + assert!(segs[0].frames.is_empty()); + } + + #[test] + fn parse_segments_single_segment_with_frames() { + let trace = "java.lang.RuntimeException: boom\n\ + \tat com.example.A.foo(A.java:1)\n\ + \tat com.example.B.bar(B.java:2)"; + let segs = parse_segments(trace); + assert_eq!(segs.len(), 1); + assert_eq!(segs[0].frames.len(), 2); + } + + #[test] + fn parse_segments_caused_by_starts_new_segment() { + let trace = "java.lang.RuntimeException: outer\n\ + \tat com.example.A.foo(A.java:1)\n\ + Caused by: java.io.IOException: inner\n\ + \tat com.example.B.bar(B.java:2)"; + let segs = 
parse_segments(trace); + assert_eq!(segs.len(), 2); + assert_eq!(segs[0].header, "java.lang.RuntimeException: outer"); + assert_eq!(segs[0].frames, vec!["\tat com.example.A.foo(A.java:1)"]); + assert_eq!(segs[1].header, "Caused by: java.io.IOException: inner"); + assert_eq!(segs[1].frames, vec!["\tat com.example.B.bar(B.java:2)"]); + } + + #[test] + fn parse_segments_indented_caused_by_stays_as_frame() { + // Inside a Suppressed block, the "Caused by:" is indented and must NOT + // split segments — it stays as a frame so structural handling keeps it. + let trace = "java.lang.RuntimeException: outer\n\ + \tSuppressed: java.io.IOException: suppressed\n\ + \t\tat com.example.A.foo(A.java:1)\n\ + \t\tCaused by: java.lang.Error: nested\n\ + Caused by: java.io.IOException: real cause"; + let segs = parse_segments(trace); + assert_eq!(segs.len(), 2, "indented Caused by must not split segments"); + assert_eq!(segs[0].frames.len(), 3, "Suppressed block stays in outer"); + assert_eq!(segs[1].header, "Caused by: java.io.IOException: real cause"); + } +} +``` + +- [ ] **Step 1.2: Run tests to verify they fail** + +Run: +```bash +cargo test --lib stack_trace::tests +``` +Expected: FAIL — `parse_segments` and `Segment` not defined. + +- [ ] **Step 1.3: Implement `Segment` and `parse_segments`** + +Prepend to `src/cmds/java/stack_trace.rs` (above the `#[cfg(test)]` block): +```rust +#[derive(Debug, PartialEq)] +pub(crate) struct Segment { + pub(crate) header: String, + pub(crate) frames: Vec, +} + +/// Split a stack trace into segments. +/// +/// The first non-empty line becomes the header of segment 0. Each subsequent +/// line starting with the literal `"Caused by:"` (no leading whitespace) closes +/// the current segment and opens a new one. All other lines append to the +/// current segment's frames. +/// +/// Indented `"\tCaused by:"` inside Suppressed blocks stays as a frame and +/// does NOT split segments — `is_structural_line` preserves it during frame +/// collapsing. 
+pub(crate) fn parse_segments(trace: &str) -> Vec { + let trace = trace.trim(); + if trace.is_empty() { + return Vec::new(); + } + + let mut segments = Vec::new(); + let mut current_header: Option = None; + let mut current_frames: Vec = Vec::new(); + + for line in trace.lines() { + if current_header.is_none() { + current_header = Some(line.to_string()); + } else if line.starts_with("Caused by:") { + segments.push(Segment { + header: current_header.take().unwrap(), + frames: std::mem::take(&mut current_frames), + }); + current_header = Some(line.to_string()); + } else { + current_frames.push(line.to_string()); + } + } + + if let Some(header) = current_header { + segments.push(Segment { + header, + frames: current_frames, + }); + } + + segments +} +``` + +- [ ] **Step 1.4: Run tests to verify they pass** + +Run: +```bash +cargo test --lib stack_trace::tests +``` +Expected: 5 PASS. + +- [ ] **Step 1.5: Commit** + +```bash +git add src/cmds/java/stack_trace.rs +git commit -m "feat(mvn): add stack trace segment parser + +Splits Java stack traces on top-level 'Caused by:' while keeping +indented Caused by lines inside Suppressed blocks as frames." +``` + +--- + +## Task 2: `truncate_header` with UTF-8 safety + +**Files:** +- Modify: `src/cmds/java/stack_trace.rs` + +- [ ] **Step 2.1: Add tests** + +Append to the `tests` module in `src/cmds/java/stack_trace.rs`: +```rust + #[test] + fn truncate_header_short_passes_through() { + assert_eq!(truncate_header("short"), "short"); + } + + #[test] + fn truncate_header_exact_200_chars_passes() { + let s = "a".repeat(200); + assert_eq!(truncate_header(&s), s); + } + + #[test] + fn truncate_header_over_200_chars_truncates_with_ellipsis() { + let s = "a".repeat(250); + let out = truncate_header(&s); + assert_eq!(out.chars().count(), 203); // 200 + "..." 
+ assert!(out.ends_with("...")); + } + + #[test] + fn truncate_header_utf8_multibyte_safe() { + // 100 4-byte chars = 400 bytes but 100 chars — must not panic + let s = "日".repeat(100); + assert_eq!(truncate_header(&s), s); + let s = "日".repeat(250); + let out = truncate_header(&s); + assert_eq!(out.chars().count(), 203); + assert!(out.ends_with("...")); + } +``` + +- [ ] **Step 2.2: Run tests to verify they fail** + +Run: +```bash +cargo test --lib stack_trace::tests::truncate_header +``` +Expected: FAIL — `truncate_header` not defined. + +- [ ] **Step 2.3: Implement `truncate_header` (and consts)** + +Add near the top of `src/cmds/java/stack_trace.rs` (below the doc comment): +```rust +const MAX_HEADER_LENGTH: usize = 200; +``` + +Add below `parse_segments`: +```rust +/// Truncate a header to `MAX_HEADER_LENGTH` **Unicode characters** (not bytes), +/// appending "..." if truncated. +pub(crate) fn truncate_header(header: &str) -> String { + let char_count = header.chars().count(); + if char_count <= MAX_HEADER_LENGTH { + return header.to_string(); + } + let truncated: String = header.chars().take(MAX_HEADER_LENGTH).collect(); + format!("{truncated}...") +} +``` + +- [ ] **Step 2.4: Run tests** + +Run: +```bash +cargo test --lib stack_trace::tests +``` +Expected: 9 PASS (5 parse_segments + 4 truncate_header). + +- [ ] **Step 2.5: Commit** + +```bash +git add src/cmds/java/stack_trace.rs +git commit -m "feat(mvn): add UTF-8-safe stack trace header truncation + +Counts Unicode chars, not bytes. 200-char cap matches maven-mcp original." 
+``` + +--- + +## Task 3: Frame classification — `is_application_frame`, `is_structural_line` + +**Files:** +- Modify: `src/cmds/java/stack_trace.rs` + +- [ ] **Step 3.1: Add tests** + +Append to tests: +```rust + #[test] + fn is_app_frame_no_filter_accepts_everything() { + assert!(is_application_frame("\tat com.example.A.foo(A.java:1)", None)); + assert!(is_application_frame("\tat org.springframework.boot.Run(Run.java:1)", None)); + assert!(is_application_frame("\t... 42 more", None)); + } + + #[test] + fn is_app_frame_with_package_accepts_matching() { + assert!(is_application_frame( + "\tat com.example.A.foo(A.java:1)", + Some("com.example"), + )); + assert!(!is_application_frame( + "\tat org.springframework.boot.Run(Run.java:1)", + Some("com.example"), + )); + } + + #[test] + fn is_app_frame_rejects_summary_dots() { + // "\t... 42 more" is a framework artifact, never app + assert!(!is_application_frame("\t... 42 more", Some("com.example"))); + } + + #[test] + fn is_app_frame_rejects_empty_or_whitespace() { + assert!(!is_application_frame("", Some("com.example"))); + assert!(!is_application_frame(" ", Some("com.example"))); + } + + #[test] + fn is_structural_suppressed_top_level() { + assert!(is_structural_line("\tSuppressed: java.io.IOException")); + assert!(is_structural_line("Suppressed: foo")); + } + + #[test] + fn is_structural_indented_caused_by_only() { + // Top-level "Caused by:" is a segment boundary, not structural + assert!(!is_structural_line("Caused by: java.io.IOException")); + // Indented "Caused by:" inside suppressed is structural + assert!(is_structural_line("\tCaused by: java.io.IOException")); + assert!(is_structural_line(" Caused by: nested")); + } + + #[test] + fn is_structural_regular_frame_no() { + assert!(!is_structural_line("\tat com.example.A.foo(A.java:1)")); + assert!(!is_structural_line("")); + } +``` + +- [ ] **Step 3.2: Run tests to verify they fail** + +Run: +```bash +cargo test --lib stack_trace::tests +``` +Expected: 6 new FAIL 
on `is_application_frame` / `is_structural_line`. + +- [ ] **Step 3.3: Implement** + +Add below `truncate_header`: +```rust +/// A stack frame belongs to the application if, after stripping whitespace and +/// the leading `"at "` marker, the remainder starts with `app_package`. +/// +/// When `app_package` is `None` or empty, every frame is considered an app frame +/// (framework collapsing disabled). Summary lines like `"\t... 42 more"` are +/// always framework artifacts. +pub(crate) fn is_application_frame(frame: &str, app_package: Option<&str>) -> bool { + let Some(pkg) = app_package.filter(|p| !p.is_empty()) else { + return true; + }; + let trimmed = frame.trim_start(); + let Some(after_at) = trimmed.strip_prefix("at ") else { + return false; + }; + after_at.starts_with(pkg) +} + +/// Structural lines must always be preserved even while collapsing framework +/// frames: Suppressed block headers and **indented** Caused-by lines (which +/// appear inside Suppressed blocks; top-level Caused-by is already a segment +/// boundary, not a frame). +pub(crate) fn is_structural_line(line: &str) -> bool { + if line.is_empty() { + return false; + } + let trimmed = line.trim_start(); + if trimmed.starts_with("Suppressed:") { + return true; + } + if trimmed.starts_with("Caused by:") { + // Only structural when indented (nested in suppressed). Top-level + // Caused by: is handled by parse_segments, not here. + return line + .chars() + .next() + .is_some_and(char::is_whitespace); + } + false +} +``` + +- [ ] **Step 3.4: Run tests** + +Run: +```bash +cargo test --lib stack_trace::tests +``` +Expected: 16 PASS. + +- [ ] **Step 3.5: Commit** + +```bash +git add src/cmds/java/stack_trace.rs +git commit -m "feat(mvn): classify stack frames as application vs framework + +Structural lines (Suppressed:, indented Caused by:) are always +preserved during frame collapsing." 
+``` + +--- + +## Task 4: Frame collapsing — `add_collapsed_frames` + +**Files:** +- Modify: `src/cmds/java/stack_trace.rs` + +- [ ] **Step 4.1: Add tests** + +Append to tests: +```rust + fn collect_collapsed(frames: &[&str], app_package: Option<&str>) -> Vec { + let frames: Vec = frames.iter().map(|s| s.to_string()).collect(); + let mut out = Vec::new(); + add_collapsed_frames(&mut out, &frames, app_package); + out + } + + #[test] + fn collapse_no_filter_keeps_everything() { + let frames = [ + "\tat org.framework.Foo(Foo.java:1)", + "\tat com.example.A.foo(A.java:1)", + "\tat org.framework.Bar(Bar.java:2)", + ]; + let out = collect_collapsed(&frames, None); + assert_eq!(out.len(), 3); + } + + #[test] + fn collapse_all_framework_yields_single_summary() { + let frames = [ + "\tat org.framework.Foo(Foo.java:1)", + "\tat org.framework.Bar(Bar.java:2)", + "\tat org.framework.Baz(Baz.java:3)", + ]; + let out = collect_collapsed(&frames, Some("com.example")); + assert_eq!(out, vec!["\t... 3 framework frames omitted"]); + } + + #[test] + fn collapse_alternating_produces_multiple_summaries() { + let frames = [ + "\tat org.framework.Foo(Foo.java:1)", + "\tat com.example.A.one(A.java:10)", + "\tat org.framework.Bar(Bar.java:2)", + "\tat org.framework.Baz(Baz.java:3)", + "\tat com.example.B.two(B.java:20)", + ]; + let out = collect_collapsed(&frames, Some("com.example")); + assert_eq!( + out, + vec![ + "\t... 1 framework frames omitted", + "\tat com.example.A.one(A.java:10)", + "\t... 2 framework frames omitted", + "\tat com.example.B.two(B.java:20)", + ] + ); + } + + #[test] + fn collapse_preserves_structural_inline() { + let frames = [ + "\tat org.framework.Foo(Foo.java:1)", + "\tSuppressed: java.io.IOException", + "\t\tat org.framework.Bar(Bar.java:2)", + "\t\tCaused by: java.lang.Error: nested", + ]; + let out = collect_collapsed(&frames, Some("com.example")); + assert_eq!( + out, + vec![ + "\t... 
1 framework frames omitted", + "\tSuppressed: java.io.IOException", + "\t... 1 framework frames omitted", + "\t\tCaused by: java.lang.Error: nested", + ] + ); + } +``` + +- [ ] **Step 4.2: Run tests to verify they fail** + +Run: +```bash +cargo test --lib stack_trace::tests::collapse +``` +Expected: FAIL — `add_collapsed_frames` not defined. + +- [ ] **Step 4.3: Implement `add_collapsed_frames`** + +Add below `is_structural_line`: +```rust +/// Push frames to `output`, collapsing runs of consecutive framework frames +/// into a single `"\t... N framework frames omitted"` marker. +/// +/// When `app_package` is `None`, all frames are considered app frames and no +/// collapsing occurs — pass-through mode. +pub(crate) fn add_collapsed_frames( + output: &mut Vec, + frames: &[String], + app_package: Option<&str>, +) { + let filter = app_package.is_some_and(|p| !p.is_empty()); + if !filter { + for frame in frames { + output.push(frame.clone()); + } + return; + } + + let mut framework_count: usize = 0; + for frame in frames { + let structural = is_structural_line(frame); + if structural || is_application_frame(frame, app_package) { + if framework_count > 0 { + output.push(format!("\t... {framework_count} framework frames omitted")); + framework_count = 0; + } + if structural { + output.push(truncate_header(frame)); + } else { + output.push(frame.clone()); + } + } else { + framework_count += 1; + } + } + if framework_count > 0 { + output.push(format!("\t... {framework_count} framework frames omitted")); + } +} +``` + +- [ ] **Step 4.4: Run tests** + +Run: +```bash +cargo test --lib stack_trace::tests +``` +Expected: 20 PASS. + +- [ ] **Step 4.5: Commit** + +```bash +git add src/cmds/java/stack_trace.rs +git commit -m "feat(mvn): collapse consecutive framework frames + +Emits '... N framework frames omitted' for runs of non-app frames; +preserves app and structural (Suppressed / nested Caused by) frames." 
+``` + +--- + +## Task 5: Root-cause frame cap — `add_root_cause_frames` + +**Files:** +- Modify: `src/cmds/java/stack_trace.rs` + +- [ ] **Step 5.1: Add tests** + +Append to tests: +```rust + fn collect_root_cause(frames: &[&str], app_package: Option<&str>) -> Vec { + let frames: Vec = frames.iter().map(|s| s.to_string()).collect(); + let mut out = Vec::new(); + add_root_cause_frames(&mut out, &frames, app_package); + out + } + + #[test] + fn root_cause_caps_app_frames_at_ten() { + let mut frames = Vec::new(); + for i in 0..15 { + frames.push(format!("\tat com.example.A.m{i}(A.java:{i})")); + } + let frame_refs: Vec<&str> = frames.iter().map(|s| s.as_str()).collect(); + let out = collect_root_cause(&frame_refs, Some("com.example")); + // 10 kept, 5 dropped silently (no "framework" marker because these are app frames) + assert_eq!(out.len(), 10); + } + + #[test] + fn root_cause_no_filter_keeps_all_frames() { + let mut frames = Vec::new(); + for i in 0..15 { + frames.push(format!("\tat com.example.A.m{i}(A.java:{i})")); + } + let frame_refs: Vec<&str> = frames.iter().map(|s| s.as_str()).collect(); + let out = collect_root_cause(&frame_refs, None); + assert_eq!(out.len(), 15); + } + + #[test] + fn root_cause_structural_bypasses_cap() { + // Structural lines are always preserved, even if we already hit the 10-app cap. 
+ let mut frames = Vec::new(); + for i in 0..10 { + frames.push(format!("\tat com.example.A.m{i}(A.java:{i})")); + } + frames.push("\tSuppressed: x".to_string()); + frames.push("\tat com.example.Z.zzz(Z.java:99)".to_string()); // 11th app — dropped + let frame_refs: Vec<&str> = frames.iter().map(|s| s.as_str()).collect(); + let out = collect_root_cause(&frame_refs, Some("com.example")); + assert_eq!(out.len(), 11, "10 app frames + 1 structural, 11th app dropped"); + assert!(out.contains(&"\tSuppressed: x".to_string())); + } + + #[test] + fn root_cause_collapses_framework_as_before() { + let frames = [ + "\tat com.example.A.foo(A.java:1)", + "\tat org.framework.X(X.java:1)", + "\tat org.framework.Y(Y.java:2)", + "\tat com.example.B.bar(B.java:2)", + ]; + let out = collect_root_cause(&frames, Some("com.example")); + assert_eq!( + out, + vec![ + "\tat com.example.A.foo(A.java:1)", + "\t... 2 framework frames omitted", + "\tat com.example.B.bar(B.java:2)", + ] + ); + } +``` + +- [ ] **Step 5.2: Run tests to verify they fail** + +Run: +```bash +cargo test --lib stack_trace::tests::root_cause +``` +Expected: FAIL — `add_root_cause_frames` not defined. + +- [ ] **Step 5.3: Implement `add_root_cause_frames`** + +Add the constant near the top of the file (after `MAX_HEADER_LENGTH`): +```rust +const DEFAULT_ROOT_CAUSE_APP_FRAMES: usize = 10; +``` + +Add below `add_collapsed_frames`: +```rust +/// Like `add_collapsed_frames`, but caps the number of non-structural +/// application frames at `DEFAULT_ROOT_CAUSE_APP_FRAMES`. Structural lines +/// (Suppressed, nested Caused by) bypass the cap. 
+pub(crate) fn add_root_cause_frames( + output: &mut Vec, + frames: &[String], + app_package: Option<&str>, +) { + let filter = app_package.is_some_and(|p| !p.is_empty()); + if !filter { + for frame in frames { + output.push(frame.clone()); + } + return; + } + + let mut app_count: usize = 0; + let mut framework_count: usize = 0; + for frame in frames { + let structural = is_structural_line(frame); + if structural || is_application_frame(frame, app_package) { + if framework_count > 0 { + output.push(format!("\t... {framework_count} framework frames omitted")); + framework_count = 0; + } + if structural { + output.push(truncate_header(frame)); + } else if app_count < DEFAULT_ROOT_CAUSE_APP_FRAMES { + output.push(frame.clone()); + app_count += 1; + } + } else { + framework_count += 1; + } + } + if framework_count > 0 { + output.push(format!("\t... {framework_count} framework frames omitted")); + } +} +``` + +- [ ] **Step 5.4: Run tests** + +Run: +```bash +cargo test --lib stack_trace::tests +``` +Expected: 24 PASS. + +- [ ] **Step 5.5: Commit** + +```bash +git add src/cmds/java/stack_trace.rs +git commit -m "feat(mvn): cap root cause application frames at 10 + +Structural lines (Suppressed / nested Caused by) bypass the cap and +are always preserved." 
+``` + +--- + +## Task 6: `process` orchestrator (no hard cap yet) + +**Files:** +- Modify: `src/cmds/java/stack_trace.rs` + +- [ ] **Step 6.1: Add tests** + +Append to tests: +```rust + #[test] + fn process_empty_returns_none() { + assert!(process("", Some("com.example"), 0).is_none()); + assert!(process(" \n ", Some("com.example"), 0).is_none()); + } + + #[test] + fn process_single_segment_no_filter_returns_verbatim() { + let trace = "java.lang.RuntimeException: boom\n\tat com.example.A.foo(A.java:1)"; + let out = process(trace, None, 0).unwrap(); + assert_eq!(out, trace); + } + + #[test] + fn process_single_segment_collapses_framework() { + let trace = "java.lang.AssertionError: fail\n\ + \tat com.example.Test.t(Test.java:5)\n\ + \tat org.junit.runner.Run(Run.java:1)\n\ + \tat org.junit.runner.Run(Run.java:2)"; + let out = process(trace, Some("com.example"), 0).unwrap(); + assert_eq!( + out, + "java.lang.AssertionError: fail\n\ + \tat com.example.Test.t(Test.java:5)\n\ + \t... 2 framework frames omitted" + ); + } + + #[test] + fn process_multi_segment_preserves_root_cause() { + let trace = "java.lang.RuntimeException: outer\n\ + \tat org.spring.Foo(Foo.java:1)\n\ + Caused by: java.io.IOException: middle\n\ + \tat org.hibernate.Bar(Bar.java:2)\n\ + Caused by: java.net.ConnectException: inner\n\ + \tat com.example.DbService.connect(DbService.java:42)"; + let out = process(trace, Some("com.example"), 0).unwrap(); + assert!(out.contains("java.lang.RuntimeException: outer")); + assert!(out.contains("Caused by: java.io.IOException: middle")); + assert!(out.contains("Caused by: java.net.ConnectException: inner")); + assert!(out.contains("\tat com.example.DbService.connect(DbService.java:42)")); + assert!(out.contains("framework frames omitted")); + } +``` + +- [ ] **Step 6.2: Run tests to verify they fail** + +Run: +```bash +cargo test --lib stack_trace::tests::process +``` +Expected: FAIL — `process` not defined. 
+ +- [ ] **Step 6.3: Implement `process` (hard cap as identity for now)** + +Add below `add_root_cause_frames`: +```rust +/// Process a Java stack trace: +/// - Top-level header preserved (truncated to 200 chars). +/// - Non-root segments: header + `add_collapsed_frames`. +/// - Root (last) segment: header + `add_root_cause_frames`. +/// - If `max_lines > 0` and output exceeds the cap, apply hard-cap truncation +/// (implemented in a later task — currently returns full output). +/// +/// Returns `None` iff `raw` is empty or whitespace-only. +pub fn process(raw: &str, app_package: Option<&str>, max_lines: usize) -> Option { + let trimmed = raw.trim(); + if trimmed.is_empty() { + return None; + } + + let segments = parse_segments(trimmed); + if segments.is_empty() { + return Some(trimmed.to_string()); + } + + let mut out: Vec = Vec::new(); + out.push(truncate_header(&segments[0].header)); + + if segments.len() == 1 { + add_collapsed_frames(&mut out, &segments[0].frames, app_package); + } else { + add_collapsed_frames(&mut out, &segments[0].frames, app_package); + for seg in &segments[1..segments.len() - 1] { + out.push(truncate_header(&seg.header)); + add_collapsed_frames(&mut out, &seg.frames, app_package); + } + let root = segments.last().unwrap(); + out.push(truncate_header(&root.header)); + add_root_cause_frames(&mut out, &root.frames, app_package); + } + + if max_lines > 0 && out.len() > max_lines { + out = apply_hard_cap(out, &segments, max_lines); + } + + Some(out.join("\n")) +} + +// Temporary stub; real implementation in Task 7. +fn apply_hard_cap(out: Vec, _segments: &[Segment], max_lines: usize) -> Vec { + let mut out = out; + out.truncate(max_lines); + out +} +``` + +- [ ] **Step 6.4: Run tests** + +Run: +```bash +cargo test --lib stack_trace::tests +``` +Expected: 28 PASS. 
+ +- [ ] **Step 6.5: Commit** + +```bash +git add src/cmds/java/stack_trace.rs +git commit -m "feat(mvn): stack trace process orchestrator + +Wires parse_segments, add_collapsed_frames, add_root_cause_frames into +the public process(raw, app_package, max_lines) API. Hard cap stubbed +for next task." +``` + +--- + +## Task 7: `apply_hard_cap` with root-cause preservation + +**Files:** +- Modify: `src/cmds/java/stack_trace.rs` + +- [ ] **Step 7.1: Add tests** + +Append to tests: +```rust + #[test] + fn hard_cap_single_segment_simple_truncate() { + let mut trace = String::from("java.lang.RuntimeException: boom"); + for i in 0..20 { + trace.push_str(&format!("\n\tat com.example.A.m{i}(A.java:{i})")); + } + let out = process(&trace, Some("com.example"), 5).unwrap(); + assert_eq!(out.lines().count(), 5); + } + + #[test] + fn hard_cap_multi_segment_preserves_root_cause() { + // Top header + 50 intermediate frames + Caused by: + 5 root frames, + // cap at 10 lines → must still include Caused by: line and at least + // one root frame. + let mut trace = String::from("java.lang.RuntimeException: outer"); + for i in 0..50 { + trace.push_str(&format!("\n\tat org.spring.A.m{i}(A.java:{i})")); + } + trace.push_str("\nCaused by: java.io.IOException: real cause"); + for i in 0..5 { + trace.push_str(&format!("\n\tat com.example.DbService.m{i}(Db.java:{i})")); + } + + let out = process(&trace, Some("com.example"), 10).unwrap(); + assert_eq!(out.lines().count(), 10); + assert!(out.contains("java.lang.RuntimeException: outer")); + assert!(out.contains("Caused by: java.io.IOException: real cause")); + assert!( + out.contains("com.example.DbService"), + "at least one root-cause app frame must survive, got: {out}" + ); + } + + #[test] + fn hard_cap_multi_segment_root_within_limit_straight_truncate() { + // Root cause header at line 3 of output, cap at 10 → straight truncate. 
+ let trace = "java.lang.RuntimeException: outer\n\ + \tat com.example.A.foo(A.java:1)\n\ + Caused by: java.io.IOException: inner\n\ + \tat com.example.B.bar(B.java:1)\n\ + \tat com.example.B.baz(B.java:2)\n\ + \tat com.example.B.qux(B.java:3)\n\ + \tat com.example.B.quux(B.java:4)\n\ + \tat com.example.B.corge(B.java:5)"; + let out = process(trace, Some("com.example"), 6).unwrap(); + assert_eq!(out.lines().count(), 6); + } +``` + +- [ ] **Step 7.2: Run tests to verify they fail** + +Run: +```bash +cargo test --lib stack_trace::tests::hard_cap +``` +Expected: `hard_cap_multi_segment_preserves_root_cause` likely FAILS (stub just truncates). + +- [ ] **Step 7.3: Replace `apply_hard_cap` stub with real implementation** + +Remove the stub `fn apply_hard_cap(...)` and replace with: +```rust +/// Apply a hard cap while preserving the root cause. +/// +/// - For a single segment: straight truncate to `max_lines`. +/// - For multiple segments: +/// - If the root-cause header's index in `out` is already beyond the cap, +/// build a synthetic output: `[top_header, "... (intermediate frames +/// truncated)", root_header, root frames until cap]`. +/// - Otherwise (root-cause header within the cap): straight truncate. +fn apply_hard_cap(out: Vec, segments: &[Segment], max_lines: usize) -> Vec { + if segments.len() <= 1 { + let mut out = out; + out.truncate(max_lines); + return out; + } + + let root = segments.last().unwrap(); + let truncated_root_header = truncate_header(&root.header); + let root_idx = out + .iter() + .rposition(|line| line == &truncated_root_header); + + let Some(idx) = root_idx else { + let mut out = out; + out.truncate(max_lines); + return out; + }; + + if idx < max_lines.saturating_sub(1) { + let mut out = out; + out.truncate(max_lines); + return out; + } + + // Root cause beyond the cap — build synthetic layout. 
+ let mut result: Vec = Vec::with_capacity(max_lines); + if let Some(top) = out.first() { + result.push(top.clone()); + } + if max_lines >= 3 { + result.push("\t... (intermediate frames truncated)".to_string()); + } + result.push(truncated_root_header.clone()); + + let mut remaining = max_lines.saturating_sub(result.len()); + for line in &out[(idx + 1)..] { + if remaining == 0 { + break; + } + result.push(line.clone()); + remaining -= 1; + } + result +} +``` + +- [ ] **Step 7.4: Run tests** + +Run: +```bash +cargo test --lib stack_trace +``` +Expected: 31 PASS (all stack_trace tests). + +- [ ] **Step 7.5: Commit** + +```bash +git add src/cmds/java/stack_trace.rs +git commit -m "feat(mvn): stack trace hard cap preserves root cause + +When root-cause header lies beyond the line cap, emit a synthetic layout +with a truncated-intermediate marker so the diagnostic punchline survives." +``` + +--- + +## Task 8: Copy Surefire XML fixtures from maven-mcp + +**Files:** +- Create: `tests/fixtures/java/surefire-reports/TEST-com.example.PassingTest.xml` +- Create: `tests/fixtures/java/surefire-reports/TEST-com.example.FailingTest.xml` +- Create: `tests/fixtures/java/surefire-reports/TEST-com.example.FailingTestWithLogs.xml` +- Create: `tests/fixtures/java/surefire-reports/TEST-com.example.SkippedTest.xml` +- Create: `tests/fixtures/java/surefire-reports/TEST-com.example.ErrorTest.xml` + +- [ ] **Step 8.1: Copy fixtures verbatim from maven-mcp** + +Run: +```bash +cp /home/mariusz/projects/maven-mcp/src/test/resources/surefire-reports/TEST-com.example.PassingTest.xml tests/fixtures/java/surefire-reports/ +cp /home/mariusz/projects/maven-mcp/src/test/resources/surefire-reports/TEST-com.example.FailingTest.xml tests/fixtures/java/surefire-reports/ +cp /home/mariusz/projects/maven-mcp/src/test/resources/surefire-reports/TEST-com.example.FailingTestWithLogs.xml tests/fixtures/java/surefire-reports/ +cp 
/home/mariusz/projects/maven-mcp/src/test/resources/surefire-reports/TEST-com.example.SkippedTest.xml tests/fixtures/java/surefire-reports/ +cp /home/mariusz/projects/maven-mcp/src/test/resources/surefire-reports/TEST-com.example.ErrorTest.xml tests/fixtures/java/surefire-reports/ +``` + +Note: `.gitkeep` previously created can now be deleted: +```bash +rm tests/fixtures/java/surefire-reports/.gitkeep +``` + +- [ ] **Step 8.2: Verify all 5 fixtures present** + +Run: +```bash +ls tests/fixtures/java/surefire-reports/ +``` +Expected: 5 files, all `TEST-com.example.*.xml`. + +- [ ] **Step 8.3: Commit** + +```bash +git add tests/fixtures/java/surefire-reports/ +git commit -m "test(mvn): copy Surefire XML fixtures from maven-mcp + +Covers passing, failing, failing-with-logs, skipped, and error cases — +will feed surefire_reports parser tests in the next tasks." +``` + +--- + +## Task 9: `surefire_reports` — types and single-file parsing + +**Files:** +- Modify: `src/cmds/java/surefire_reports.rs` + +- [ ] **Step 9.1: Add types and a failing test** + +Replace the contents of `src/cmds/java/surefire_reports.rs` with: +```rust +//! Parses Maven Surefire/Failsafe XML test reports from +//! `target/surefire-reports/TEST-*.xml` and `target/failsafe-reports/*.xml`. +//! Uses quick-xml streaming parser. Time-gated by `started_at` to skip stale +//! reports from previous runs. 
+
+use crate::cmds::java::stack_trace;
+use quick_xml::events::{BytesStart, Event};
+use quick_xml::Reader;
+use std::path::Path;
+use std::time::SystemTime;
+
+pub const DEFAULT_STACK_TRACE_LINES: usize = 50;
+pub const DEFAULT_PER_TEST_OUTPUT_LIMIT: usize = 2000;
+pub const DEFAULT_TOTAL_OUTPUT_LIMIT: usize = 10_000;
+
+#[derive(Debug, Default, PartialEq)]
+pub struct TestSummary {
+    pub run: u32,
+    pub failures: u32,
+    pub errors: u32,
+    pub skipped: u32,
+}
+
+impl TestSummary {
+    fn add(&mut self, other: &Self) {
+        self.run += other.run;
+        self.failures += other.failures;
+        self.errors += other.errors;
+        self.skipped += other.skipped;
+    }
+}
+
+#[derive(Debug, PartialEq, Clone, Copy)]
+pub enum FailureKind {
+    Failure,
+    Error,
+}
+
+#[derive(Debug, PartialEq)]
+pub struct TestFailure {
+    pub test_class: String,
+    pub test_method: String,
+    pub kind: FailureKind,
+    pub message: Option<String>,
+    pub failure_type: Option<String>,
+    pub stack_trace: Option<String>,
+    pub test_output: Option<String>,
+}
+
+#[derive(Debug, Default, PartialEq)]
+pub struct SurefireResult {
+    pub summary: TestSummary,
+    pub failures: Vec<TestFailure>,
+    pub files_read: usize,
+    pub files_skipped_stale: usize,
+    pub files_malformed: usize,
+}
+
+fn local_name(name: &[u8]) -> &[u8] {
+    name.rsplit(|b| *b == b':').next().unwrap_or(name)
+}
+
+fn extract_attr(
+    reader: &Reader<&[u8]>,
+    start: &BytesStart<'_>,
+    key: &[u8],
+) -> Option<String> {
+    for attr in start.attributes().flatten() {
+        if local_name(attr.key.as_ref()) != key {
+            continue;
+        }
+        if let Ok(value) = attr.decode_and_unescape_value(reader.decoder()) {
+            return Some(value.into_owned());
+        }
+    }
+    None
+}
+
+fn parse_u32_attr(reader: &Reader<&[u8]>, start: &BytesStart<'_>, key: &[u8]) -> u32 {
+    extract_attr(reader, start, key)
+        .and_then(|v| v.parse::<u32>().ok())
+        .unwrap_or(0)
+}
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+
+    #[test]
+    fn parse_content_single_passing() {
+        let xml = include_str!(
+
"../../../tests/fixtures/java/surefire-reports/TEST-com.example.PassingTest.xml" + ); + let result = parse_content(xml, None).expect("passing testsuite parses"); + assert!(result.summary.run >= 1); + assert_eq!(result.summary.failures, 0); + assert_eq!(result.summary.errors, 0); + assert!(result.failures.is_empty()); + } + + #[test] + fn parse_content_single_failing_extracts_details() { + let xml = include_str!( + "../../../tests/fixtures/java/surefire-reports/TEST-com.example.FailingTest.xml" + ); + let result = parse_content(xml, None).expect("failing testsuite parses"); + assert_eq!(result.summary.failures, 2); + assert_eq!(result.failures.len(), 2); + let first = &result.failures[0]; + assert_eq!(first.test_class, "com.example.FailingTest"); + assert!(first.message.as_deref().unwrap_or("").contains("expected")); + assert!(first.stack_trace.is_some()); + assert_eq!(first.kind, FailureKind::Failure); + } +} +``` + +- [ ] **Step 9.2: Run tests to verify they fail** + +Run: +```bash +cargo test --lib surefire_reports::tests +``` +Expected: FAIL — `parse_content` not defined. + +- [ ] **Step 9.3: Implement `parse_content`** + +Add below `parse_u32_attr`: +```rust +/// Parse a single Surefire XML testsuite string into a partial result. +/// `app_package` is passed to `stack_trace::process` for frame classification. +/// +/// Returns `None` only if the XML is completely malformed; otherwise a +/// best-effort result is returned. 
+pub(crate) fn parse_content(xml: &str, app_package: Option<&str>) -> Option<SurefireResult> {
+    #[derive(Clone, Copy, PartialEq)]
+    enum CaptureField {
+        StackTrace,
+        SystemOut,
+        SystemErr,
+    }
+
+    let mut reader = Reader::from_str(xml);
+    reader.config_mut().trim_text(false);
+    let mut buf = Vec::new();
+
+    let mut result = SurefireResult::default();
+    let mut saw_testsuite = false;
+    let mut current_class: Option<String> = None;
+    let mut current_method: Option<String> = None;
+    let mut current_has_failure = false;
+
+    let mut pending_message: Option<String> = None;
+    let mut pending_type: Option<String> = None;
+    let mut pending_kind: Option<FailureKind> = None;
+    let mut stack_buf = String::new();
+    let mut stdout_buf = String::new();
+    let mut stderr_buf = String::new();
+    let mut capture: Option<CaptureField> = None;
+
+    loop {
+        match reader.read_event_into(&mut buf) {
+            Ok(Event::Start(e)) | Ok(Event::Empty(e)) => {
+                match local_name(e.name().as_ref()) {
+                    b"testsuite" => {
+                        saw_testsuite = true;
+                        let file_summary = TestSummary {
+                            run: parse_u32_attr(&reader, &e, b"tests"),
+                            failures: parse_u32_attr(&reader, &e, b"failures"),
+                            errors: parse_u32_attr(&reader, &e, b"errors"),
+                            skipped: parse_u32_attr(&reader, &e, b"skipped"),
+                        };
+                        result.summary.add(&file_summary);
+                    }
+                    b"testcase" => {
+                        current_class = extract_attr(&reader, &e, b"classname");
+                        current_method = extract_attr(&reader, &e, b"name");
+                        current_has_failure = false;
+                    }
+                    b"failure" | b"error" => {
+                        let kind = if local_name(e.name().as_ref()) == b"failure" {
+                            FailureKind::Failure
+                        } else {
+                            FailureKind::Error
+                        };
+                        pending_message = extract_attr(&reader, &e, b"message");
+                        pending_type = extract_attr(&reader, &e, b"type");
+                        pending_kind = Some(kind);
+                        stack_buf.clear();
+                        capture = Some(CaptureField::StackTrace);
+                        current_has_failure = true;
+                    }
+                    b"system-out" if current_has_failure => {
+                        stdout_buf.clear();
+                        capture = Some(CaptureField::SystemOut);
+                    }
+                    b"system-err" if current_has_failure => {
+                        stderr_buf.clear();
+                        capture =
Some(CaptureField::SystemErr);
+                    }
+                    _ => {}
+                }
+            }
+            Ok(Event::Text(t)) => {
+                if let Some(field) = capture {
+                    if let Ok(text) = t.unescape() {
+                        match field {
+                            CaptureField::StackTrace => stack_buf.push_str(&text),
+                            CaptureField::SystemOut => stdout_buf.push_str(&text),
+                            CaptureField::SystemErr => stderr_buf.push_str(&text),
+                        }
+                    }
+                }
+            }
+            Ok(Event::End(e)) => {
+                match local_name(e.name().as_ref()) {
+                    b"failure" | b"error" => {
+                        let processed = stack_trace::process(
+                            stack_buf.trim(),
+                            app_package,
+                            DEFAULT_STACK_TRACE_LINES,
+                        );
+                        result.failures.push(TestFailure {
+                            test_class: current_class.clone().unwrap_or_default(),
+                            test_method: current_method.clone().unwrap_or_default(),
+                            kind: pending_kind.take().unwrap_or(FailureKind::Failure),
+                            message: pending_message
+                                .take()
+                                .filter(|s| !s.is_empty())
+                                .map(|s| stack_trace::truncate_header(&s)),
+                            failure_type: pending_type.take().filter(|s| !s.is_empty()),
+                            stack_trace: processed,
+                            test_output: None, // filled on </testcase>
+                        });
+                        capture = None;
+                    }
+                    b"system-out" | b"system-err" => {
+                        capture = None;
+                    }
+                    b"testcase" => {
+                        let combined = combine_test_output(
+                            &stdout_buf,
+                            &stderr_buf,
+                            DEFAULT_PER_TEST_OUTPUT_LIMIT,
+                        );
+                        stdout_buf.clear();
+                        stderr_buf.clear();
+                        if let Some(combined) = combined {
+                            if let Some(last) = result.failures.last_mut() {
+                                if last.test_class == current_class.clone().unwrap_or_default()
+                                    && last.test_method
+                                        == current_method.clone().unwrap_or_default()
+                                {
+                                    last.test_output = Some(combined);
+                                }
+                            }
+                        }
+                        current_class = None;
+                        current_method = None;
+                        current_has_failure = false;
+                    }
+                    _ => {}
+                }
+            }
+            Ok(Event::Eof) => break,
+            Err(_) => return None,
+            _ => {}
+        }
+        buf.clear();
+    }
+
+    if !saw_testsuite {
+        return None;
+    }
+
+    Some(result)
+}
+
+fn combine_test_output(stdout: &str, stderr: &str, per_test_limit: usize) -> Option<String> {
+    let stdout = stdout.trim();
+    let stderr = stderr.trim();
+    if stdout.is_empty() && stderr.is_empty() {
+        return None;
+    }
+ let mut combined = String::new(); + if !stdout.is_empty() { + combined.push_str(stdout); + } + if !stderr.is_empty() { + if !combined.is_empty() { + combined.push_str("\n[STDERR]\n"); + } else { + combined.push_str("[STDERR]\n"); + } + combined.push_str(stderr); + } + Some(truncate_test_output(&combined, per_test_limit)) +} + +fn truncate_test_output(output: &str, max_chars: usize) -> String { + let char_count = output.chars().count(); + if char_count <= max_chars { + return output.to_string(); + } + let skip = char_count - max_chars; + let tail: String = output.chars().skip(skip).collect(); + format!("... ({skip} chars truncated)\n{tail}") +} +``` + +Expose `truncate_header` from `stack_trace.rs` (change `pub(crate) fn truncate_header` — already `pub(crate)`, so it's available). If not already `pub(crate)`, add it now. + +- [ ] **Step 9.4: Run tests** + +Run: +```bash +cargo test --lib surefire_reports::tests +``` +Expected: 2 PASS. + +- [ ] **Step 9.5: Commit** + +```bash +git add src/cmds/java/surefire_reports.rs +git commit -m "feat(mvn): parse Surefire XML testsuite via quick-xml + +Handles testsuite/testcase/failure/error/system-out/system-err with +per-test 2000-char log limit and 50-line stack trace truncation. +Classifies failure vs error by element name." 
+``` + +--- + +## Task 10: `surefire_reports` — system-out / system-err capture test + +**Files:** +- Modify: `src/cmds/java/surefire_reports.rs` + +- [ ] **Step 10.1: Add tests** + +Append to `tests` module in `surefire_reports.rs`: +```rust + #[test] + fn parse_content_captures_system_out_err_only_for_failed_tests() { + let xml = include_str!( + "../../../tests/fixtures/java/surefire-reports/TEST-com.example.FailingTestWithLogs.xml" + ); + let result = parse_content(xml, None).expect("parses"); + assert_eq!(result.failures.len(), 2); + let with_both_streams = result + .failures + .iter() + .find(|f| f.test_method == "shouldConnectToDb") + .expect("shouldConnectToDb present"); + let output = with_both_streams + .test_output + .as_deref() + .expect("test_output captured"); + assert!(output.contains("Initializing connection pool")); + assert!(output.contains("[STDERR]")); + assert!(output.contains("Connection refused")); + + let with_stdout_only = result + .failures + .iter() + .find(|f| f.test_method == "shouldProcessData") + .expect("shouldProcessData present"); + let output = with_stdout_only.test_output.as_deref().unwrap_or(""); + assert!(output.contains("Processing batch")); + assert!(!output.contains("[STDERR]")); + + // Passing test's must NOT be captured + let passing_system_out_text = "This output belongs to a passing test"; + for failure in &result.failures { + if let Some(out) = &failure.test_output { + assert!( + !out.contains(passing_system_out_text), + "passing-test stdout must not leak into a failure's test_output" + ); + } + } + } + + #[test] + fn parse_content_error_testsuite_marks_failure_kind_error() { + let xml = include_str!( + "../../../tests/fixtures/java/surefire-reports/TEST-com.example.ErrorTest.xml" + ); + let result = parse_content(xml, None).expect("parses"); + assert!(result.failures.iter().any(|f| f.kind == FailureKind::Error)); + } + + #[test] + fn parse_content_skipped_testsuite_counts_skipped() { + let xml = include_str!( + 
"../../../tests/fixtures/java/surefire-reports/TEST-com.example.SkippedTest.xml" + ); + let result = parse_content(xml, None).expect("parses"); + assert!(result.summary.skipped > 0); + } +``` + +- [ ] **Step 10.2: Run tests** + +Run: +```bash +cargo test --lib surefire_reports::tests +``` +Expected: 5 PASS (existing 2 + new 3). If `system-out`/`system-err` capture fails, check that `parse_content` only opens capture inside `testcase` AFTER `current_has_failure == true` — this mirrors maven-mcp behavior. + +- [ ] **Step 10.3: Commit** + +```bash +git add src/cmds/java/surefire_reports.rs +git commit -m "test(mvn): cover Surefire system-out/err capture and kinds + +Asserts passing test system-out is not leaked, error vs failure kinds +are distinguished, and skipped counts are preserved." +``` + +--- + +## Task 11: `surefire_reports::parse_dir` with time-gate + +**Files:** +- Modify: `src/cmds/java/surefire_reports.rs` + +- [ ] **Step 11.1: Add tests** + +Append to tests: +```rust + use std::time::{Duration, SystemTime}; + + fn copy_fixture( + tmp: &tempfile::TempDir, + fixture_name: &str, + mtime: Option, + ) -> std::path::PathBuf { + let src = std::path::Path::new("tests/fixtures/java/surefire-reports").join(fixture_name); + let dst = tmp.path().join(fixture_name); + std::fs::copy(&src, &dst).expect("copy fixture"); + if let Some(mtime) = mtime { + filetime::set_file_mtime(&dst, filetime::FileTime::from_system_time(mtime)) + .expect("set mtime"); + } + dst + } + + #[test] + fn parse_dir_missing_returns_none() { + assert!(super::parse_dir( + std::path::Path::new("/definitely/does/not/exist/rtk-test"), + None, + None + ) + .is_none()); + } + + #[test] + fn parse_dir_empty_returns_none() { + let tmp = tempfile::tempdir().unwrap(); + assert!(super::parse_dir(tmp.path(), None, None).is_none()); + } + + #[test] + fn parse_dir_ignores_non_test_prefix_files() { + let tmp = tempfile::tempdir().unwrap(); + copy_fixture(&tmp, "TEST-com.example.PassingTest.xml", None); + 
std::fs::write(tmp.path().join("summary.xml"), "").unwrap(); + std::fs::write(tmp.path().join("other.txt"), "hi").unwrap(); + + let result = super::parse_dir(tmp.path(), None, None).expect("parses"); + assert_eq!(result.files_read, 1); + } + + #[test] + fn parse_dir_aggregates_multi_file_counts() { + let tmp = tempfile::tempdir().unwrap(); + copy_fixture(&tmp, "TEST-com.example.PassingTest.xml", None); + copy_fixture(&tmp, "TEST-com.example.FailingTest.xml", None); + copy_fixture(&tmp, "TEST-com.example.SkippedTest.xml", None); + + let result = super::parse_dir(tmp.path(), None, None).expect("parses"); + assert_eq!(result.files_read, 3); + assert!(result.summary.run >= 3); + assert!(result.summary.failures >= 2); + assert!(result.summary.skipped >= 1); + } + + #[test] + fn parse_dir_time_gate_skips_stale_files() { + let tmp = tempfile::tempdir().unwrap(); + let now = SystemTime::now(); + let stale = now - Duration::from_secs(60 * 60); // 1h ago + let fresh = now + Duration::from_millis(50); + + copy_fixture(&tmp, "TEST-com.example.PassingTest.xml", Some(stale)); + copy_fixture(&tmp, "TEST-com.example.FailingTest.xml", Some(fresh)); + + let since = now; + let result = super::parse_dir(tmp.path(), Some(since), None).expect("parses"); + assert_eq!(result.files_read, 1, "only the fresh file counts"); + assert_eq!(result.files_skipped_stale, 1); + assert_eq!(result.summary.failures, 2, "from FailingTest only"); + } + + #[test] + fn parse_dir_malformed_counts_but_continues() { + let tmp = tempfile::tempdir().unwrap(); + copy_fixture(&tmp, "TEST-com.example.PassingTest.xml", None); + std::fs::write( + tmp.path().join("TEST-com.example.Broken.xml"), + ">>>", + ) + .unwrap(); + + let result = super::parse_dir(tmp.path(), None, None).expect("parses"); + assert_eq!(result.files_read, 1); + assert_eq!(result.files_malformed, 1); + } +``` + +- [ ] **Step 11.2: Run tests to verify they fail** + +Run: +```bash +cargo test --lib surefire_reports::tests::parse_dir +``` +Expected: 
FAIL — `parse_dir` not defined.

- [ ] **Step 11.3: Implement `parse_dir`**

Add below `truncate_test_output`:
```rust
+/// Scan a directory for `TEST-*.xml` files and merge their parsed results.
+///
+/// - Files whose `mtime < since` are skipped and counted in `files_skipped_stale`.
+/// - Files that parse to `None` (malformed) count in `files_malformed`.
+/// - Returns `None` only if the directory does not exist or is empty.
+pub fn parse_dir(
+    dir: &Path,
+    since: Option<SystemTime>,
+    app_package: Option<&str>,
+) -> Option<SurefireResult> {
+    parse_dir_with_limits(
+        dir,
+        since,
+        app_package,
+        DEFAULT_PER_TEST_OUTPUT_LIMIT,
+        DEFAULT_TOTAL_OUTPUT_LIMIT,
+        DEFAULT_STACK_TRACE_LINES,
+    )
+}
+
+pub fn parse_dir_with_limits(
+    dir: &Path,
+    since: Option<SystemTime>,
+    app_package: Option<&str>,
+    _per_test_output_limit: usize,
+    total_output_limit: usize,
+    _stack_trace_lines: usize,
+) -> Option<SurefireResult> {
+    if !dir.exists() || !dir.is_dir() {
+        return None;
+    }
+
+    let entries = std::fs::read_dir(dir).ok()?;
+    let mut aggregate = SurefireResult::default();
+    let mut any_candidate = false;
+
+    for entry in entries.flatten() {
+        let path = entry.path();
+        let Some(name) = path.file_name().and_then(|s| s.to_str()) else {
+            continue;
+        };
+        if !name.starts_with("TEST-") || !name.ends_with(".xml") {
+            continue;
+        }
+        any_candidate = true;
+
+        if let Some(since) = since {
+            let modified = entry.metadata().ok().and_then(|m| m.modified().ok());
+            match modified {
+                Some(m) if m >= since => {}
+                Some(_) => {
+                    aggregate.files_skipped_stale += 1;
+                    continue;
+                }
+                None => {
+                    aggregate.files_skipped_stale += 1;
+                    continue;
+                }
+            }
+        }
+
+        let Ok(content) = std::fs::read_to_string(&path) else {
+            aggregate.files_malformed += 1;
+            eprintln!("rtk mvn: skipping unreadable {}", name);
+            continue;
+        };
+
+        match parse_content(&content, app_package) {
+            Some(file_result) => {
+                aggregate.files_read += 1;
+                aggregate.summary.add(&file_result.summary);
+                aggregate.failures.extend(file_result.failures);
+            }
+            None =>
{ + aggregate.files_malformed += 1; + eprintln!("rtk mvn: skipping malformed {}", name); + } + } + } + + if !any_candidate { + return None; + } + + apply_total_output_limit(&mut aggregate.failures, total_output_limit); + Some(aggregate) +} + +fn apply_total_output_limit(failures: &mut [TestFailure], total_limit: usize) { + let mut budget = total_limit; + let mut exhausted = false; + for failure in failures.iter_mut() { + if exhausted { + failure.test_output = None; + continue; + } + if let Some(out) = &failure.test_output { + let len = out.chars().count(); + if len > budget { + failure.test_output = None; + exhausted = true; + } else { + budget -= len; + } + } + } +} +``` + +- [ ] **Step 11.4: Run tests** + +Run: +```bash +cargo test --lib surefire_reports::tests +``` +Expected: 11 PASS. + +- [ ] **Step 11.5: Commit** + +```bash +git add src/cmds/java/surefire_reports.rs +git commit -m "feat(mvn): surefire parse_dir with mtime time-gate + +Aggregates TEST-*.xml files; filters stale by mtime >= since; counts +malformed files without crashing. Applies total-output-limit across +failures." 
+``` + +--- + +## Task 12: `surefire_reports` — total-output-limit test + +**Files:** +- Modify: `src/cmds/java/surefire_reports.rs` + +- [ ] **Step 12.1: Add test** + +Append to tests: +```rust + #[test] + fn apply_total_output_limit_nulls_out_excess() { + let mut failures = vec![ + TestFailure { + test_class: "A".into(), + test_method: "m1".into(), + kind: FailureKind::Failure, + message: None, + failure_type: None, + stack_trace: None, + test_output: Some("a".repeat(4000)), + }, + TestFailure { + test_class: "A".into(), + test_method: "m2".into(), + kind: FailureKind::Failure, + message: None, + failure_type: None, + stack_trace: None, + test_output: Some("b".repeat(4000)), + }, + TestFailure { + test_class: "A".into(), + test_method: "m3".into(), + kind: FailureKind::Failure, + message: None, + failure_type: None, + stack_trace: None, + test_output: Some("c".repeat(4000)), + }, + ]; + super::apply_total_output_limit(&mut failures, 10_000); + assert!(failures[0].test_output.is_some()); + assert!(failures[1].test_output.is_some()); + assert!( + failures[2].test_output.is_none(), + "third should exceed 10k cumulative" + ); + } +``` + +- [ ] **Step 12.2: Run tests** + +Run: +```bash +cargo test --lib surefire_reports::tests::apply_total_output_limit +``` +Expected: PASS (logic already implemented in Task 11). + +- [ ] **Step 12.3: Commit** + +```bash +git add src/cmds/java/surefire_reports.rs +git commit -m "test(mvn): pin total-output-limit cutoff behavior + +Asserts the third 4KB test_output is nulled when 10000-char budget +is exhausted." 
+```

---

## Task 13: `pom_groupid::detect` — core algorithm

**Files:**
- Create: `tests/fixtures/java/poms/single-module-pom.xml`
- Create: `tests/fixtures/java/poms/child-pom.xml`
- Create: `tests/fixtures/java/poms/no-groupid-pom.xml`
- Modify: `src/cmds/java/pom_groupid.rs`

- [ ] **Step 13.1: Create POM fixtures**

Create `tests/fixtures/java/poms/single-module-pom.xml`:
```xml
+<?xml version="1.0" encoding="UTF-8"?>
+<project xmlns="http://maven.apache.org/POM/4.0.0">
+    <modelVersion>4.0.0</modelVersion>
+    <groupId>com.example.app</groupId>
+    <artifactId>single</artifactId>
+    <version>1.0.0</version>
+</project>
```

Create `tests/fixtures/java/poms/child-pom.xml`:
```xml
+<?xml version="1.0" encoding="UTF-8"?>
+<project xmlns="http://maven.apache.org/POM/4.0.0">
+    <modelVersion>4.0.0</modelVersion>
+    <parent>
+        <groupId>com.example.parent</groupId>
+        <artifactId>parent</artifactId>
+        <version>1.0.0</version>
+    </parent>
+    <artifactId>child</artifactId>
+</project>
```

Create `tests/fixtures/java/poms/no-groupid-pom.xml`:
```xml
+<?xml version="1.0" encoding="UTF-8"?>
+<project xmlns="http://maven.apache.org/POM/4.0.0">
+    <modelVersion>4.0.0</modelVersion>
+    <artifactId>orphan</artifactId>
+    <version>1.0.0</version>
+</project>
```

Remove `.gitkeep`:
```bash
rm tests/fixtures/java/poms/.gitkeep
```

- [ ] **Step 13.2: Add tests**

Replace the body of `src/cmds/java/pom_groupid.rs` with:
```rust
+//! Autodetects the application Java package from the `pom.xml` `<groupId>`.
+//! Used by `surefire_reports` / `stack_trace` to classify application frames.
+//! Can be overridden by `RTK_MVN_APP_PACKAGE` env var.
+
+use quick_xml::events::Event;
+use quick_xml::Reader;
+use std::path::Path;
+
+const OVERRIDE_ENV: &str = "RTK_MVN_APP_PACKAGE";
+
+/// Detect the Maven groupId of `cwd`'s `pom.xml`.
+///
+/// Resolution order:
+/// 1. If env var `RTK_MVN_APP_PACKAGE` is set and non-empty, return it.
+/// 2. Read `cwd/pom.xml` and extract top-level `<project>/<groupId>`.
+/// 3. Fall back to `<project>/<parent>/<groupId>`.
+/// 4. Otherwise `None`.
+pub fn detect(cwd: &Path) -> Option<String> {
+    if let Ok(value) = std::env::var(OVERRIDE_ENV) {
+        let trimmed = value.trim();
+        if !trimmed.is_empty() {
+            return Some(trimmed.to_string());
+        }
+    }
+
+    let pom_path = cwd.join("pom.xml");
+    let content = std::fs::read_to_string(&pom_path).ok()?;
+    extract_groupid(&content)
+}
+
+pub(crate) fn extract_groupid(xml: &str) -> Option<String> {
+    let mut reader = Reader::from_str(xml);
+    reader.config_mut().trim_text(true);
+    let mut buf = Vec::new();
+
+    // Tag stack tracked as simple Vec of local names.
+    let mut stack: Vec<String> = Vec::new();
+    let mut top_level_groupid: Option<String> = None;
+    let mut parent_groupid: Option<String> = None;
+    let mut capture: Option<String> = None;
+
+    loop {
+        match reader.read_event_into(&mut buf) {
+            Ok(Event::Start(e)) => {
+                let name = std::str::from_utf8(e.name().as_ref())
+                    .ok()
+                    .and_then(|s| s.rsplit(':').next())
+                    .unwrap_or("")
+                    .to_string();
+                stack.push(name.clone());
+
+                if is_top_level_groupid(&stack) || is_parent_groupid(&stack) {
+                    capture = Some(name);
+                }
+            }
+            Ok(Event::Text(t)) => {
+                if capture.is_some() {
+                    if let Ok(text) = t.unescape() {
+                        let text = text.trim();
+                        if !text.is_empty() {
+                            if is_top_level_groupid(&stack) && top_level_groupid.is_none() {
+                                top_level_groupid = Some(text.to_string());
+                            } else if is_parent_groupid(&stack) && parent_groupid.is_none() {
+                                parent_groupid = Some(text.to_string());
+                            }
+                        }
+                    }
+                }
+            }
+            Ok(Event::End(_)) => {
+                stack.pop();
+                capture = None;
+                if top_level_groupid.is_some() {
+                    break;
+                }
+            }
+            Ok(Event::Eof) => break,
+            Err(_) => return None,
+            _ => {}
+        }
+        buf.clear();
+    }
+
+    top_level_groupid.or(parent_groupid)
+}
+
+fn is_top_level_groupid(stack: &[String]) -> bool {
+    matches!(stack.as_slice(), [project, group] if project == "project" && group == "groupId")
+}
+
+fn is_parent_groupid(stack: &[String]) -> bool {
+    matches!(
+        stack.as_slice(),
+        [project, parent, group] if project == "project" && parent == "parent" && group == "groupId"
+    )
+}
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+
+    #[test]
+    fn extract_single_module_groupid() {
+        let xml = include_str!("../../../tests/fixtures/java/poms/single-module-pom.xml");
+        assert_eq!(extract_groupid(xml).as_deref(), Some("com.example.app"));
+    }
+
+    #[test]
+    fn extract_falls_back_to_parent_groupid() {
+        let xml = include_str!("../../../tests/fixtures/java/poms/child-pom.xml");
+        assert_eq!(extract_groupid(xml).as_deref(), Some("com.example.parent"));
+    }
+
+    #[test]
+    fn extract_no_groupid_returns_none() {
+        let xml =
include_str!("../../../tests/fixtures/java/poms/no-groupid-pom.xml");
+        assert!(extract_groupid(xml).is_none());
+    }
+
+    #[test]
+    fn extract_malformed_returns_none() {
+        assert!(extract_groupid(">>>").is_none());
+    }
+
+    #[test]
+    fn detect_missing_pom_returns_none() {
+        let tmp = tempfile::tempdir().unwrap();
+        assert!(detect(tmp.path()).is_none());
+    }
+
+    #[test]
+    fn detect_env_override_wins() {
+        let tmp = tempfile::tempdir().unwrap();
+        std::fs::copy(
+            "tests/fixtures/java/poms/single-module-pom.xml",
+            tmp.path().join("pom.xml"),
+        )
+        .unwrap();
+
+        // Serial to avoid concurrent env mutation with other tests — this is
+        // tested in isolation; we restore the var on exit.
+        let guard = EnvGuard::set(OVERRIDE_ENV, "com.override");
+        assert_eq!(detect(tmp.path()).as_deref(), Some("com.override"));
+        drop(guard);
+    }
+
+    struct EnvGuard {
+        key: &'static str,
+        original: Option<String>,
+    }
+
+    impl EnvGuard {
+        fn set(key: &'static str, value: &str) -> Self {
+            let original = std::env::var(key).ok();
+            std::env::set_var(key, value);
+            Self { key, original }
+        }
+    }
+
+    impl Drop for EnvGuard {
+        fn drop(&mut self) {
+            match &self.original {
+                Some(v) => std::env::set_var(self.key, v),
+                None => std::env::remove_var(self.key),
+            }
+        }
+    }
+}
```

- [ ] **Step 13.3: Run tests**

Run:
```bash
cargo test --lib pom_groupid::tests
```
Expected: 6 PASS. If the env-override test is flaky under parallel test runs, keep it — `std::env::set_var` is inherently non-threadsafe in Rust but our tests don't contend on `RTK_MVN_APP_PACKAGE`, and `cargo test` runs tests in the same process.

- [ ] **Step 13.4: Commit**

```bash
git add src/cmds/java/pom_groupid.rs tests/fixtures/java/poms/
git commit -m "feat(mvn): detect appPackage from pom.xml groupId

Reads top-level <project>/<groupId> with fallback to <parent>/<groupId>.
RTK_MVN_APP_PACKAGE env var overrides. Malformed POMs return None."
+``` + +--- + +## Task 14: Synthesize failsafe fixtures and stack-trace fixtures + +**Files:** +- Create: `tests/fixtures/java/failsafe-reports/TEST-com.example.DbIntegrationIT.xml` +- Create: `tests/fixtures/java/failsafe-reports/TEST-com.example.PortConflictIT.xml` +- Create: `tests/fixtures/java/stack-traces/multi-caused-by.txt` + +- [ ] **Step 14.1: Create failsafe fixture with 3-segment Caused-by chain** + +Create `tests/fixtures/java/failsafe-reports/TEST-com.example.DbIntegrationIT.xml`: +```xml + + + + + java.lang.IllegalStateException: Failed to load ApplicationContext + at org.springframework.test.context.cache.DefaultCacheAwareContextLoaderDelegate.loadContext(DefaultCacheAwareContextLoaderDelegate.java:180) + at org.springframework.test.context.support.DefaultTestContext.getApplicationContext(DefaultTestContext.java:124) + at org.springframework.test.context.support.DependencyInjectionTestExecutionListener.injectDependencies(DependencyInjectionTestExecutionListener.java:118) +Caused by: org.springframework.beans.factory.BeanCreationException: Error creating bean with name 'dataSource' + at org.springframework.beans.factory.support.AbstractAutowireCapableBeanFactory.doCreateBean(AbstractAutowireCapableBeanFactory.java:628) + at org.springframework.beans.factory.support.AbstractAutowireCapableBeanFactory.createBean(AbstractAutowireCapableBeanFactory.java:542) + at org.springframework.boot.autoconfigure.jdbc.DataSourceAutoConfiguration.dataSource(DataSourceAutoConfiguration.java:114) +Caused by: org.hibernate.HibernateException: Unable to acquire JDBC Connection; nested exception is java.sql.SQLTransientConnectionException: HikariPool-1 - Connection is not available, request timed out after 30000ms. 
+ at org.hibernate.internal.SessionFactoryImpl.createEntityManagerFactory(SessionFactoryImpl.java:512) + at com.example.DbIntegrationIT.shouldConnect(DbIntegrationIT.java:88) + at java.base/java.lang.reflect.Method.invoke(Method.java:580) + 2026-04-15 10:42:17 ERROR HikariDataSource - HikariPool-1 - Connection is not available +Connection refused (Connection refused) + + +``` + +- [ ] **Step 14.2: Create failsafe fixture — port conflict** + +Create `tests/fixtures/java/failsafe-reports/TEST-com.example.PortConflictIT.xml`: +```xml + + + + java.net.BindException: Address already in use + at java.base/sun.nio.ch.Net.bind0(Native Method) + at java.base/sun.nio.ch.Net.bind(Net.java:555) + at com.example.PortConflictIT.shouldStartServer(PortConflictIT.java:42) + + +``` + +Remove `.gitkeep`: +```bash +rm tests/fixtures/java/failsafe-reports/.gitkeep +``` + +- [ ] **Step 14.3: Create raw stack trace fixture** + +Create `tests/fixtures/java/stack-traces/multi-caused-by.txt`: +``` +java.lang.IllegalStateException: Failed to load ApplicationContext + at org.springframework.test.context.cache.DefaultCacheAwareContextLoaderDelegate.loadContext(DefaultCacheAwareContextLoaderDelegate.java:180) + at org.springframework.test.context.support.DefaultTestContext.getApplicationContext(DefaultTestContext.java:124) +Caused by: org.springframework.beans.factory.BeanCreationException: Error creating bean with name 'dataSource' + at org.springframework.beans.factory.support.AbstractAutowireCapableBeanFactory.doCreateBean(AbstractAutowireCapableBeanFactory.java:628) + at org.springframework.beans.factory.support.AbstractAutowireCapableBeanFactory.createBean(AbstractAutowireCapableBeanFactory.java:542) +Caused by: org.hibernate.HibernateException: Unable to acquire JDBC Connection + at org.hibernate.internal.SessionFactoryImpl.createEntityManagerFactory(SessionFactoryImpl.java:512) + at com.example.DbIntegrationIT.shouldConnect(DbIntegrationIT.java:88) + at 
java.base/java.lang.reflect.Method.invoke(Method.java:580) +``` + +Remove `.gitkeep`: +```bash +rm tests/fixtures/java/stack-traces/.gitkeep +``` + +- [ ] **Step 14.4: Verify via stack_trace::process** + +Add to `src/cmds/java/stack_trace.rs` tests: +```rust + #[test] + fn process_real_world_spring_fixture() { + let trace = include_str!("../../../tests/fixtures/java/stack-traces/multi-caused-by.txt"); + let out = process(trace, Some("com.example"), 50).unwrap(); + assert!(out.contains("Caused by: org.springframework.beans.factory.BeanCreationException")); + assert!(out.contains("Caused by: org.hibernate.HibernateException")); + assert!(out.contains("com.example.DbIntegrationIT.shouldConnect")); + assert!(out.contains("framework frames omitted")); + } +``` + +- [ ] **Step 14.5: Run tests** + +Run: +```bash +cargo test --lib stack_trace::tests::process_real_world_spring_fixture +``` +Expected: PASS. + +- [ ] **Step 14.6: Commit** + +```bash +git add tests/fixtures/java/failsafe-reports/ tests/fixtures/java/stack-traces/ src/cmds/java/stack_trace.rs +git commit -m "test(mvn): add failsafe + real-world stack trace fixtures + +Two failsafe-report XMLs (ApplicationContext failure, port conflict) +and a Spring Caused-by chain for stack_trace::process coverage." +``` + +--- + +## Task 15: Capture `started_at` and detect appPackage in `run_test` + +**Files:** +- Modify: `src/cmds/java/mvn_cmd.rs` + +- [ ] **Step 15.1: Read current `run_test`** + +Run: +```bash +grep -n 'fn run_test\|fn run_mvn_test\|pub fn run' src/cmds/java/mvn_cmd.rs | head +``` +Identify the function that executes `mvn test` (likely `run_test` or similar). Open and read its full body so you understand where `execute_command` is called. + +- [ ] **Step 15.2: Add fields to the pre-exec closure** + +Modify `run_test` (or equivalent) to: +1. Capture `let started_at = std::time::SystemTime::now();` **immediately before** the `execute_command` call. +2. 
Capture `let cwd = std::env::current_dir().unwrap_or_else(|_| std::path::PathBuf::from("."));` immediately after. +3. Compute `let app_pkg = crate::cmds::java::pom_groupid::detect(&cwd);` (cheap — happens once). +4. After `filter_mvn_test(&stdout_string)` call, pass the result through the new enrichment (implemented in next task, for now just assign to the variable and leave the downstream `print!/tracking::record` untouched). + +Intermediate patch (applies in this task, prepares scaffold for Task 16): +```rust +// Just before execute_command: +let started_at = std::time::SystemTime::now(); +let cwd = std::env::current_dir().unwrap_or_else(|_| std::path::PathBuf::from(".")); +let app_pkg = crate::cmds::java::pom_groupid::detect(&cwd); + +// ...existing exec + filter_mvn_test call producing `filtered`... + +let enriched = enrich_with_reports(&filtered, &cwd, started_at, app_pkg.as_deref()); +// Replace all downstream uses of `filtered` with `enriched`. +``` + +At this point, define a temporary passthrough: +```rust +fn enrich_with_reports( + text: &str, + _cwd: &std::path::Path, + _since: std::time::SystemTime, + _app_package: Option<&str>, +) -> String { + text.to_string() +} +``` + +- [ ] **Step 15.3: Run full test suite** + +Run: +```bash +cargo test --all +``` +Expected: ALL previous tests still pass (enrichment is identity). + +- [ ] **Step 15.4: Commit** + +```bash +git add src/cmds/java/mvn_cmd.rs +git commit -m "refactor(mvn): wire started_at/cwd/app_pkg into run_test + +Prepares scaffolding for XML report enrichment. enrich_with_reports is +currently an identity function; real logic lands in the next commit." 
+``` + +--- + +## Task 16: Implement `enrich_with_reports` + `render_enriched` + +**Files:** +- Modify: `src/cmds/java/mvn_cmd.rs` + +- [ ] **Step 16.1: Add failing test for happy-path short-circuit** + +Append to the `#[cfg(test)] mod tests` block in `src/cmds/java/mvn_cmd.rs`: +```rust + #[test] + fn enrich_happy_path_passes_through_without_io() { + let tmp = tempfile::tempdir().unwrap(); + // No target/ directory exists under tmp — ensures no I/O fallback would succeed. + let text = "mvn test: 42 passed (1.234 s)"; + let out = super::enrich_with_reports( + text, + tmp.path(), + std::time::SystemTime::now(), + Some("com.example"), + ); + assert_eq!(out, text); + } + + #[test] + fn enrich_no_tests_with_no_reports_emits_red_flag() { + let tmp = tempfile::tempdir().unwrap(); + let text = "mvn test: no tests run"; + let out = super::enrich_with_reports( + text, + tmp.path(), + std::time::SystemTime::now(), + Some("com.example"), + ); + assert!(out.contains("0 tests executed")); + assert!(out.contains("rtk proxy mvn test") || out.contains("surefire")); + } + + #[test] + fn enrich_with_surefire_fixture_appends_failures_section() { + let tmp = tempfile::tempdir().unwrap(); + let reports_dir = tmp.path().join("target/surefire-reports"); + std::fs::create_dir_all(&reports_dir).unwrap(); + std::fs::copy( + "tests/fixtures/java/surefire-reports/TEST-com.example.FailingTest.xml", + reports_dir.join("TEST-com.example.FailingTest.xml"), + ) + .unwrap(); + + let since = std::time::SystemTime::now() - std::time::Duration::from_secs(60); + let text = "mvn test: 4 run, 2 failed (01:02 min)\nBUILD FAILURE"; + let out = super::enrich_with_reports(text, tmp.path(), since, Some("com.example")); + + assert!(out.contains("Failures (from surefire-reports/)")); + assert!(out.contains("com.example.FailingTest.shouldReturnUser")); + assert!(out.contains("reports:")); + } + + #[test] + fn enrich_with_both_report_dirs_appends_both_sections() { + let tmp = tempfile::tempdir().unwrap(); + let 
sf = tmp.path().join("target/surefire-reports"); + let fs = tmp.path().join("target/failsafe-reports"); + std::fs::create_dir_all(&sf).unwrap(); + std::fs::create_dir_all(&fs).unwrap(); + std::fs::copy( + "tests/fixtures/java/surefire-reports/TEST-com.example.FailingTest.xml", + sf.join("TEST-com.example.FailingTest.xml"), + ) + .unwrap(); + std::fs::copy( + "tests/fixtures/java/failsafe-reports/TEST-com.example.DbIntegrationIT.xml", + fs.join("TEST-com.example.DbIntegrationIT.xml"), + ) + .unwrap(); + + let since = std::time::SystemTime::now() - std::time::Duration::from_secs(60); + let text = "mvn verify: 10 run, 3 failed (03:30 min)\nBUILD FAILURE"; + let out = super::enrich_with_reports(text, tmp.path(), since, Some("com.example")); + assert!(out.contains("Failures (from surefire-reports/)")); + assert!(out.contains("Integration failures (from failsafe-reports/)")); + assert!(out.contains("Caused by: org.hibernate.HibernateException")); + } + + #[test] + fn enrich_failures_without_xml_appends_hint() { + let tmp = tempfile::tempdir().unwrap(); + let text = "mvn test: 5 run, 2 failed (0.500 s)\nBUILD FAILURE"; + let out = super::enrich_with_reports( + text, + tmp.path(), + std::time::SystemTime::now(), + Some("com.example"), + ); + assert!(out.contains("no XML reports")); + assert!(out.contains("rtk proxy mvn test")); + } +``` + +- [ ] **Step 16.2: Run tests — they fail** + +Run: +```bash +cargo test --lib mvn_cmd::tests::enrich +``` +Expected: happy-path PASS (identity), the rest FAIL. + +- [ ] **Step 16.3: Replace `enrich_with_reports` stub with real implementation** + +Remove the stub added in Task 15 and replace with: +```rust +use crate::cmds::java::surefire_reports::{self, FailureKind, SurefireResult, TestFailure}; + +const MAX_FAILURES_PER_SOURCE: usize = 10; + +/// Wrap the text-filter summary with structured failure details sourced from +/// `target/surefire-reports/` and `target/failsafe-reports/` XML files. 
+pub(crate) fn enrich_with_reports( + text_summary: &str, + cwd: &std::path::Path, + since: std::time::SystemTime, + app_package: Option<&str>, +) -> String { + if !text_summary.starts_with("mvn ") { + return text_summary.to_string(); + } + + let zero_tests = text_summary == "mvn test: no tests run" + || text_summary.contains("0 passed"); + let has_failures = + text_summary.contains("failed") || text_summary.contains("BUILD FAILURE"); + let looks_clean = text_summary.contains("passed (") + && !text_summary.contains("failed") + && !text_summary.contains("BUILD FAILURE"); + + if looks_clean && !zero_tests { + return text_summary.to_string(); + } + + let sf = surefire_reports::parse_dir( + &cwd.join("target/surefire-reports"), + Some(since), + app_package, + ); + let fs = surefire_reports::parse_dir( + &cwd.join("target/failsafe-reports"), + Some(since), + app_package, + ); + + match (zero_tests, has_failures, &sf, &fs) { + (true, _, None, None) => { + "mvn test: 0 tests executed — surefire nie wykrył testów. 
\ + Sprawdź pom.xml (plugin surefire configuration) lub uruchom: \ + rtk proxy mvn test" + .to_string() + } + (_, true, None, None) => format!( + "{text_summary}\n(no XML reports found — check target/surefire-reports/ \ + or run: rtk proxy mvn test)" + ), + _ => render_enriched(text_summary, sf.as_ref(), fs.as_ref()), + } +} + +fn render_enriched( + text_summary: &str, + surefire: Option<&SurefireResult>, + failsafe: Option<&SurefireResult>, +) -> String { + let mut out = String::from(text_summary); + + if let Some(sf) = surefire { + if !sf.failures.is_empty() { + out.push_str("\n\nFailures (from surefire-reports/):\n"); + render_failure_block(&mut out, &sf.failures); + } + } + + if let Some(fs) = failsafe { + if !fs.failures.is_empty() { + out.push_str("\n\nIntegration failures (from failsafe-reports/):\n"); + render_failure_block(&mut out, &fs.failures); + } + } + + let footer = render_footer(surefire, failsafe); + if !footer.is_empty() { + out.push_str("\n\n"); + out.push_str(&footer); + } + + out +} + +fn render_failure_block(out: &mut String, failures: &[TestFailure]) { + use std::fmt::Write; + let shown = failures.iter().take(MAX_FAILURES_PER_SOURCE); + for (i, f) in shown.enumerate() { + writeln!(out, "{}. {}.{}", i + 1, f.test_class, f.test_method).ok(); + if let Some(kind_label) = failure_kind_label(f) { + writeln!(out, " {kind_label}").ok(); + } + if let Some(trace) = &f.stack_trace { + for line in trace.lines() { + writeln!(out, " {line}").ok(); + } + } + if let Some(output) = f.test_output.as_deref().filter(|s| !s.is_empty()) { + writeln!(out, " captured output:").ok(); + for line in output.lines() { + writeln!(out, " {line}").ok(); + } + } + out.push('\n'); + } + if failures.len() > MAX_FAILURES_PER_SOURCE { + writeln!( + out, + "... 
+{} more failures",
+            failures.len() - MAX_FAILURES_PER_SOURCE
+        )
+        .ok();
+    }
+}
+
+fn failure_kind_label(f: &TestFailure) -> Option<String> {
+    let msg = f.message.as_deref().unwrap_or("").trim();
+    let ty = f
+        .failure_type
+        .as_deref()
+        .and_then(|t| t.rsplit('.').next())
+        .unwrap_or("");
+    match (ty.is_empty(), msg.is_empty()) {
+        (true, true) => None,
+        (true, false) => Some(msg.to_string()),
+        (false, true) => Some(ty.to_string()),
+        (false, false) => Some(format!("{ty}: {msg}")),
+    }
+    .map(|s| match f.kind {
+        FailureKind::Error => format!("[error] {s}"),
+        FailureKind::Failure => s,
+    })
+}
+
+fn render_footer(
+    surefire: Option<&SurefireResult>,
+    failsafe: Option<&SurefireResult>,
+) -> String {
+    let mut parts: Vec<String> = Vec::new();
+    let (sf_read, sf_stale, sf_bad) = counts(surefire);
+    let (fs_read, fs_stale, fs_bad) = counts(failsafe);
+
+    if sf_read > 0 {
+        parts.push(format!("{sf_read} surefire"));
+    }
+    if fs_read > 0 {
+        parts.push(format!("{fs_read} failsafe"));
+    }
+    let stale = sf_stale + fs_stale;
+    if stale > 0 {
+        parts.push(format!("{stale} stale files skipped"));
+    }
+    let malformed = sf_bad + fs_bad;
+    if malformed > 0 {
+        parts.push(format!("{malformed} malformed"));
+    }
+    if parts.is_empty() {
+        return String::new();
+    }
+    format!("(reports: {})", parts.join(", "))
+}
+
+fn counts(r: Option<&SurefireResult>) -> (usize, usize, usize) {
+    r.map(|x| (x.files_read, x.files_skipped_stale, x.files_malformed))
+        .unwrap_or((0, 0, 0))
+}
+```
+
+- [ ] **Step 16.4: Run tests**
+
+Run:
+```bash
+cargo test --lib mvn_cmd::tests::enrich
+```
+Expected: 5 PASS.
+
+- [ ] **Step 16.5: Run full suite to catch regressions**
+
+Run:
+```bash
+cargo test --all
+```
+Expected: ALL PASS (incl. existing filter_mvn_test snapshot tests).
+ +- [ ] **Step 16.6: Commit** + +```bash +git add src/cmds/java/mvn_cmd.rs +git commit -m "feat(mvn): enrich test output with Surefire/Failsafe XML + +Appends a structured Failures section for each report directory, with +per-failure stack trace (framework-frame-collapsed), optional captured +output, and a reports-processed footer. Short-circuits on happy path +to avoid I/O. Emits a red-flag message when 'no tests run' is reported +but also no fresh XML reports are present." +``` + +--- + +## Task 17: Snapshot tests for enriched output + +**Files:** +- Modify: `src/cmds/java/mvn_cmd.rs` + +- [ ] **Step 17.1: Add snapshot tests** + +Append to the `#[cfg(test)] mod tests` block (after existing `enrich_*` tests): +```rust + #[test] + fn snapshot_enriched_surefire_only() { + let tmp = tempfile::tempdir().unwrap(); + let reports = tmp.path().join("target/surefire-reports"); + std::fs::create_dir_all(&reports).unwrap(); + for name in [ + "TEST-com.example.FailingTest.xml", + "TEST-com.example.PassingTest.xml", + ] { + std::fs::copy( + format!("tests/fixtures/java/surefire-reports/{name}"), + reports.join(name), + ) + .unwrap(); + } + + let since = std::time::SystemTime::now() - std::time::Duration::from_secs(60); + let text = "mvn test: 7 run, 2 failed (00:10 min)\nBUILD FAILURE"; + let out = super::enrich_with_reports(text, tmp.path(), since, Some("com.example")); + insta::assert_snapshot!(out); + } + + #[test] + fn snapshot_enriched_surefire_and_failsafe() { + let tmp = tempfile::tempdir().unwrap(); + let sf = tmp.path().join("target/surefire-reports"); + let fs = tmp.path().join("target/failsafe-reports"); + std::fs::create_dir_all(&sf).unwrap(); + std::fs::create_dir_all(&fs).unwrap(); + std::fs::copy( + "tests/fixtures/java/surefire-reports/TEST-com.example.FailingTest.xml", + sf.join("TEST-com.example.FailingTest.xml"), + ) + .unwrap(); + std::fs::copy( + "tests/fixtures/java/failsafe-reports/TEST-com.example.DbIntegrationIT.xml", + 
fs.join("TEST-com.example.DbIntegrationIT.xml"), + ) + .unwrap(); + std::fs::copy( + "tests/fixtures/java/failsafe-reports/TEST-com.example.PortConflictIT.xml", + fs.join("TEST-com.example.PortConflictIT.xml"), + ) + .unwrap(); + + let since = std::time::SystemTime::now() - std::time::Duration::from_secs(60); + let text = "mvn verify: 12 run, 4 failed (05:42 min)\nBUILD FAILURE"; + let out = super::enrich_with_reports(text, tmp.path(), since, Some("com.example")); + insta::assert_snapshot!(out); + } + + #[test] + fn snapshot_red_flag_no_tests() { + let tmp = tempfile::tempdir().unwrap(); + let out = super::enrich_with_reports( + "mvn test: no tests run", + tmp.path(), + std::time::SystemTime::now(), + Some("com.example"), + ); + insta::assert_snapshot!(out); + } +``` + +- [ ] **Step 17.2: Generate snapshots** + +Run: +```bash +cargo test --lib mvn_cmd::tests::snapshot +``` +Expected: tests fail first run; run `cargo insta review` to inspect and accept, or run: +```bash +cargo insta accept +``` + +After acceptance, run: +```bash +cargo test --lib mvn_cmd::tests::snapshot +``` +Expected: 3 PASS. + +- [ ] **Step 17.3: Commit** + +```bash +git add src/cmds/java/mvn_cmd.rs src/cmds/java/snapshots/ +git commit -m "test(mvn): snapshot tests for enriched surefire/failsafe rendering + +Pins output format for surefire-only, both-report-dirs, and the no-tests +red-flag path. Adjust with 'cargo insta review' when output changes." 
+``` + +--- + +## Task 18: Token savings tests (happy path and failure path) + +**Files:** +- Create: `tests/fixtures/java/mvn-verify-multimodule-raw.txt` (real-world log — if not already present from Task 0's predecessor PR #1089; skip copy if the file exists) +- Modify: `src/cmds/java/mvn_cmd.rs` + +- [ ] **Step 18.1: Locate or synthesize a real multi-module mvn verify log** + +Check existing fixtures: +```bash +ls tests/fixtures/java/ 2>/dev/null || ls tests/fixtures/mvn/ 2>/dev/null +``` +If a `mvn verify` multi-module fixture exists (likely `tests/fixtures/java/mvn-verify-multimodule.txt` or similar, shipped by PR #1089), use its path in the test. If not, skip this task's real-world fixture; the synthetic fixtures already cover enrichment correctness. Token-savings test then operates only on synthetic data. + +- [ ] **Step 18.2: Add token-savings tests** + +Append to the `#[cfg(test)] mod tests` block: +```rust + fn count_tokens(s: &str) -> usize { + s.split_whitespace().count() + } + + #[test] + fn savings_happy_path_unchanged_by_enrichment() { + // Happy path short-circuits without I/O; savings must match pre-enrichment. + let text = "mvn test: 859 passed, 4 skipped (02:11 min)"; + let tmp = tempfile::tempdir().unwrap(); + let out = super::enrich_with_reports( + text, + tmp.path(), + std::time::SystemTime::now(), + Some("com.example"), + ); + assert_eq!(out, text, "happy path must not allocate or append"); + } + + #[test] + fn savings_enriched_failures_stays_under_15_percent() { + // Simulate a ~2000-line build log whose text filter produced a short + // summary, plus one big failsafe XML with system-err and a 3-segment + // Caused-by chain. Total enriched output must be ≥85% smaller than raw. 
+        let raw_log = std::iter::repeat_n(
+            "[INFO] Running com.example.some.Heavy.Test — lots of noisy build output\n",
+            2000,
+        )
+        .collect::<String>();
+
+        let tmp = tempfile::tempdir().unwrap();
+        let fs = tmp.path().join("target/failsafe-reports");
+        std::fs::create_dir_all(&fs).unwrap();
+        std::fs::copy(
+            "tests/fixtures/java/failsafe-reports/TEST-com.example.DbIntegrationIT.xml",
+            fs.join("TEST-com.example.DbIntegrationIT.xml"),
+        )
+        .unwrap();
+
+        let since = std::time::SystemTime::now() - std::time::Duration::from_secs(60);
+        let text_summary = "mvn verify: 4 run, 1 failed (01:23 min)\nBUILD FAILURE";
+        let enriched = super::enrich_with_reports(text_summary, tmp.path(), since, Some("com.example"));
+
+        let raw_tokens = count_tokens(&raw_log);
+        let enriched_tokens = count_tokens(&enriched);
+        let savings = 100.0 - (enriched_tokens as f64 / raw_tokens as f64 * 100.0);
+        assert!(
+            savings >= 85.0,
+            "expected ≥85% savings on enriched failure path, got {savings:.1}% \
+             (raw={raw_tokens}, enriched={enriched_tokens})"
+        );
+    }
+```
+
+- [ ] **Step 18.3: Run tests**
+
+Run:
+```bash
+cargo test --lib mvn_cmd::tests::savings
+```
+Expected: 2 PASS.
+
+- [ ] **Step 18.4: Commit**
+
+```bash
+git add src/cmds/java/mvn_cmd.rs
+git commit -m "test(mvn): token savings — happy path identity, failure path ≥85%
+
+Asserts happy-path enrichment is a no-op and that even on the enriched
+failure path with a multi-segment Caused-by chain we stay under 15% of
+the raw log size."
+```
+
+---
+
+## Task 19: Performance gate — hyperfine on release build
+
+**Files:** (no source changes; just a verification command + optional release rebuild)
+
+- [ ] **Step 19.1: Build release binary**
+
+Run:
+```bash
+cargo build --release
+```
+Expected: PASS.
+ +- [ ] **Step 19.2: Create a synthetic reports directory for benchmark** + +Run: +```bash +mkdir -p /tmp/rtk-perf/target/surefire-reports +for i in $(seq 1 50); do + cp tests/fixtures/java/surefire-reports/TEST-com.example.PassingTest.xml \ + "/tmp/rtk-perf/target/surefire-reports/TEST-com.example.Pass$i.xml" +done +``` + +- [ ] **Step 19.3: Benchmark happy path (no I/O)** + +Run: +```bash +hyperfine --warmup 3 --runs 20 \ + "cd /tmp/rtk-perf && $(pwd)/target/release/rtk --version" +``` +Expected: median < 10ms. + +- [ ] **Step 19.4: Document result** + +Paste the hyperfine output into the commit message (next task) or a NOTES file. If median exceeds 10ms, investigate before proceeding. + +- [ ] **Step 19.5: No commit** (this task is verification only). Clean up: + +```bash +rm -rf /tmp/rtk-perf +``` + +--- + +## Task 20: Docs — README, CHANGELOG + +**Files:** +- Modify: `src/cmds/java/README.md` (create if missing) +- Modify: `CHANGELOG.md` + +- [ ] **Step 20.1: Check existing README structure** + +Run: +```bash +ls src/cmds/java/README.md 2>/dev/null && head -40 src/cmds/java/README.md +``` +If missing, you'll create it. If present, note its existing sections so the enrichment section fits the tone. + +- [ ] **Step 20.2: Update README** + +Either create `src/cmds/java/README.md` with the content below, or append a new section. + +If creating, use this minimal template: +```markdown +# rtk — Maven (Java) Filter + +rtk filters and enriches Maven build output (test, compile, checkstyle, +dependency:tree, verify, integration-test, install) for LLM consumption. 
+
+## Output enrichment from Surefire/Failsafe XML reports
+
+When `mvn test` (or verify/integration-test) reports failures, rtk reads
+`target/surefire-reports/TEST-*.xml` and `target/failsafe-reports/*.xml`
+**after** the build finishes and appends a structured Failures section
+with:
+
+- Full stack trace per failure, with framework frames collapsed and the
+  root-cause segment preserved (up to 50 lines per trace).
+- Captured stdout + stderr from failing tests only, capped at 2000 chars
+  per test and 10000 chars total.
+- File counters in the footer: `(reports: N surefire, M failsafe, K stale files skipped)`.
+
+### Application-package detection
+
+rtk classifies stack frames as *application* vs *framework* by comparing
+frame class names against the Java `groupId` from `pom.xml`:
+
+1. `RTK_MVN_APP_PACKAGE` env var (if set, overrides everything).
+2. `<project>/<groupId>` from the pom.xml in the current working directory.
+3. Fallback: `<project>/<parent>/<groupId>`.
+4. Otherwise: no filtering — full stack traces are preserved.
+
+### Time-gated report reads
+
+Stale XML reports from previous runs are skipped: only files with
+`mtime >= started_at` (captured just before `mvn` executes) are parsed.
+
+### Red-flag heuristic for "0 tests"
+
+If the summary says `no tests run` but surefire reports are empty or
+absent, rtk emits a diagnostic instead of the silent summary:
+
+```
+mvn test: 0 tests executed — surefire nie wykrył testów.
+Sprawdź pom.xml (plugin surefire configuration) lub uruchom: rtk proxy mvn test
+```
+
+### Bypass
+
+For the rare cases where you need the full raw Maven output:
+
+```bash
+rtk proxy mvn test
+```
+```
+
+- [ ] **Step 20.3: Update CHANGELOG.md**
+
+Add under the latest unreleased/next version heading:
+```markdown
+### Added
+- `mvn test` / `mvn verify` / `mvn integration-test` output is now
+  enriched with structured failure details read from
+  `target/surefire-reports/TEST-*.xml` and `target/failsafe-reports/*.xml`.
+ Stack traces are segmented on `Caused by:` with framework frames + collapsed; the root-cause segment is always preserved. +- Application-package autodetect from `pom.xml` `` (with parent + fallback) for framework-frame classification. Override via + `RTK_MVN_APP_PACKAGE`. +- Red-flag heuristic: `no tests run` with no fresh XML reports emits a + diagnostic pointing at surefire misconfiguration. +``` + +- [ ] **Step 20.4: Verify** + +Run: +```bash +cargo build +``` +Expected: PASS (README/CHANGELOG changes don't affect build). + +- [ ] **Step 20.5: Commit** + +```bash +git add src/cmds/java/README.md CHANGELOG.md +git commit -m "docs(mvn): document XML enrichment, appPackage detection, red-flag + +Describes the new post-filter XML read, groupId autodetect order, +stale-file time-gate, and the rtk proxy escape hatch." +``` + +--- + +## Task 21: Final quality gate + +**Files:** (no source changes) + +- [ ] **Step 21.1: Full fmt + clippy + test cycle** + +Run: +```bash +cargo fmt --all +cargo clippy --all-targets -- -D warnings +cargo test --all +``` +Expected: ALL PASS with zero clippy warnings. If any clippy warning appears, fix it inline in the offending file before proceeding. + +- [ ] **Step 21.2: Snapshot review** + +Run: +```bash +cargo insta pending-snapshots +``` +If any pending, run `cargo insta review` and decide. Stage and commit any accepted snapshot updates as: +```bash +git add src/cmds/java/snapshots/ +git commit -m "test(mvn): accept reviewed snapshots" +``` + +- [ ] **Step 21.3: Push branch** + +Run: +```bash +git push -u origin feat/mvn-surefire-xml +``` + +- [ ] **Step 21.4: Open PR against fork's `master`** + +Run: +```bash +gh pr create \ + --repo mariuszs/rtk-java \ + --base master \ + --head mariuszs:feat/mvn-surefire-xml \ + --title "feat(mvn): enrich test output with Surefire/Failsafe XML reports" \ + --body "$(cat <<'EOF' +## Summary + +- Ports maven-mcp's `SurefireReportParser` + `StackTraceProcessor` to Rust. 
+- Adds `pom.xml` `` autodetect for framework-frame classification. +- Post-text-filter enrichment reads `target/surefire-reports/` and `target/failsafe-reports/` XMLs (time-gated by `started_at`) and appends structured failure details to the rtk mvn output. +- Red-flag heuristic: `no tests run` with no fresh reports now surfaces a diagnostic instead of silently pretending everything is fine. +- Override via `RTK_MVN_APP_PACKAGE` env var. + +Spec: `docs/superpowers/specs/2026-04-15-mvn-surefire-xml-enrichment-design.md` +Plan: `docs/superpowers/plans/2026-04-15-mvn-surefire-xml-enrichment.md` + +Stacks on `feat/mvn-rust-module` (upstream PR rtk-ai/rtk#1089). + +## Test plan + +- [x] `cargo test --all` (incl. new stack_trace, surefire_reports, pom_groupid, mvn_cmd tests) +- [x] `cargo clippy --all-targets -- -D warnings` +- [x] `cargo fmt --all` +- [x] `cargo insta review` — snapshots accepted +- [x] Token savings: happy path identity, failure path ≥85% (verified in tests) +- [x] Release build + hyperfine: happy-path startup < 10ms +EOF +)" +``` + +Expected: PR URL printed. Paste into this checklist: +- [ ] PR URL: `` + +--- + +## Self-Review + +Completed after first draft of this plan. No placeholders detected. All tasks reference concrete files, code, and commands. Task 15's integration into `run_test` refers to existing code — the implementer must read the current `mvn_cmd.rs` to locate the exact line but the insertion semantics are unambiguous. Task 18's token-savings test uses synthetic data when no real-world log fixture exists; this is explicitly called out. Task 19 is verification-only and commits nothing. Task 20 creates README if missing — the template is complete. Task 21 handles all pre-PR gates. 
+ +**Spec coverage check (cross-reference with `2026-04-15-mvn-surefire-xml-enrichment-design.md`):** + +| Spec section | Task(s) | +|---|---| +| `surefire_reports.rs` — types | 9 | +| `surefire_reports.rs` — single-file parse | 9, 10 | +| `surefire_reports.rs` — parse_dir + time-gate | 11 | +| `surefire_reports.rs` — total_output_limit | 12 | +| `stack_trace.rs` — parse_segments | 1 | +| `stack_trace.rs` — truncate_header | 2 | +| `stack_trace.rs` — frame classification | 3 | +| `stack_trace.rs` — add_collapsed_frames | 4 | +| `stack_trace.rs` — add_root_cause_frames | 5 | +| `stack_trace.rs` — process | 6 | +| `stack_trace.rs` — apply_hard_cap | 7 | +| `pom_groupid.rs` — detect + fallback + override | 13 | +| Fixtures — surefire | 8 | +| Fixtures — failsafe + stack | 14 | +| `mvn_cmd.rs` — capture started_at / cwd / app_pkg | 15 | +| `mvn_cmd.rs` — enrich_with_reports + render_enriched | 16 | +| Snapshot tests | 17 | +| Token savings tests | 18 | +| Performance gate | 19 | +| Docs | 20 | +| Final quality gate + PR | 21 | + +All spec sections have a corresponding task. No gaps. From 2a40a8a8a2c993c07d8d9c3e6f113ffe562da88c Mon Sep 17 00:00:00 2001 From: mariuszs Date: Wed, 15 Apr 2026 21:02:16 +0200 Subject: [PATCH 23/44] chore(mvn): scaffold surefire-xml modules and fixture dirs Empty stubs for stack_trace, surefire_reports, pom_groupid. Adds filetime dev-dep for mtime-based time-gate tests in later tasks. 
--- Cargo.lock | 66 +++++++++++++++++++ Cargo.toml | 2 + src/cmds/java/pom_groupid.rs | 3 + src/cmds/java/stack_trace.rs | 5 ++ src/cmds/java/surefire_reports.rs | 4 ++ tests/fixtures/java/failsafe-reports/.gitkeep | 0 tests/fixtures/java/poms/.gitkeep | 0 tests/fixtures/java/stack-traces/.gitkeep | 0 tests/fixtures/java/surefire-reports/.gitkeep | 0 9 files changed, 80 insertions(+) create mode 100644 src/cmds/java/pom_groupid.rs create mode 100644 src/cmds/java/stack_trace.rs create mode 100644 src/cmds/java/surefire_reports.rs create mode 100644 tests/fixtures/java/failsafe-reports/.gitkeep create mode 100644 tests/fixtures/java/poms/.gitkeep create mode 100644 tests/fixtures/java/stack-traces/.gitkeep create mode 100644 tests/fixtures/java/surefire-reports/.gitkeep diff --git a/Cargo.lock b/Cargo.lock index 7f33886e0..71a91f8bb 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -233,6 +233,17 @@ dependencies = [ "windows-sys 0.59.0", ] +[[package]] +name = "console" +version = "0.16.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d64e8af5551369d19cf50138de61f1c42074ab970f74e99be916646777f8fc87" +dependencies = [ + "encode_unicode", + "libc", + "windows-sys 0.61.2", +] + [[package]] name = "core-foundation-sys" version = "0.8.7" @@ -334,6 +345,12 @@ dependencies = [ "syn", ] +[[package]] +name = "encode_unicode" +version = "1.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "34aa73646ffb006b8f5147f3dc182bd4bcb190227ce861fc4a4844bf8e3cb2c0" + [[package]] name = "env_home" version = "0.1.0" @@ -374,6 +391,17 @@ version = "2.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "37909eebbb50d72f9059c3b6d82c0463f2ff062c9e95845c43a6c9c0355411be" +[[package]] +name = "filetime" +version = "0.2.27" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f98844151eee8917efc50bd9e8318cb963ae8b297431495d3f758616ea5c57db" +dependencies = [ + "cfg-if", + "libc", + 
"libredox", +] + [[package]] name = "find-msvc-tools" version = "0.1.9" @@ -651,6 +679,18 @@ dependencies = [ "serde_core", ] +[[package]] +name = "insta" +version = "1.47.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7b4a6248eb93a4401ed2f37dfe8ea592d3cf05b7cf4f8efa867b6895af7e094e" +dependencies = [ + "console", + "once_cell", + "similar", + "tempfile", +] + [[package]] name = "is_terminal_polyfill" version = "1.70.2" @@ -697,7 +737,10 @@ version = "0.1.14" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1744e39d1d6a9948f4f388969627434e31128196de472883b39f148769bfe30a" dependencies = [ + "bitflags", "libc", + "plain", + "redox_syscall", ] [[package]] @@ -784,6 +827,12 @@ version = "0.3.32" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7edddbd0b52d732b21ad9a5fab5c704c14cd949e5e9a1ec5929a24fded1b904c" +[[package]] +name = "plain" +version = "0.2.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b4596b6d070b27117e987119b4dac604f3c58cfb0b191112e24771b2faeac1a6" + [[package]] name = "potential_utf" version = "0.1.4" @@ -836,6 +885,15 @@ version = "6.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f8dcc9c7d52a811697d2151c701e0d08956f92b0e24136cf4cf27b57a6a0d9bf" +[[package]] +name = "redox_syscall" +version = "0.7.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f450ad9c3b1da563fb6948a8e0fb0fb9269711c9c73d9ea1de5058c79c8d643a" +dependencies = [ + "bitflags", +] + [[package]] name = "redox_users" version = "0.4.6" @@ -900,9 +958,11 @@ dependencies = [ "clap", "colored", "dirs", + "filetime", "flate2", "getrandom 0.4.2", "ignore", + "insta", "lazy_static", "libc", "quick-xml", @@ -1077,6 +1137,12 @@ version = "0.3.8" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e320a6c5ad31d271ad523dcf3ad13e2767ad8b1cb8f047f75a8aeaf8da139da2" +[[package]] +name = 
"similar" +version = "2.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bbbb5d9659141646ae647b42fe094daf6c6192d1620870b449d9557f748b2daa" + [[package]] name = "smallvec" version = "1.15.1" diff --git a/Cargo.toml b/Cargo.toml index ec55eea29..68942ff54 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -42,6 +42,8 @@ libc = "0.2" toml = "0.8" [dev-dependencies] +filetime = "0.2" +insta = "1" [profile.release] opt-level = 3 diff --git a/src/cmds/java/pom_groupid.rs b/src/cmds/java/pom_groupid.rs new file mode 100644 index 000000000..6763e873d --- /dev/null +++ b/src/cmds/java/pom_groupid.rs @@ -0,0 +1,3 @@ +//! Autodetects the application Java package from `pom.xml `. +//! Used by `surefire_reports` / `stack_trace` to classify application frames. +//! Can be overridden by `RTK_MVN_APP_PACKAGE` env var. diff --git a/src/cmds/java/stack_trace.rs b/src/cmds/java/stack_trace.rs new file mode 100644 index 000000000..575afe3e7 --- /dev/null +++ b/src/cmds/java/stack_trace.rs @@ -0,0 +1,5 @@ +//! Port of maven-mcp's StackTraceProcessor. +//! +//! Parses Java stack traces into segments (top-level exception + Caused by +//! chains), classifies frames as application or framework by package prefix, +//! collapses framework noise, and preserves root-cause frames. diff --git a/src/cmds/java/surefire_reports.rs b/src/cmds/java/surefire_reports.rs new file mode 100644 index 000000000..e679f22fa --- /dev/null +++ b/src/cmds/java/surefire_reports.rs @@ -0,0 +1,4 @@ +//! Parses Maven Surefire/Failsafe XML test reports from +//! `target/surefire-reports/TEST-*.xml` and `target/failsafe-reports/*.xml`. +//! Uses quick-xml streaming parser. Time-gated by `started_at` to skip stale +//! reports from previous runs. 
diff --git a/tests/fixtures/java/failsafe-reports/.gitkeep b/tests/fixtures/java/failsafe-reports/.gitkeep
new file mode 100644
index 000000000..e69de29bb
diff --git a/tests/fixtures/java/poms/.gitkeep b/tests/fixtures/java/poms/.gitkeep
new file mode 100644
index 000000000..e69de29bb
diff --git a/tests/fixtures/java/stack-traces/.gitkeep b/tests/fixtures/java/stack-traces/.gitkeep
new file mode 100644
index 000000000..e69de29bb
diff --git a/tests/fixtures/java/surefire-reports/.gitkeep b/tests/fixtures/java/surefire-reports/.gitkeep
new file mode 100644
index 000000000..e69de29bb

From 82f66704cec35ae5879c04832ccc5fc940044f51 Mon Sep 17 00:00:00 2001
From: mariuszs
Date: Wed, 15 Apr 2026 21:21:03 +0200
Subject: [PATCH 24/44] feat(mvn): add stack trace segment parser

Splits Java stack traces on top-level 'Caused by:' while keeping
indented Caused by lines inside Suppressed blocks as frames.
---
 src/cmds/java/stack_trace.rs | 110 +++++++++++++++++++++++++++++++++++
 1 file changed, 110 insertions(+)

diff --git a/src/cmds/java/stack_trace.rs b/src/cmds/java/stack_trace.rs
index 575afe3e7..a1c0330e7 100644
--- a/src/cmds/java/stack_trace.rs
+++ b/src/cmds/java/stack_trace.rs
@@ -3,3 +3,113 @@
 //! Parses Java stack traces into segments (top-level exception + Caused by
 //! chains), classifies frames as application or framework by package prefix,
 //! collapses framework noise, and preserves root-cause frames.
+
+#[derive(Debug, PartialEq)]
+#[allow(dead_code)]
+pub(crate) struct Segment {
+    pub(crate) header: String,
+    pub(crate) frames: Vec<String>,
+}
+
+/// Split a stack trace into segments.
+///
+/// The first non-empty line becomes the header of segment 0. Each subsequent
+/// line starting with the literal `"Caused by:"` (no leading whitespace) closes
+/// the current segment and opens a new one. All other lines append to the
+/// current segment's frames.
+/// +/// Indented `"\tCaused by:"` inside Suppressed blocks stays as a frame and +/// does NOT split segments — `is_structural_line` preserves it during frame +/// collapsing. +#[allow(dead_code)] +pub(crate) fn parse_segments(trace: &str) -> Vec { + let trace = trace.trim(); + if trace.is_empty() { + return Vec::new(); + } + + let mut segments = Vec::new(); + let mut current_header: Option = None; + let mut current_frames: Vec = Vec::new(); + + for line in trace.lines() { + if current_header.is_none() { + current_header = Some(line.to_string()); + } else if line.starts_with("Caused by:") { + segments.push(Segment { + header: current_header.take().unwrap(), + frames: std::mem::take(&mut current_frames), + }); + current_header = Some(line.to_string()); + } else { + current_frames.push(line.to_string()); + } + } + + if let Some(header) = current_header { + segments.push(Segment { + header, + frames: current_frames, + }); + } + + segments +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn parse_segments_empty_input_returns_empty() { + assert!(parse_segments("").is_empty()); + } + + #[test] + fn parse_segments_single_header_no_frames() { + let trace = "java.lang.RuntimeException: boom"; + let segs = parse_segments(trace); + assert_eq!(segs.len(), 1); + assert_eq!(segs[0].header, "java.lang.RuntimeException: boom"); + assert!(segs[0].frames.is_empty()); + } + + #[test] + fn parse_segments_single_segment_with_frames() { + let trace = "java.lang.RuntimeException: boom\n\ + \tat com.example.A.foo(A.java:1)\n\ + \tat com.example.B.bar(B.java:2)"; + let segs = parse_segments(trace); + assert_eq!(segs.len(), 1); + assert_eq!(segs[0].frames.len(), 2); + } + + #[test] + fn parse_segments_caused_by_starts_new_segment() { + let trace = "java.lang.RuntimeException: outer\n\ + \tat com.example.A.foo(A.java:1)\n\ + Caused by: java.io.IOException: inner\n\ + \tat com.example.B.bar(B.java:2)"; + let segs = parse_segments(trace); + assert_eq!(segs.len(), 2); + 
assert_eq!(segs[0].header, "java.lang.RuntimeException: outer"); + assert_eq!(segs[0].frames, vec!["\tat com.example.A.foo(A.java:1)"]); + assert_eq!(segs[1].header, "Caused by: java.io.IOException: inner"); + assert_eq!(segs[1].frames, vec!["\tat com.example.B.bar(B.java:2)"]); + } + + #[test] + fn parse_segments_indented_caused_by_stays_as_frame() { + // Inside a Suppressed block, the "Caused by:" is indented and must NOT + // split segments — it stays as a frame so structural handling keeps it. + let trace = "java.lang.RuntimeException: outer\n\ + \tSuppressed: java.io.IOException: suppressed\n\ + \t\tat com.example.A.foo(A.java:1)\n\ + \t\tCaused by: java.lang.Error: nested\n\ + Caused by: java.io.IOException: real cause"; + let segs = parse_segments(trace); + assert_eq!(segs.len(), 2, "indented Caused by must not split segments"); + assert_eq!(segs[0].frames.len(), 3, "Suppressed block stays in outer"); + assert_eq!(segs[1].header, "Caused by: java.io.IOException: real cause"); + } +} From 1b84cce973365fd5b9eebe657ef8eb0de93ec2f8 Mon Sep 17 00:00:00 2001 From: mariuszs Date: Wed, 15 Apr 2026 21:23:53 +0200 Subject: [PATCH 25/44] feat(mvn): add UTF-8-safe stack trace header truncation Counts Unicode chars, not bytes. 200-char cap matches maven-mcp original. --- src/cmds/java/stack_trace.rs | 44 ++++++++++++++++++++++++++++++++++++ 1 file changed, 44 insertions(+) diff --git a/src/cmds/java/stack_trace.rs b/src/cmds/java/stack_trace.rs index a1c0330e7..e4ffe9a5d 100644 --- a/src/cmds/java/stack_trace.rs +++ b/src/cmds/java/stack_trace.rs @@ -4,6 +4,8 @@ //! chains), classifies frames as application or framework by package prefix, //! collapses framework noise, and preserves root-cause frames. 
+const MAX_HEADER_LENGTH: usize = 200; + #[derive(Debug, PartialEq)] #[allow(dead_code)] pub(crate) struct Segment { @@ -56,6 +58,18 @@ pub(crate) fn parse_segments(trace: &str) -> Vec { segments } +/// Truncate a header to `MAX_HEADER_LENGTH` **Unicode characters** (not bytes), +/// appending "..." if truncated. +#[allow(dead_code)] +pub(crate) fn truncate_header(header: &str) -> String { + let char_count = header.chars().count(); + if char_count <= MAX_HEADER_LENGTH { + return header.to_string(); + } + let truncated: String = header.chars().take(MAX_HEADER_LENGTH).collect(); + format!("{truncated}...") +} + #[cfg(test)] mod tests { use super::*; @@ -112,4 +126,34 @@ mod tests { assert_eq!(segs[0].frames.len(), 3, "Suppressed block stays in outer"); assert_eq!(segs[1].header, "Caused by: java.io.IOException: real cause"); } + + #[test] + fn truncate_header_short_passes_through() { + assert_eq!(truncate_header("short"), "short"); + } + + #[test] + fn truncate_header_exact_200_chars_passes() { + let s = "a".repeat(200); + assert_eq!(truncate_header(&s), s); + } + + #[test] + fn truncate_header_over_200_chars_truncates_with_ellipsis() { + let s = "a".repeat(250); + let out = truncate_header(&s); + assert_eq!(out.chars().count(), 203); // 200 + "..." + assert!(out.ends_with("...")); + } + + #[test] + fn truncate_header_utf8_multibyte_safe() { + // 100 4-byte chars = 400 bytes but 100 chars — must not panic + let s = "日".repeat(100); + assert_eq!(truncate_header(&s), s); + let s = "日".repeat(250); + let out = truncate_header(&s); + assert_eq!(out.chars().count(), 203); + assert!(out.ends_with("...")); + } } From 00b78457cce0b9d37fa04b9f53e2cfe69a68b133 Mon Sep 17 00:00:00 2001 From: mariuszs Date: Wed, 15 Apr 2026 21:26:57 +0200 Subject: [PATCH 26/44] feat(mvn): classify stack frames as application vs framework Structural lines (Suppressed:, indented Caused by:) are always preserved during frame collapsing. 
--- src/cmds/java/stack_trace.rs | 94 ++++++++++++++++++++++++++++++++++++ 1 file changed, 94 insertions(+) diff --git a/src/cmds/java/stack_trace.rs b/src/cmds/java/stack_trace.rs index e4ffe9a5d..8c90c04db 100644 --- a/src/cmds/java/stack_trace.rs +++ b/src/cmds/java/stack_trace.rs @@ -70,6 +70,48 @@ pub(crate) fn truncate_header(header: &str) -> String { format!("{truncated}...") } +/// A stack frame belongs to the application if, after stripping whitespace and +/// the leading `"at "` marker, the remainder starts with `app_package`. +/// +/// When `app_package` is `None` or empty, every frame is considered an app frame +/// (framework collapsing disabled). Summary lines like `"\t... 42 more"` are +/// always framework artifacts. +#[allow(dead_code)] +pub(crate) fn is_application_frame(frame: &str, app_package: Option<&str>) -> bool { + let Some(pkg) = app_package.filter(|p| !p.is_empty()) else { + return true; + }; + let trimmed = frame.trim_start(); + let Some(after_at) = trimmed.strip_prefix("at ") else { + return false; + }; + after_at.starts_with(pkg) +} + +/// Structural lines must always be preserved even while collapsing framework +/// frames: Suppressed block headers and **indented** Caused-by lines (which +/// appear inside Suppressed blocks; top-level Caused-by is already a segment +/// boundary, not a frame). +#[allow(dead_code)] +pub(crate) fn is_structural_line(line: &str) -> bool { + if line.is_empty() { + return false; + } + let trimmed = line.trim_start(); + if trimmed.starts_with("Suppressed:") { + return true; + } + if trimmed.starts_with("Caused by:") { + // Only structural when indented (nested in suppressed). Top-level + // Caused by: is handled by parse_segments, not here. 
+ return line + .chars() + .next() + .is_some_and(char::is_whitespace); + } + false +} + #[cfg(test)] mod tests { use super::*; @@ -156,4 +198,56 @@ mod tests { assert_eq!(out.chars().count(), 203); assert!(out.ends_with("...")); } + + #[test] + fn is_app_frame_no_filter_accepts_everything() { + assert!(is_application_frame("\tat com.example.A.foo(A.java:1)", None)); + assert!(is_application_frame("\tat org.springframework.boot.Run(Run.java:1)", None)); + assert!(is_application_frame("\t... 42 more", None)); + } + + #[test] + fn is_app_frame_with_package_accepts_matching() { + assert!(is_application_frame( + "\tat com.example.A.foo(A.java:1)", + Some("com.example"), + )); + assert!(!is_application_frame( + "\tat org.springframework.boot.Run(Run.java:1)", + Some("com.example"), + )); + } + + #[test] + fn is_app_frame_rejects_summary_dots() { + // "\t... 42 more" is a framework artifact, never app + assert!(!is_application_frame("\t... 42 more", Some("com.example"))); + } + + #[test] + fn is_app_frame_rejects_empty_or_whitespace() { + assert!(!is_application_frame("", Some("com.example"))); + assert!(!is_application_frame(" ", Some("com.example"))); + } + + #[test] + fn is_structural_suppressed_top_level() { + assert!(is_structural_line("\tSuppressed: java.io.IOException")); + assert!(is_structural_line("Suppressed: foo")); + } + + #[test] + fn is_structural_indented_caused_by_only() { + // Top-level "Caused by:" is a segment boundary, not structural + assert!(!is_structural_line("Caused by: java.io.IOException")); + // Indented "Caused by:" inside suppressed is structural + assert!(is_structural_line("\tCaused by: java.io.IOException")); + assert!(is_structural_line(" Caused by: nested")); + } + + #[test] + fn is_structural_regular_frame_no() { + assert!(!is_structural_line("\tat com.example.A.foo(A.java:1)")); + assert!(!is_structural_line("")); + } } From d4a33c92e7442beb602b3cbb792b2cc854949e30 Mon Sep 17 00:00:00 2001 From: mariuszs Date: Wed, 15 Apr 2026 
21:31:43 +0200 Subject: [PATCH 27/44] feat(mvn): collapse consecutive framework frames Emits '... N framework frames omitted' for runs of non-app frames; preserves app and structural (Suppressed / nested Caused by) frames. --- src/cmds/java/stack_trace.rs | 111 +++++++++++++++++++++++++++++++++++ 1 file changed, 111 insertions(+) diff --git a/src/cmds/java/stack_trace.rs b/src/cmds/java/stack_trace.rs index 8c90c04db..2487d0bf8 100644 --- a/src/cmds/java/stack_trace.rs +++ b/src/cmds/java/stack_trace.rs @@ -112,6 +112,47 @@ pub(crate) fn is_structural_line(line: &str) -> bool { false } +/// Push frames to `output`, collapsing runs of consecutive framework frames +/// into a single `"\t... N framework frames omitted"` marker. +/// +/// When `app_package` is `None`, all frames are considered app frames and no +/// collapsing occurs — pass-through mode. +#[allow(dead_code)] +pub(crate) fn add_collapsed_frames( + output: &mut Vec, + frames: &[String], + app_package: Option<&str>, +) { + let filter = app_package.is_some_and(|p| !p.is_empty()); + if !filter { + for frame in frames { + output.push(frame.clone()); + } + return; + } + + let mut framework_count: usize = 0; + for frame in frames { + let structural = is_structural_line(frame); + if structural || is_application_frame(frame, app_package) { + if framework_count > 0 { + output.push(format!("\t... {framework_count} framework frames omitted")); + framework_count = 0; + } + if structural { + output.push(truncate_header(frame)); + } else { + output.push(frame.clone()); + } + } else { + framework_count += 1; + } + } + if framework_count > 0 { + output.push(format!("\t... 
{framework_count} framework frames omitted")); + } +} + #[cfg(test)] mod tests { use super::*; @@ -250,4 +291,74 @@ mod tests { assert!(!is_structural_line("\tat com.example.A.foo(A.java:1)")); assert!(!is_structural_line("")); } + + fn collect_collapsed(frames: &[&str], app_package: Option<&str>) -> Vec { + let frames: Vec = frames.iter().map(|s| s.to_string()).collect(); + let mut out = Vec::new(); + add_collapsed_frames(&mut out, &frames, app_package); + out + } + + #[test] + fn collapse_no_filter_keeps_everything() { + let frames = [ + "\tat org.framework.Foo(Foo.java:1)", + "\tat com.example.A.foo(A.java:1)", + "\tat org.framework.Bar(Bar.java:2)", + ]; + let out = collect_collapsed(&frames, None); + assert_eq!(out.len(), 3); + } + + #[test] + fn collapse_all_framework_yields_single_summary() { + let frames = [ + "\tat org.framework.Foo(Foo.java:1)", + "\tat org.framework.Bar(Bar.java:2)", + "\tat org.framework.Baz(Baz.java:3)", + ]; + let out = collect_collapsed(&frames, Some("com.example")); + assert_eq!(out, vec!["\t... 3 framework frames omitted"]); + } + + #[test] + fn collapse_alternating_produces_multiple_summaries() { + let frames = [ + "\tat org.framework.Foo(Foo.java:1)", + "\tat com.example.A.one(A.java:10)", + "\tat org.framework.Bar(Bar.java:2)", + "\tat org.framework.Baz(Baz.java:3)", + "\tat com.example.B.two(B.java:20)", + ]; + let out = collect_collapsed(&frames, Some("com.example")); + assert_eq!( + out, + vec![ + "\t... 1 framework frames omitted", + "\tat com.example.A.one(A.java:10)", + "\t... 2 framework frames omitted", + "\tat com.example.B.two(B.java:20)", + ] + ); + } + + #[test] + fn collapse_preserves_structural_inline() { + let frames = [ + "\tat org.framework.Foo(Foo.java:1)", + "\tSuppressed: java.io.IOException", + "\t\tat org.framework.Bar(Bar.java:2)", + "\t\tCaused by: java.lang.Error: nested", + ]; + let out = collect_collapsed(&frames, Some("com.example")); + assert_eq!( + out, + vec![ + "\t... 
1 framework frames omitted", + "\tSuppressed: java.io.IOException", + "\t... 1 framework frames omitted", + "\t\tCaused by: java.lang.Error: nested", + ] + ); + } } From 629f1692a07c3dff32e9a336c5b67e47431c7501 Mon Sep 17 00:00:00 2001 From: mariuszs Date: Wed, 15 Apr 2026 21:34:00 +0200 Subject: [PATCH 28/44] feat(mvn): cap root cause application frames at 10 Structural lines (Suppressed / nested Caused by) bypass the cap and are always preserved. --- src/cmds/java/stack_trace.rs | 106 +++++++++++++++++++++++++++++++++++ 1 file changed, 106 insertions(+) diff --git a/src/cmds/java/stack_trace.rs b/src/cmds/java/stack_trace.rs index 2487d0bf8..06be636fd 100644 --- a/src/cmds/java/stack_trace.rs +++ b/src/cmds/java/stack_trace.rs @@ -5,6 +5,7 @@ //! collapses framework noise, and preserves root-cause frames. const MAX_HEADER_LENGTH: usize = 200; +const DEFAULT_ROOT_CAUSE_APP_FRAMES: usize = 10; #[derive(Debug, PartialEq)] #[allow(dead_code)] @@ -153,6 +154,47 @@ pub(crate) fn add_collapsed_frames( } } +/// Like `add_collapsed_frames`, but caps the number of non-structural +/// application frames at `DEFAULT_ROOT_CAUSE_APP_FRAMES`. Structural lines +/// (Suppressed, nested Caused by) bypass the cap. +#[allow(dead_code)] +pub(crate) fn add_root_cause_frames( + output: &mut Vec, + frames: &[String], + app_package: Option<&str>, +) { + let filter = app_package.is_some_and(|p| !p.is_empty()); + if !filter { + for frame in frames { + output.push(frame.clone()); + } + return; + } + + let mut app_count: usize = 0; + let mut framework_count: usize = 0; + for frame in frames { + let structural = is_structural_line(frame); + if structural || is_application_frame(frame, app_package) { + if framework_count > 0 { + output.push(format!("\t... 
{framework_count} framework frames omitted")); + framework_count = 0; + } + if structural { + output.push(truncate_header(frame)); + } else if app_count < DEFAULT_ROOT_CAUSE_APP_FRAMES { + output.push(frame.clone()); + app_count += 1; + } + } else { + framework_count += 1; + } + } + if framework_count > 0 { + output.push(format!("\t... {framework_count} framework frames omitted")); + } +} + #[cfg(test)] mod tests { use super::*; @@ -292,6 +334,70 @@ mod tests { assert!(!is_structural_line("")); } + fn collect_root_cause(frames: &[&str], app_package: Option<&str>) -> Vec { + let frames: Vec = frames.iter().map(|s| s.to_string()).collect(); + let mut out = Vec::new(); + add_root_cause_frames(&mut out, &frames, app_package); + out + } + + #[test] + fn root_cause_caps_app_frames_at_ten() { + let mut frames = Vec::new(); + for i in 0..15 { + frames.push(format!("\tat com.example.A.m{i}(A.java:{i})")); + } + let frame_refs: Vec<&str> = frames.iter().map(|s| s.as_str()).collect(); + let out = collect_root_cause(&frame_refs, Some("com.example")); + // 10 kept, 5 dropped silently (no "framework" marker because these are app frames) + assert_eq!(out.len(), 10); + } + + #[test] + fn root_cause_no_filter_keeps_all_frames() { + let mut frames = Vec::new(); + for i in 0..15 { + frames.push(format!("\tat com.example.A.m{i}(A.java:{i})")); + } + let frame_refs: Vec<&str> = frames.iter().map(|s| s.as_str()).collect(); + let out = collect_root_cause(&frame_refs, None); + assert_eq!(out.len(), 15); + } + + #[test] + fn root_cause_structural_bypasses_cap() { + // Structural lines are always preserved, even if we already hit the 10-app cap. 
+ let mut frames = Vec::new(); + for i in 0..10 { + frames.push(format!("\tat com.example.A.m{i}(A.java:{i})")); + } + frames.push("\tSuppressed: x".to_string()); + frames.push("\tat com.example.Z.zzz(Z.java:99)".to_string()); // 11th app — dropped + let frame_refs: Vec<&str> = frames.iter().map(|s| s.as_str()).collect(); + let out = collect_root_cause(&frame_refs, Some("com.example")); + assert_eq!(out.len(), 11, "10 app frames + 1 structural, 11th app dropped"); + assert!(out.contains(&"\tSuppressed: x".to_string())); + } + + #[test] + fn root_cause_collapses_framework_as_before() { + let frames = [ + "\tat com.example.A.foo(A.java:1)", + "\tat org.framework.X(X.java:1)", + "\tat org.framework.Y(Y.java:2)", + "\tat com.example.B.bar(B.java:2)", + ]; + let out = collect_root_cause(&frames, Some("com.example")); + assert_eq!( + out, + vec![ + "\tat com.example.A.foo(A.java:1)", + "\t... 2 framework frames omitted", + "\tat com.example.B.bar(B.java:2)", + ] + ); + } + fn collect_collapsed(frames: &[&str], app_package: Option<&str>) -> Vec { let frames: Vec = frames.iter().map(|s| s.to_string()).collect(); let mut out = Vec::new(); From 552089e7754733164371a5e639a24344003b6842 Mon Sep 17 00:00:00 2001 From: mariuszs Date: Wed, 15 Apr 2026 22:12:31 +0200 Subject: [PATCH 29/44] feat(mvn): stack trace process orchestrator Wires parse_segments, add_collapsed_frames, add_root_cause_frames into the public process(raw, app_package, max_lines) API. Hard cap stubbed for next task. --- src/cmds/java/stack_trace.rs | 94 ++++++++++++++++++++++++++++++++++++ 1 file changed, 94 insertions(+) diff --git a/src/cmds/java/stack_trace.rs b/src/cmds/java/stack_trace.rs index 06be636fd..2f9d457fe 100644 --- a/src/cmds/java/stack_trace.rs +++ b/src/cmds/java/stack_trace.rs @@ -195,6 +195,56 @@ pub(crate) fn add_root_cause_frames( } } +/// Process a Java stack trace: +/// - Top-level header preserved (truncated to 200 chars). +/// - Non-root segments: header + `add_collapsed_frames`. 
+/// - Root (last) segment: header + `add_root_cause_frames`. +/// - If `max_lines > 0` and output exceeds the cap, apply hard-cap truncation +/// (implemented in a later task — currently returns full output). +/// +/// Returns `None` iff `raw` is empty or whitespace-only. +#[allow(dead_code)] +pub fn process(raw: &str, app_package: Option<&str>, max_lines: usize) -> Option { + let trimmed = raw.trim(); + if trimmed.is_empty() { + return None; + } + + let segments = parse_segments(trimmed); + if segments.is_empty() { + return Some(trimmed.to_string()); + } + + let mut out: Vec = Vec::new(); + out.push(truncate_header(&segments[0].header)); + + if segments.len() == 1 { + add_collapsed_frames(&mut out, &segments[0].frames, app_package); + } else { + add_collapsed_frames(&mut out, &segments[0].frames, app_package); + for seg in &segments[1..segments.len() - 1] { + out.push(truncate_header(&seg.header)); + add_collapsed_frames(&mut out, &seg.frames, app_package); + } + let root = segments.last().unwrap(); + out.push(truncate_header(&root.header)); + add_root_cause_frames(&mut out, &root.frames, app_package); + } + + if max_lines > 0 && out.len() > max_lines { + out = apply_hard_cap(out, &segments, max_lines); + } + + Some(out.join("\n")) +} + +// Temporary stub; real implementation in Task 7. 
+fn apply_hard_cap(out: Vec, _segments: &[Segment], max_lines: usize) -> Vec { + let mut out = out; + out.truncate(max_lines); + out +} + #[cfg(test)] mod tests { use super::*; @@ -398,6 +448,50 @@ mod tests { ); } + #[test] + fn process_empty_returns_none() { + assert!(process("", Some("com.example"), 0).is_none()); + assert!(process(" \n ", Some("com.example"), 0).is_none()); + } + + #[test] + fn process_single_segment_no_filter_returns_verbatim() { + let trace = "java.lang.RuntimeException: boom\n\tat com.example.A.foo(A.java:1)"; + let out = process(trace, None, 0).unwrap(); + assert_eq!(out, trace); + } + + #[test] + fn process_single_segment_collapses_framework() { + let trace = "java.lang.AssertionError: fail\n\ + \tat com.example.Test.t(Test.java:5)\n\ + \tat org.junit.runner.Run(Run.java:1)\n\ + \tat org.junit.runner.Run(Run.java:2)"; + let out = process(trace, Some("com.example"), 0).unwrap(); + assert_eq!( + out, + "java.lang.AssertionError: fail\n\ + \tat com.example.Test.t(Test.java:5)\n\ + \t... 
2 framework frames omitted" + ); + } + + #[test] + fn process_multi_segment_preserves_root_cause() { + let trace = "java.lang.RuntimeException: outer\n\ + \tat org.spring.Foo(Foo.java:1)\n\ + Caused by: java.io.IOException: middle\n\ + \tat org.hibernate.Bar(Bar.java:2)\n\ + Caused by: java.net.ConnectException: inner\n\ + \tat com.example.DbService.connect(DbService.java:42)"; + let out = process(trace, Some("com.example"), 0).unwrap(); + assert!(out.contains("java.lang.RuntimeException: outer")); + assert!(out.contains("Caused by: java.io.IOException: middle")); + assert!(out.contains("Caused by: java.net.ConnectException: inner")); + assert!(out.contains("\tat com.example.DbService.connect(DbService.java:42)")); + assert!(out.contains("framework frames omitted")); + } + fn collect_collapsed(frames: &[&str], app_package: Option<&str>) -> Vec { let frames: Vec = frames.iter().map(|s| s.to_string()).collect(); let mut out = Vec::new(); From 07693ef08e8f6239b024a213ba9ccae22ef15949 Mon Sep 17 00:00:00 2001 From: mariuszs Date: Wed, 15 Apr 2026 22:12:34 +0200 Subject: [PATCH 30/44] feat(mvn): stack trace hard cap preserves root cause When root-cause header lies beyond the line cap, emit a synthetic layout with a truncated-intermediate marker so the diagnostic punchline survives. --- src/cmds/java/stack_trace.rs | 121 +++++++++++++++++++++++++++++++++-- 1 file changed, 114 insertions(+), 7 deletions(-) diff --git a/src/cmds/java/stack_trace.rs b/src/cmds/java/stack_trace.rs index 2f9d457fe..1b2af35c6 100644 --- a/src/cmds/java/stack_trace.rs +++ b/src/cmds/java/stack_trace.rs @@ -199,8 +199,8 @@ pub(crate) fn add_root_cause_frames( /// - Top-level header preserved (truncated to 200 chars). /// - Non-root segments: header + `add_collapsed_frames`. /// - Root (last) segment: header + `add_root_cause_frames`. -/// - If `max_lines > 0` and output exceeds the cap, apply hard-cap truncation -/// (implemented in a later task — currently returns full output). 
+/// - If `max_lines > 0` and the collapsed output exceeds the cap, +/// `apply_hard_cap` is called to truncate while preserving the root cause. /// /// Returns `None` iff `raw` is empty or whitespace-only. #[allow(dead_code)] @@ -238,11 +238,58 @@ pub fn process(raw: &str, app_package: Option<&str>, max_lines: usize) -> Option Some(out.join("\n")) } -// Temporary stub; real implementation in Task 7. -fn apply_hard_cap(out: Vec, _segments: &[Segment], max_lines: usize) -> Vec { - let mut out = out; - out.truncate(max_lines); - out +/// Apply a hard cap while preserving the root cause. +/// +/// - For a single segment: straight truncate to `max_lines`. +/// - For multiple segments: +/// - If the root-cause header's index in `out` is already beyond the cap, +/// build a synthetic output: `[top_header, "... (intermediate frames +/// truncated)", root_header, root frames until cap]`. +/// - Otherwise (root-cause header within the cap): straight truncate. +fn apply_hard_cap(out: Vec, segments: &[Segment], max_lines: usize) -> Vec { + if segments.len() <= 1 { + let mut out = out; + out.truncate(max_lines); + return out; + } + + let root = segments.last().unwrap(); + let truncated_root_header = truncate_header(&root.header); + let root_idx = out + .iter() + .rposition(|line| line == &truncated_root_header); + + let Some(idx) = root_idx else { + let mut out = out; + out.truncate(max_lines); + return out; + }; + + if idx < max_lines.saturating_sub(1) { + let mut out = out; + out.truncate(max_lines); + return out; + } + + // Root cause beyond the cap — build synthetic layout. + let mut result: Vec = Vec::with_capacity(max_lines); + if let Some(top) = out.first() { + result.push(top.clone()); + } + if max_lines >= 3 { + result.push("\t... (intermediate frames truncated)".to_string()); + } + result.push(truncated_root_header.clone()); + + let mut remaining = max_lines.saturating_sub(result.len()); + for line in &out[(idx + 1)..] 
{ + if remaining == 0 { + break; + } + result.push(line.clone()); + remaining -= 1; + } + result } #[cfg(test)] @@ -561,4 +608,64 @@ mod tests { ] ); } + + #[test] + fn hard_cap_single_segment_simple_truncate() { + // Non-app frames collapse to one marker, so a 21-line input produces + // a 2-line out. Verify cap-not-triggered behavior first. + let mut trace = String::from("java.lang.RuntimeException: boom"); + for i in 0..20 { + trace.push_str(&format!("\n\tat com.example.A.m{i}(A.java:{i})")); + } + // With app_package=Some("com.example"), all frames are app frames. + // out = [header, 20 app frames]. With cap=5, out.len()=21 > 5 → + // straight-truncate to 5. + let out = process(&trace, Some("com.example"), 5).unwrap(); + assert_eq!(out.lines().count(), 5); + } + + #[test] + fn hard_cap_multi_segment_preserves_root_cause() { + // Two segments, each with enough app frames that even after + // framework-frame collapsing, the total pushed lines exceeds the cap + // AND the root-cause header sits at/beyond (max_lines - 1), forcing + // the synthetic "... (intermediate frames truncated)" layout. + let trace = "java.lang.RuntimeException: outer\n\ + \tat com.example.A.foo(A.java:1)\n\ + \tat com.example.A.bar(A.java:2)\n\ + \tat com.example.A.baz(A.java:3)\n\ + \tat com.example.A.qux(A.java:4)\n\ + Caused by: java.io.IOException: real cause\n\ + \tat com.example.DbService.connect(Db.java:88)\n\ + \tat com.example.DbService.prepare(Db.java:91)\n\ + \tat com.example.DbService.execute(Db.java:94)"; + // out from process(): [top_header, 4 app frames, root_header, 3 app frames] + // out.len() = 9. With max_lines = 5, root_idx = 5, 5 >= 5-1=4 → synthetic. + let out = process(trace, Some("com.example"), 5).unwrap(); + let lines: Vec<&str> = out.lines().collect(); + assert_eq!(lines.len(), 5, "must fit exactly in max_lines=5, got: {out}"); + assert_eq!(lines[0], "java.lang.RuntimeException: outer"); + assert_eq!(lines[1], "\t... 
(intermediate frames truncated)"); + assert_eq!(lines[2], "Caused by: java.io.IOException: real cause"); + assert!( + lines[3].contains("com.example.DbService"), + "first root frame must survive; got line 3: {:?}", + lines[3] + ); + } + + #[test] + fn hard_cap_multi_segment_root_within_limit_straight_truncate() { + // Root cause header at line 3 of output, cap at 10 → straight truncate. + let trace = "java.lang.RuntimeException: outer\n\ + \tat com.example.A.foo(A.java:1)\n\ + Caused by: java.io.IOException: inner\n\ + \tat com.example.B.bar(B.java:1)\n\ + \tat com.example.B.baz(B.java:2)\n\ + \tat com.example.B.qux(B.java:3)\n\ + \tat com.example.B.quux(B.java:4)\n\ + \tat com.example.B.corge(B.java:5)"; + let out = process(trace, Some("com.example"), 6).unwrap(); + assert_eq!(out.lines().count(), 6); + } } From 4750e5949beb1a3a15b4e10a1a70190c26bbfbf1 Mon Sep 17 00:00:00 2001 From: mariuszs Date: Wed, 15 Apr 2026 22:06:18 +0200 Subject: [PATCH 31/44] test(mvn): copy Surefire XML fixtures from maven-mcp MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Covers passing, failing, failing-with-logs, skipped, and error cases — will feed surefire_reports parser tests in the next tasks. 
--- tests/fixtures/java/surefire-reports/.gitkeep | 0 .../TEST-com.example.ErrorTest.xml | 9 +++++++ .../TEST-com.example.FailingTest.xml | 16 +++++++++++++ .../TEST-com.example.FailingTestWithLogs.xml | 24 +++++++++++++++++++ .../TEST-com.example.PassingTest.xml | 6 +++++ .../TEST-com.example.SkippedTest.xml | 7 ++++++ 6 files changed, 62 insertions(+) delete mode 100644 tests/fixtures/java/surefire-reports/.gitkeep create mode 100644 tests/fixtures/java/surefire-reports/TEST-com.example.ErrorTest.xml create mode 100644 tests/fixtures/java/surefire-reports/TEST-com.example.FailingTest.xml create mode 100644 tests/fixtures/java/surefire-reports/TEST-com.example.FailingTestWithLogs.xml create mode 100644 tests/fixtures/java/surefire-reports/TEST-com.example.PassingTest.xml create mode 100644 tests/fixtures/java/surefire-reports/TEST-com.example.SkippedTest.xml diff --git a/tests/fixtures/java/surefire-reports/.gitkeep b/tests/fixtures/java/surefire-reports/.gitkeep deleted file mode 100644 index e69de29bb..000000000 diff --git a/tests/fixtures/java/surefire-reports/TEST-com.example.ErrorTest.xml b/tests/fixtures/java/surefire-reports/TEST-com.example.ErrorTest.xml new file mode 100644 index 000000000..5fe9307fd --- /dev/null +++ b/tests/fixtures/java/surefire-reports/TEST-com.example.ErrorTest.xml @@ -0,0 +1,9 @@ + + + + + java.net.ConnectException: Connection refused + at java.base/sun.nio.ch.Net.connect(Net.java:579) + at com.example.ErrorTest.shouldNotThrow(ErrorTest.java:30) + + diff --git a/tests/fixtures/java/surefire-reports/TEST-com.example.FailingTest.xml b/tests/fixtures/java/surefire-reports/TEST-com.example.FailingTest.xml new file mode 100644 index 000000000..fb28cae26 --- /dev/null +++ b/tests/fixtures/java/surefire-reports/TEST-com.example.FailingTest.xml @@ -0,0 +1,16 @@ + + + + + org.opentest4j.AssertionFailedError: expected:<200> but was:<404> + at org.junit.jupiter.api.AssertEquals.assertEquals(AssertEquals.java:150) + at 
com.example.FailingTest.shouldReturnUser(FailingTest.java:42) + at java.base/java.lang.reflect.Method.invoke(Method.java:580) + + + java.lang.AssertionError: Unexpected exception: NullPointerException + at com.example.FailingTest.shouldHandleNull(FailingTest.java:55) + at java.base/java.lang.reflect.Method.invoke(Method.java:580) + + + diff --git a/tests/fixtures/java/surefire-reports/TEST-com.example.FailingTestWithLogs.xml b/tests/fixtures/java/surefire-reports/TEST-com.example.FailingTestWithLogs.xml new file mode 100644 index 000000000..c0a843c1a --- /dev/null +++ b/tests/fixtures/java/surefire-reports/TEST-com.example.FailingTestWithLogs.xml @@ -0,0 +1,24 @@ + + + + + org.opentest4j.AssertionFailedError: expected connection + at org.junit.jupiter.api.AssertEquals.assertEquals(AssertEquals.java:150) + at com.example.FailingTestWithLogs.shouldConnectToDb(FailingTestWithLogs.java:25) + DEBUG: Initializing connection pool +INFO: Attempting connection to localhost:5432 +WARN: Connection timeout after 5000ms +ERROR: Failed to establish connection + SLF4J: Failed to load class org.slf4j.impl.StaticLoggerBinder +STDERR: Connection refused + + + org.opentest4j.AssertionFailedError: data mismatch + at com.example.FailingTestWithLogs.shouldProcessData(FailingTestWithLogs.java:42) + INFO: Processing batch of 100 records +DEBUG: Record 50 processed successfully + + + This output belongs to a passing test and should NOT be extracted + + diff --git a/tests/fixtures/java/surefire-reports/TEST-com.example.PassingTest.xml b/tests/fixtures/java/surefire-reports/TEST-com.example.PassingTest.xml new file mode 100644 index 000000000..6b2087d37 --- /dev/null +++ b/tests/fixtures/java/surefire-reports/TEST-com.example.PassingTest.xml @@ -0,0 +1,6 @@ + + + + + + diff --git a/tests/fixtures/java/surefire-reports/TEST-com.example.SkippedTest.xml b/tests/fixtures/java/surefire-reports/TEST-com.example.SkippedTest.xml new file mode 100644 index 000000000..fc60faddc --- /dev/null +++ 
b/tests/fixtures/java/surefire-reports/TEST-com.example.SkippedTest.xml @@ -0,0 +1,7 @@ + + + + + + + From 60949db90d0155bede71a67c6556980dee4476ac Mon Sep 17 00:00:00 2001 From: mariuszs Date: Wed, 15 Apr 2026 22:16:06 +0200 Subject: [PATCH 32/44] feat(mvn): parse Surefire XML testsuite via quick-xml Handles testsuite/testcase/failure/error/system-out/system-err with per-test 2000-char log limit and 50-line stack trace truncation. Classifies failure vs error by element name. --- src/cmds/java/surefire_reports.rs | 291 ++++++++++++++++++++++++++++++ 1 file changed, 291 insertions(+) diff --git a/src/cmds/java/surefire_reports.rs b/src/cmds/java/surefire_reports.rs index e679f22fa..e51bec70d 100644 --- a/src/cmds/java/surefire_reports.rs +++ b/src/cmds/java/surefire_reports.rs @@ -2,3 +2,294 @@ //! `target/surefire-reports/TEST-*.xml` and `target/failsafe-reports/*.xml`. //! Uses quick-xml streaming parser. Time-gated by `started_at` to skip stale //! reports from previous runs. + +use crate::cmds::java::stack_trace; +use quick_xml::events::{BytesStart, Event}; +use quick_xml::Reader; + +pub const DEFAULT_STACK_TRACE_LINES: usize = 50; +pub const DEFAULT_PER_TEST_OUTPUT_LIMIT: usize = 2000; +#[allow(dead_code)] +pub const DEFAULT_TOTAL_OUTPUT_LIMIT: usize = 10_000; + +#[derive(Debug, Default, PartialEq)] +pub struct TestSummary { + pub run: u32, + pub failures: u32, + pub errors: u32, + pub skipped: u32, +} + +impl TestSummary { + fn add(&mut self, other: &Self) { + self.run += other.run; + self.failures += other.failures; + self.errors += other.errors; + self.skipped += other.skipped; + } +} + +#[derive(Debug, PartialEq, Clone, Copy)] +pub enum FailureKind { + Failure, + Error, +} + +#[derive(Debug, PartialEq)] +pub struct TestFailure { + pub test_class: String, + pub test_method: String, + pub kind: FailureKind, + pub message: Option, + pub failure_type: Option, + pub stack_trace: Option, + pub test_output: Option, +} + +#[derive(Debug, Default, PartialEq)] +pub 
struct SurefireResult { + pub summary: TestSummary, + pub failures: Vec, + pub files_read: usize, + pub files_skipped_stale: usize, + pub files_malformed: usize, +} + +fn local_name(name: &[u8]) -> &[u8] { + name.rsplit(|b| *b == b':').next().unwrap_or(name) +} + +fn extract_attr( + reader: &Reader<&[u8]>, + start: &BytesStart<'_>, + key: &[u8], +) -> Option { + for attr in start.attributes().flatten() { + if local_name(attr.key.as_ref()) != key { + continue; + } + if let Ok(value) = attr.decode_and_unescape_value(reader.decoder()) { + return Some(value.into_owned()); + } + } + None +} + +fn parse_u32_attr(reader: &Reader<&[u8]>, start: &BytesStart<'_>, key: &[u8]) -> u32 { + extract_attr(reader, start, key) + .and_then(|v| v.parse::().ok()) + .unwrap_or(0) +} + +/// Parse a single Surefire XML testsuite string into a partial result. +/// `app_package` is passed to `stack_trace::process` for frame classification. +/// +/// Returns `None` only if the XML is completely malformed; otherwise a +/// best-effort result is returned. 
+#[allow(dead_code)] +pub(crate) fn parse_content(xml: &str, app_package: Option<&str>) -> Option { + #[derive(Clone, Copy, PartialEq)] + enum CaptureField { + StackTrace, + SystemOut, + SystemErr, + } + + let mut reader = Reader::from_str(xml); + reader.config_mut().trim_text(false); + let mut buf = Vec::new(); + + let mut result = SurefireResult::default(); + let mut saw_testsuite = false; + let mut current_class: Option = None; + let mut current_method: Option = None; + let mut current_has_failure = false; + + let mut pending_message: Option = None; + let mut pending_type: Option = None; + let mut pending_kind: Option = None; + let mut stack_buf = String::new(); + let mut stdout_buf = String::new(); + let mut stderr_buf = String::new(); + let mut capture: Option = None; + + loop { + match reader.read_event_into(&mut buf) { + Ok(Event::Start(e)) | Ok(Event::Empty(e)) => { + match local_name(e.name().as_ref()) { + b"testsuite" => { + saw_testsuite = true; + let file_summary = TestSummary { + run: parse_u32_attr(&reader, &e, b"tests"), + failures: parse_u32_attr(&reader, &e, b"failures"), + errors: parse_u32_attr(&reader, &e, b"errors"), + skipped: parse_u32_attr(&reader, &e, b"skipped"), + }; + result.summary.add(&file_summary); + } + b"testcase" => { + current_class = extract_attr(&reader, &e, b"classname"); + current_method = extract_attr(&reader, &e, b"name"); + current_has_failure = false; + } + b"failure" | b"error" => { + let kind = if local_name(e.name().as_ref()) == b"failure" { + FailureKind::Failure + } else { + FailureKind::Error + }; + pending_message = extract_attr(&reader, &e, b"message"); + pending_type = extract_attr(&reader, &e, b"type"); + pending_kind = Some(kind); + stack_buf.clear(); + capture = Some(CaptureField::StackTrace); + current_has_failure = true; + } + b"system-out" if current_has_failure => { + stdout_buf.clear(); + capture = Some(CaptureField::SystemOut); + } + b"system-err" if current_has_failure => { + stderr_buf.clear(); + 
capture = Some(CaptureField::SystemErr); + } + _ => {} + } + } + Ok(Event::Text(t)) => { + if let Some(field) = capture { + if let Ok(text) = t.unescape() { + match field { + CaptureField::StackTrace => stack_buf.push_str(&text), + CaptureField::SystemOut => stdout_buf.push_str(&text), + CaptureField::SystemErr => stderr_buf.push_str(&text), + } + } + } + } + Ok(Event::End(e)) => { + match local_name(e.name().as_ref()) { + b"failure" | b"error" => { + let processed = stack_trace::process( + stack_buf.trim(), + app_package, + DEFAULT_STACK_TRACE_LINES, + ); + result.failures.push(TestFailure { + test_class: current_class.clone().unwrap_or_default(), + test_method: current_method.clone().unwrap_or_default(), + kind: pending_kind.take().unwrap_or(FailureKind::Failure), + message: pending_message + .take() + .filter(|s| !s.is_empty()) + .map(|s| stack_trace::truncate_header(&s)), + failure_type: pending_type.take().filter(|s| !s.is_empty()), + stack_trace: processed, + test_output: None, + }); + capture = None; + } + b"system-out" | b"system-err" => { + capture = None; + } + b"testcase" => { + let combined = combine_test_output( + &stdout_buf, + &stderr_buf, + DEFAULT_PER_TEST_OUTPUT_LIMIT, + ); + stdout_buf.clear(); + stderr_buf.clear(); + if let Some(combined) = combined { + if let Some(last) = result.failures.last_mut() { + if last.test_class == current_class.clone().unwrap_or_default() + && last.test_method + == current_method.clone().unwrap_or_default() + { + last.test_output = Some(combined); + } + } + } + current_class = None; + current_method = None; + current_has_failure = false; + } + _ => {} + } + } + Ok(Event::Eof) => break, + Err(_) => return None, + _ => {} + } + buf.clear(); + } + + if !saw_testsuite { + return None; + } + + Some(result) +} + +fn combine_test_output(stdout: &str, stderr: &str, per_test_limit: usize) -> Option { + let stdout = stdout.trim(); + let stderr = stderr.trim(); + if stdout.is_empty() && stderr.is_empty() { + return None; + } + 
let mut combined = String::new(); + if !stdout.is_empty() { + combined.push_str(stdout); + } + if !stderr.is_empty() { + if !combined.is_empty() { + combined.push_str("\n[STDERR]\n"); + } else { + combined.push_str("[STDERR]\n"); + } + combined.push_str(stderr); + } + Some(truncate_test_output(&combined, per_test_limit)) +} + +fn truncate_test_output(output: &str, max_chars: usize) -> String { + let char_count = output.chars().count(); + if char_count <= max_chars { + return output.to_string(); + } + let skip = char_count - max_chars; + let tail: String = output.chars().skip(skip).collect(); + format!("... ({skip} chars truncated)\n{tail}") +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn parse_content_single_passing() { + let xml = include_str!( + "../../../tests/fixtures/java/surefire-reports/TEST-com.example.PassingTest.xml" + ); + let result = parse_content(xml, None).expect("passing testsuite parses"); + assert!(result.summary.run >= 1); + assert_eq!(result.summary.failures, 0); + assert_eq!(result.summary.errors, 0); + assert!(result.failures.is_empty()); + } + + #[test] + fn parse_content_single_failing_extracts_details() { + let xml = include_str!( + "../../../tests/fixtures/java/surefire-reports/TEST-com.example.FailingTest.xml" + ); + let result = parse_content(xml, None).expect("failing testsuite parses"); + assert_eq!(result.summary.failures, 2); + assert_eq!(result.failures.len(), 2); + let first = &result.failures[0]; + assert_eq!(first.test_class, "com.example.FailingTest"); + assert!(first.message.as_deref().unwrap_or("").contains("expected")); + assert!(first.stack_trace.is_some()); + assert_eq!(first.kind, FailureKind::Failure); + } +} From 7f1cba6fd4024a863d76772e0a2209eaef6038b6 Mon Sep 17 00:00:00 2001 From: mariuszs Date: Wed, 15 Apr 2026 22:21:21 +0200 Subject: [PATCH 33/44] test(mvn): cover Surefire system-out/err capture and kinds Asserts passing test system-out is not leaked, error vs failure kinds are distinguished, and 
skipped counts are preserved. --- src/cmds/java/surefire_reports.rs | 59 +++++++++++++++++++++++++++++++ 1 file changed, 59 insertions(+) diff --git a/src/cmds/java/surefire_reports.rs b/src/cmds/java/surefire_reports.rs index e51bec70d..f993ae930 100644 --- a/src/cmds/java/surefire_reports.rs +++ b/src/cmds/java/surefire_reports.rs @@ -292,4 +292,63 @@ mod tests { assert!(first.stack_trace.is_some()); assert_eq!(first.kind, FailureKind::Failure); } + + #[test] + fn parse_content_captures_system_out_err_only_for_failed_tests() { + let xml = include_str!( + "../../../tests/fixtures/java/surefire-reports/TEST-com.example.FailingTestWithLogs.xml" + ); + let result = parse_content(xml, None).expect("parses"); + assert_eq!(result.failures.len(), 2); + let with_both_streams = result + .failures + .iter() + .find(|f| f.test_method == "shouldConnectToDb") + .expect("shouldConnectToDb present"); + let output = with_both_streams + .test_output + .as_deref() + .expect("test_output captured"); + assert!(output.contains("Initializing connection pool")); + assert!(output.contains("[STDERR]")); + assert!(output.contains("Connection refused")); + + let with_stdout_only = result + .failures + .iter() + .find(|f| f.test_method == "shouldProcessData") + .expect("shouldProcessData present"); + let output = with_stdout_only.test_output.as_deref().unwrap_or(""); + assert!(output.contains("Processing batch")); + assert!(!output.contains("[STDERR]")); + + // Passing test's must NOT be captured + let passing_system_out_text = "This output belongs to a passing test"; + for failure in &result.failures { + if let Some(out) = &failure.test_output { + assert!( + !out.contains(passing_system_out_text), + "passing-test stdout must not leak into a failure's test_output" + ); + } + } + } + + #[test] + fn parse_content_error_testsuite_marks_failure_kind_error() { + let xml = include_str!( + "../../../tests/fixtures/java/surefire-reports/TEST-com.example.ErrorTest.xml" + ); + let result = 
parse_content(xml, None).expect("parses"); + assert!(result.failures.iter().any(|f| f.kind == FailureKind::Error)); + } + + #[test] + fn parse_content_skipped_testsuite_counts_skipped() { + let xml = include_str!( + "../../../tests/fixtures/java/surefire-reports/TEST-com.example.SkippedTest.xml" + ); + let result = parse_content(xml, None).expect("parses"); + assert!(result.summary.skipped > 0); + } } From b8544dc73f6d52e06b57569fc58050eacd90b46e Mon Sep 17 00:00:00 2001 From: mariuszs Date: Wed, 15 Apr 2026 22:28:30 +0200 Subject: [PATCH 34/44] feat(mvn): surefire parse_dir with mtime time-gate Aggregates TEST-*.xml files; filters stale by mtime >= since; counts malformed files without crashing. Applies total-output-limit across failures. --- src/cmds/java/surefire_reports.rs | 202 ++++++++++++++++++++++++++++++ 1 file changed, 202 insertions(+) diff --git a/src/cmds/java/surefire_reports.rs b/src/cmds/java/surefire_reports.rs index f993ae930..95c6b0107 100644 --- a/src/cmds/java/surefire_reports.rs +++ b/src/cmds/java/surefire_reports.rs @@ -6,6 +6,8 @@ use crate::cmds::java::stack_trace; use quick_xml::events::{BytesStart, Event}; use quick_xml::Reader; +use std::path::Path; +use std::time::SystemTime; pub const DEFAULT_STACK_TRACE_LINES: usize = 50; pub const DEFAULT_PER_TEST_OUTPUT_LIMIT: usize = 2000; @@ -262,9 +264,209 @@ fn truncate_test_output(output: &str, max_chars: usize) -> String { format!("... ({skip} chars truncated)\n{tail}") } +/// Scan a directory for `TEST-*.xml` files and merge their parsed results. +/// +/// - Files whose `mtime < since` are skipped and counted in `files_skipped_stale`. +/// - Files that parse to `None` (malformed) count in `files_malformed`. +/// - Returns `None` only if the directory does not exist or is empty. 
+#[allow(dead_code)] +pub fn parse_dir( + dir: &Path, + since: Option, + app_package: Option<&str>, +) -> Option { + parse_dir_with_limits( + dir, + since, + app_package, + DEFAULT_PER_TEST_OUTPUT_LIMIT, + DEFAULT_TOTAL_OUTPUT_LIMIT, + DEFAULT_STACK_TRACE_LINES, + ) +} + +#[allow(dead_code)] +pub fn parse_dir_with_limits( + dir: &Path, + since: Option, + app_package: Option<&str>, + _per_test_output_limit: usize, + total_output_limit: usize, + _stack_trace_lines: usize, +) -> Option { + if !dir.exists() || !dir.is_dir() { + return None; + } + + let entries = std::fs::read_dir(dir).ok()?; + let mut aggregate = SurefireResult::default(); + let mut any_candidate = false; + + for entry in entries.flatten() { + let path = entry.path(); + let Some(name) = path.file_name().and_then(|s| s.to_str()) else { + continue; + }; + if !name.starts_with("TEST-") || !name.ends_with(".xml") { + continue; + } + any_candidate = true; + + if let Some(since) = since { + let modified = entry.metadata().ok().and_then(|m| m.modified().ok()); + match modified { + Some(m) if m >= since => {} + Some(_) => { + aggregate.files_skipped_stale += 1; + continue; + } + None => { + aggregate.files_skipped_stale += 1; + continue; + } + } + } + + let Ok(content) = std::fs::read_to_string(&path) else { + aggregate.files_malformed += 1; + eprintln!("rtk mvn: skipping unreadable {}", name); + continue; + }; + + match parse_content(&content, app_package) { + Some(file_result) => { + aggregate.files_read += 1; + aggregate.summary.add(&file_result.summary); + aggregate.failures.extend(file_result.failures); + } + None => { + aggregate.files_malformed += 1; + eprintln!("rtk mvn: skipping malformed {}", name); + } + } + } + + if !any_candidate { + return None; + } + + apply_total_output_limit(&mut aggregate.failures, total_output_limit); + Some(aggregate) +} + +#[allow(dead_code)] +fn apply_total_output_limit(failures: &mut [TestFailure], total_limit: usize) { + let mut budget = total_limit; + let mut exhausted 
= false; + for failure in failures.iter_mut() { + if exhausted { + failure.test_output = None; + continue; + } + if let Some(out) = &failure.test_output { + let len = out.chars().count(); + if len > budget { + failure.test_output = None; + exhausted = true; + } else { + budget -= len; + } + } + } +} + #[cfg(test)] mod tests { use super::*; + use std::time::{Duration, SystemTime}; + + fn copy_fixture( + tmp: &tempfile::TempDir, + fixture_name: &str, + mtime: Option, + ) -> std::path::PathBuf { + let src = std::path::Path::new("tests/fixtures/java/surefire-reports").join(fixture_name); + let dst = tmp.path().join(fixture_name); + std::fs::copy(&src, &dst).expect("copy fixture"); + if let Some(mtime) = mtime { + filetime::set_file_mtime(&dst, filetime::FileTime::from_system_time(mtime)) + .expect("set mtime"); + } + dst + } + + #[test] + fn parse_dir_missing_returns_none() { + assert!(super::parse_dir( + std::path::Path::new("/definitely/does/not/exist/rtk-test"), + None, + None + ) + .is_none()); + } + + #[test] + fn parse_dir_empty_returns_none() { + let tmp = tempfile::tempdir().unwrap(); + assert!(super::parse_dir(tmp.path(), None, None).is_none()); + } + + #[test] + fn parse_dir_ignores_non_test_prefix_files() { + let tmp = tempfile::tempdir().unwrap(); + copy_fixture(&tmp, "TEST-com.example.PassingTest.xml", None); + std::fs::write(tmp.path().join("summary.xml"), "").unwrap(); + std::fs::write(tmp.path().join("other.txt"), "hi").unwrap(); + + let result = super::parse_dir(tmp.path(), None, None).expect("parses"); + assert_eq!(result.files_read, 1); + } + + #[test] + fn parse_dir_aggregates_multi_file_counts() { + let tmp = tempfile::tempdir().unwrap(); + copy_fixture(&tmp, "TEST-com.example.PassingTest.xml", None); + copy_fixture(&tmp, "TEST-com.example.FailingTest.xml", None); + copy_fixture(&tmp, "TEST-com.example.SkippedTest.xml", None); + + let result = super::parse_dir(tmp.path(), None, None).expect("parses"); + assert_eq!(result.files_read, 3); + 
assert!(result.summary.run >= 3); + assert!(result.summary.failures >= 2); + assert!(result.summary.skipped >= 1); + } + + #[test] + fn parse_dir_time_gate_skips_stale_files() { + let tmp = tempfile::tempdir().unwrap(); + let now = SystemTime::now(); + let stale = now - Duration::from_secs(60 * 60); // 1h ago + let fresh = now + Duration::from_millis(50); + + copy_fixture(&tmp, "TEST-com.example.PassingTest.xml", Some(stale)); + copy_fixture(&tmp, "TEST-com.example.FailingTest.xml", Some(fresh)); + + let since = now; + let result = super::parse_dir(tmp.path(), Some(since), None).expect("parses"); + assert_eq!(result.files_read, 1, "only the fresh file counts"); + assert_eq!(result.files_skipped_stale, 1); + assert_eq!(result.summary.failures, 2, "from FailingTest only"); + } + + #[test] + fn parse_dir_malformed_counts_but_continues() { + let tmp = tempfile::tempdir().unwrap(); + copy_fixture(&tmp, "TEST-com.example.PassingTest.xml", None); + std::fs::write( + tmp.path().join("TEST-com.example.Broken.xml"), + ">>>", + ) + .unwrap(); + + let result = super::parse_dir(tmp.path(), None, None).expect("parses"); + assert_eq!(result.files_read, 1); + assert_eq!(result.files_malformed, 1); + } #[test] fn parse_content_single_passing() { From 98e969564c2a0153cd133807d9db4a7d83514c27 Mon Sep 17 00:00:00 2001 From: mariuszs Date: Wed, 15 Apr 2026 22:32:31 +0200 Subject: [PATCH 35/44] test(mvn): pin total-output-limit cutoff behavior Asserts the third 4KB test_output is nulled when 10000-char budget is exhausted. 
--- src/cmds/java/surefire_reports.rs | 40 +++++++++++++++++++++++++++++++ 1 file changed, 40 insertions(+) diff --git a/src/cmds/java/surefire_reports.rs b/src/cmds/java/surefire_reports.rs index 95c6b0107..326b03e2e 100644 --- a/src/cmds/java/surefire_reports.rs +++ b/src/cmds/java/surefire_reports.rs @@ -553,4 +553,44 @@ mod tests { let result = parse_content(xml, None).expect("parses"); assert!(result.summary.skipped > 0); } + + #[test] + fn apply_total_output_limit_nulls_out_excess() { + let mut failures = vec![ + TestFailure { + test_class: "A".into(), + test_method: "m1".into(), + kind: FailureKind::Failure, + message: None, + failure_type: None, + stack_trace: None, + test_output: Some("a".repeat(4000)), + }, + TestFailure { + test_class: "A".into(), + test_method: "m2".into(), + kind: FailureKind::Failure, + message: None, + failure_type: None, + stack_trace: None, + test_output: Some("b".repeat(4000)), + }, + TestFailure { + test_class: "A".into(), + test_method: "m3".into(), + kind: FailureKind::Failure, + message: None, + failure_type: None, + stack_trace: None, + test_output: Some("c".repeat(4000)), + }, + ]; + super::apply_total_output_limit(&mut failures, 10_000); + assert!(failures[0].test_output.is_some()); + assert!(failures[1].test_output.is_some()); + assert!( + failures[2].test_output.is_none(), + "third should exceed 10k cumulative" + ); + } } From ba39804987f5eb61a8a7aff60c7fa4418e0ec730 Mon Sep 17 00:00:00 2001 From: mariuszs Date: Wed, 15 Apr 2026 22:35:48 +0200 Subject: [PATCH 36/44] feat(mvn): detect appPackage from pom.xml groupId Reads top-level / with fallback to /. RTK_MVN_APP_PACKAGE env var overrides. Malformed POMs return None. 
--- src/cmds/java/pom_groupid.rs | 167 ++++++++++++++++++ tests/fixtures/java/poms/.gitkeep | 0 tests/fixtures/java/poms/child-pom.xml | 10 ++ tests/fixtures/java/poms/no-groupid-pom.xml | 6 + .../fixtures/java/poms/single-module-pom.xml | 7 + 5 files changed, 190 insertions(+) delete mode 100644 tests/fixtures/java/poms/.gitkeep create mode 100644 tests/fixtures/java/poms/child-pom.xml create mode 100644 tests/fixtures/java/poms/no-groupid-pom.xml create mode 100644 tests/fixtures/java/poms/single-module-pom.xml diff --git a/src/cmds/java/pom_groupid.rs b/src/cmds/java/pom_groupid.rs index 6763e873d..a64bf7b80 100644 --- a/src/cmds/java/pom_groupid.rs +++ b/src/cmds/java/pom_groupid.rs @@ -1,3 +1,170 @@ //! Autodetects the application Java package from `pom.xml `. //! Used by `surefire_reports` / `stack_trace` to classify application frames. //! Can be overridden by `RTK_MVN_APP_PACKAGE` env var. + +use quick_xml::events::Event; +use quick_xml::Reader; +use std::path::Path; + +const OVERRIDE_ENV: &str = "RTK_MVN_APP_PACKAGE"; + +/// Detect the Maven groupId of `cwd`'s `pom.xml`. +/// +/// Resolution order: +/// 1. If env var `RTK_MVN_APP_PACKAGE` is set and non-empty, return it. +/// 2. Read `cwd/pom.xml` and extract top-level `/`. +/// 3. Fall back to `//`. +/// 4. Otherwise `None`. +pub fn detect(cwd: &Path) -> Option { + if let Ok(value) = std::env::var(OVERRIDE_ENV) { + let trimmed = value.trim(); + if !trimmed.is_empty() { + return Some(trimmed.to_string()); + } + } + + let pom_path = cwd.join("pom.xml"); + let content = std::fs::read_to_string(&pom_path).ok()?; + extract_groupid(&content) +} + +pub(crate) fn extract_groupid(xml: &str) -> Option { + let mut reader = Reader::from_str(xml); + reader.config_mut().trim_text(true); + let mut buf = Vec::new(); + + // Tag stack tracked as simple Vec of local names. 
+ let mut stack: Vec = Vec::new(); + let mut top_level_groupid: Option = None; + let mut parent_groupid: Option = None; + let mut capture: Option = None; + + loop { + match reader.read_event_into(&mut buf) { + Ok(Event::Start(e)) => { + let name = std::str::from_utf8(e.name().as_ref()) + .ok() + .and_then(|s| s.rsplit(':').next()) + .unwrap_or("") + .to_string(); + stack.push(name.clone()); + + if is_top_level_groupid(&stack) || is_parent_groupid(&stack) { + capture = Some(name); + } + } + Ok(Event::Text(t)) => { + if capture.is_some() { + if let Ok(text) = t.unescape() { + let text = text.trim(); + if !text.is_empty() { + if is_top_level_groupid(&stack) && top_level_groupid.is_none() { + top_level_groupid = Some(text.to_string()); + } else if is_parent_groupid(&stack) && parent_groupid.is_none() { + parent_groupid = Some(text.to_string()); + } + } + } + } + } + Ok(Event::End(_)) => { + stack.pop(); + capture = None; + if top_level_groupid.is_some() { + break; + } + } + Ok(Event::Eof) => break, + Err(_) => return None, + _ => {} + } + buf.clear(); + } + + top_level_groupid.or(parent_groupid) +} + +fn is_top_level_groupid(stack: &[String]) -> bool { + stack.len() == 2 && stack[0] == "project" && stack[1] == "groupId" +} + +fn is_parent_groupid(stack: &[String]) -> bool { + stack.len() == 3 + && stack[0] == "project" + && stack[1] == "parent" + && stack[2] == "groupId" +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn extract_single_module_groupid() { + let xml = include_str!("../../../tests/fixtures/java/poms/single-module-pom.xml"); + assert_eq!(extract_groupid(xml).as_deref(), Some("com.example.app")); + } + + #[test] + fn extract_falls_back_to_parent_groupid() { + let xml = include_str!("../../../tests/fixtures/java/poms/child-pom.xml"); + assert_eq!(extract_groupid(xml).as_deref(), Some("com.example.parent")); + } + + #[test] + fn extract_no_groupid_returns_none() { + let xml = include_str!("../../../tests/fixtures/java/poms/no-groupid-pom.xml"); 
+ assert!(extract_groupid(xml).is_none()); + } + + #[test] + fn extract_malformed_returns_none() { + assert!(extract_groupid(">>>").is_none()); + } + + #[test] + fn detect_missing_pom_returns_none() { + let tmp = tempfile::tempdir().unwrap(); + assert!(detect(tmp.path()).is_none()); + } + + #[test] + fn detect_env_override_wins() { + let tmp = tempfile::tempdir().unwrap(); + std::fs::copy( + "tests/fixtures/java/poms/single-module-pom.xml", + tmp.path().join("pom.xml"), + ) + .unwrap(); + + // Serial to avoid concurrent env mutation with other tests — this is + // tested in isolation; we restore the var on exit. + let guard = EnvGuard::set(OVERRIDE_ENV, "com.override"); + assert_eq!(detect(tmp.path()).as_deref(), Some("com.override")); + drop(guard); + } + + struct EnvGuard { + key: &'static str, + original: Option, + } + + impl EnvGuard { + fn set(key: &'static str, value: &str) -> Self { + let original = std::env::var(key).ok(); + // SAFETY: single-threaded test; no other thread reads this env var. + unsafe { std::env::set_var(key, value) }; + Self { key, original } + } + } + + impl Drop for EnvGuard { + fn drop(&mut self) { + // SAFETY: single-threaded test; restoring env var on drop. 
+ match &self.original { + Some(v) => unsafe { std::env::set_var(self.key, v) }, + None => unsafe { std::env::remove_var(self.key) }, + } + } + } +} diff --git a/tests/fixtures/java/poms/.gitkeep b/tests/fixtures/java/poms/.gitkeep deleted file mode 100644 index e69de29bb..000000000 diff --git a/tests/fixtures/java/poms/child-pom.xml b/tests/fixtures/java/poms/child-pom.xml new file mode 100644 index 000000000..050da017f --- /dev/null +++ b/tests/fixtures/java/poms/child-pom.xml @@ -0,0 +1,10 @@ + + + 4.0.0 + + com.example.parent + parent + 1.0.0 + + child + diff --git a/tests/fixtures/java/poms/no-groupid-pom.xml b/tests/fixtures/java/poms/no-groupid-pom.xml new file mode 100644 index 000000000..c6ff96f69 --- /dev/null +++ b/tests/fixtures/java/poms/no-groupid-pom.xml @@ -0,0 +1,6 @@ + + + 4.0.0 + orphan + 1.0.0 + diff --git a/tests/fixtures/java/poms/single-module-pom.xml b/tests/fixtures/java/poms/single-module-pom.xml new file mode 100644 index 000000000..d6226df58 --- /dev/null +++ b/tests/fixtures/java/poms/single-module-pom.xml @@ -0,0 +1,7 @@ + + + 4.0.0 + com.example.app + single + 1.0.0 + From ddc0b39c02b44d0aea8318b0c934b7497065be2b Mon Sep 17 00:00:00 2001 From: mariuszs Date: Wed, 15 Apr 2026 22:40:46 +0200 Subject: [PATCH 37/44] test(mvn): add failsafe + real-world stack trace fixtures Two failsafe-report XMLs (ApplicationContext failure, port conflict) and a Spring Caused-by chain for stack_trace::process coverage. 
--- src/cmds/java/stack_trace.rs | 10 ++++++++++ tests/fixtures/java/failsafe-reports/.gitkeep | 0 .../TEST-com.example.DbIntegrationIT.xml | 20 +++++++++++++++++++ .../TEST-com.example.PortConflictIT.xml | 9 +++++++++ tests/fixtures/java/stack-traces/.gitkeep | 0 .../java/stack-traces/multi-caused-by.txt | 10 ++++++++++ 6 files changed, 49 insertions(+) delete mode 100644 tests/fixtures/java/failsafe-reports/.gitkeep create mode 100644 tests/fixtures/java/failsafe-reports/TEST-com.example.DbIntegrationIT.xml create mode 100644 tests/fixtures/java/failsafe-reports/TEST-com.example.PortConflictIT.xml delete mode 100644 tests/fixtures/java/stack-traces/.gitkeep create mode 100644 tests/fixtures/java/stack-traces/multi-caused-by.txt diff --git a/src/cmds/java/stack_trace.rs b/src/cmds/java/stack_trace.rs index 1b2af35c6..f51adf891 100644 --- a/src/cmds/java/stack_trace.rs +++ b/src/cmds/java/stack_trace.rs @@ -668,4 +668,14 @@ mod tests { let out = process(trace, Some("com.example"), 6).unwrap(); assert_eq!(out.lines().count(), 6); } + + #[test] + fn process_real_world_spring_fixture() { + let trace = include_str!("../../../tests/fixtures/java/stack-traces/multi-caused-by.txt"); + let out = process(trace, Some("com.example"), 50).unwrap(); + assert!(out.contains("Caused by: org.springframework.beans.factory.BeanCreationException")); + assert!(out.contains("Caused by: org.hibernate.HibernateException")); + assert!(out.contains("com.example.DbIntegrationIT.shouldConnect")); + assert!(out.contains("framework frames omitted")); + } } diff --git a/tests/fixtures/java/failsafe-reports/.gitkeep b/tests/fixtures/java/failsafe-reports/.gitkeep deleted file mode 100644 index e69de29bb..000000000 diff --git a/tests/fixtures/java/failsafe-reports/TEST-com.example.DbIntegrationIT.xml b/tests/fixtures/java/failsafe-reports/TEST-com.example.DbIntegrationIT.xml new file mode 100644 index 000000000..ae4b796d9 --- /dev/null +++ 
b/tests/fixtures/java/failsafe-reports/TEST-com.example.DbIntegrationIT.xml @@ -0,0 +1,20 @@ + + + + + java.lang.IllegalStateException: Failed to load ApplicationContext + at org.springframework.test.context.cache.DefaultCacheAwareContextLoaderDelegate.loadContext(DefaultCacheAwareContextLoaderDelegate.java:180) + at org.springframework.test.context.support.DefaultTestContext.getApplicationContext(DefaultTestContext.java:124) + at org.springframework.test.context.support.DependencyInjectionTestExecutionListener.injectDependencies(DependencyInjectionTestExecutionListener.java:118) +Caused by: org.springframework.beans.factory.BeanCreationException: Error creating bean with name 'dataSource' + at org.springframework.beans.factory.support.AbstractAutowireCapableBeanFactory.doCreateBean(AbstractAutowireCapableBeanFactory.java:628) + at org.springframework.beans.factory.support.AbstractAutowireCapableBeanFactory.createBean(AbstractAutowireCapableBeanFactory.java:542) + at org.springframework.boot.autoconfigure.jdbc.DataSourceAutoConfiguration.dataSource(DataSourceAutoConfiguration.java:114) +Caused by: org.hibernate.HibernateException: Unable to acquire JDBC Connection; nested exception is java.sql.SQLTransientConnectionException: HikariPool-1 - Connection is not available, request timed out after 30000ms. 
+ at org.hibernate.internal.SessionFactoryImpl.createEntityManagerFactory(SessionFactoryImpl.java:512) + at com.example.DbIntegrationIT.shouldConnect(DbIntegrationIT.java:88) + at java.base/java.lang.reflect.Method.invoke(Method.java:580) + 2026-04-15 10:42:17 ERROR HikariDataSource - HikariPool-1 - Connection is not available +Connection refused (Connection refused) + + diff --git a/tests/fixtures/java/failsafe-reports/TEST-com.example.PortConflictIT.xml b/tests/fixtures/java/failsafe-reports/TEST-com.example.PortConflictIT.xml new file mode 100644 index 000000000..f96d0bf1a --- /dev/null +++ b/tests/fixtures/java/failsafe-reports/TEST-com.example.PortConflictIT.xml @@ -0,0 +1,9 @@ + + + + java.net.BindException: Address already in use + at java.base/sun.nio.ch.Net.bind0(Native Method) + at java.base/sun.nio.ch.Net.bind(Net.java:555) + at com.example.PortConflictIT.shouldStartServer(PortConflictIT.java:42) + + diff --git a/tests/fixtures/java/stack-traces/.gitkeep b/tests/fixtures/java/stack-traces/.gitkeep deleted file mode 100644 index e69de29bb..000000000 diff --git a/tests/fixtures/java/stack-traces/multi-caused-by.txt b/tests/fixtures/java/stack-traces/multi-caused-by.txt new file mode 100644 index 000000000..b4bab03d1 --- /dev/null +++ b/tests/fixtures/java/stack-traces/multi-caused-by.txt @@ -0,0 +1,10 @@ +java.lang.IllegalStateException: Failed to load ApplicationContext + at org.springframework.test.context.cache.DefaultCacheAwareContextLoaderDelegate.loadContext(DefaultCacheAwareContextLoaderDelegate.java:180) + at org.springframework.test.context.support.DefaultTestContext.getApplicationContext(DefaultTestContext.java:124) +Caused by: org.springframework.beans.factory.BeanCreationException: Error creating bean with name 'dataSource' + at org.springframework.beans.factory.support.AbstractAutowireCapableBeanFactory.doCreateBean(AbstractAutowireCapableBeanFactory.java:628) + at 
org.springframework.beans.factory.support.AbstractAutowireCapableBeanFactory.createBean(AbstractAutowireCapableBeanFactory.java:542) +Caused by: org.hibernate.HibernateException: Unable to acquire JDBC Connection + at org.hibernate.internal.SessionFactoryImpl.createEntityManagerFactory(SessionFactoryImpl.java:512) + at com.example.DbIntegrationIT.shouldConnect(DbIntegrationIT.java:88) + at java.base/java.lang.reflect.Method.invoke(Method.java:580) From f7cfc66d93344fde446d3e38b711c69b32aae6dd Mon Sep 17 00:00:00 2001 From: mariuszs Date: Wed, 15 Apr 2026 22:44:16 +0200 Subject: [PATCH 38/44] refactor(mvn): wire started_at/cwd/app_pkg into run_test Prepares scaffolding for XML report enrichment. enrich_with_reports is currently an identity function; real logic lands in the next commit. --- src/cmds/java/mvn_cmd.rs | 31 ++++++++++++++++++++++++++++++- 1 file changed, 30 insertions(+), 1 deletion(-) diff --git a/src/cmds/java/mvn_cmd.rs b/src/cmds/java/mvn_cmd.rs index 7e23fcf7c..5f1cc4cdf 100644 --- a/src/cmds/java/mvn_cmd.rs +++ b/src/cmds/java/mvn_cmd.rs @@ -114,11 +114,26 @@ pub fn run_test(args: &[String], verbose: u8) -> Result { eprintln!("Running: mvn test {}", args.join(" ")); } + let started_at = std::time::SystemTime::now(); + let cwd = std::env::current_dir().unwrap_or_else(|_| std::path::PathBuf::from(".")); + let app_pkg = crate::cmds::java::pom_groupid::detect(&cwd); + + let cwd_for_filter = cwd.clone(); + let app_pkg_for_filter = app_pkg.clone(); + runner::run_filtered( cmd, "mvn test", &args.join(" "), - filter_mvn_test, + move |raw: &str| { + let filtered = filter_mvn_test(raw); + enrich_with_reports( + &filtered, + &cwd_for_filter, + started_at, + app_pkg_for_filter.as_deref(), + ) + }, runner::RunOptions::with_tee("mvn_test"), ) } @@ -353,6 +368,20 @@ fn parse_counts(caps: ®ex::Captures) -> TestCounts { } } +/// Identity placeholder for Surefire XML enrichment (Task 16). 
+/// +/// Receives the already-filtered test output and will eventually append failure +/// details parsed from Surefire XML reports found under `cwd`. The `since` +/// timestamp selects only report files written after the test run started. +fn enrich_with_reports( + text: &str, + _cwd: &std::path::Path, + _since: std::time::SystemTime, + _app_package: Option<&str>, +) -> String { + text.to_string() +} + /// Filter `mvn test` output using a state machine parser. /// /// States: Preamble -> Testing -> Summary -> Done From 5d0d7201f28241a674e84b26a20b722dd798c32d Mon Sep 17 00:00:00 2001 From: mariuszs Date: Wed, 15 Apr 2026 22:49:17 +0200 Subject: [PATCH 39/44] feat(mvn): enrich test output with Surefire/Failsafe XML Appends a structured Failures section for each report directory, with per-failure stack trace (framework-frame-collapsed), optional captured output, and a reports-processed footer. Short-circuits on happy path to avoid I/O. Emits a red-flag message when 'no tests run' is reported but also no fresh XML reports are present. --- src/cmds/java/mvn_cmd.rs | 261 +++++++++++++++++++++++++++++++++++++-- 1 file changed, 250 insertions(+), 11 deletions(-) diff --git a/src/cmds/java/mvn_cmd.rs b/src/cmds/java/mvn_cmd.rs index 5f1cc4cdf..a5f269b66 100644 --- a/src/cmds/java/mvn_cmd.rs +++ b/src/cmds/java/mvn_cmd.rs @@ -4,6 +4,7 @@ //! Preamble -> Testing -> Summary -> Done. //! Strips thousands of noise lines to compact failure reports (99%+ savings). +use crate::cmds::java::surefire_reports::{self, FailureKind, SurefireResult, TestFailure}; use crate::core::runner; use crate::core::tracking; use crate::core::utils::{exit_code_from_status, resolved_command, strip_ansi, truncate}; @@ -18,6 +19,8 @@ const INFO_TAG: &str = "[INFO]"; const ERROR_TAG: &str = "[ERROR]"; const WARNING_TAG: &str = "[WARNING]"; +const MAX_FAILURES_PER_SOURCE: usize = 10; + lazy_static! 
{ static ref TESTS_RUN_RE: Regex = Regex::new(r"Tests run:\s*(\d+),\s*Failures:\s*(\d+),\s*Errors:\s*(\d+),\s*Skipped:\s*(\d+)") @@ -368,18 +371,166 @@ fn parse_counts(caps: &regex::Captures) -> TestCounts { } } -/// Identity placeholder for Surefire XML enrichment (Task 16). -/// -/// Receives the already-filtered test output and will eventually append failure -/// details parsed from Surefire XML reports found under `cwd`. The `since` -/// timestamp selects only report files written after the test run started. -fn enrich_with_reports( - text: &str, - _cwd: &std::path::Path, - _since: std::time::SystemTime, - _app_package: Option<&str>, +/// Wrap the text-filter summary with structured failure details sourced from +/// `target/surefire-reports/` and `target/failsafe-reports/` XML files. +pub(crate) fn enrich_with_reports( + text_summary: &str, + cwd: &std::path::Path, + since: std::time::SystemTime, + app_package: Option<&str>, +) -> String { + if !text_summary.starts_with("mvn ") { + return text_summary.to_string(); + } + + let zero_tests = text_summary == "mvn test: no tests run" + || text_summary.contains("0 passed"); + let has_failures = + text_summary.contains("failed") || text_summary.contains("BUILD FAILURE"); + let looks_clean = text_summary.contains("passed (") + && !text_summary.contains("failed") + && !text_summary.contains("BUILD FAILURE"); + + if looks_clean && !zero_tests { + return text_summary.to_string(); + } + + let sf = surefire_reports::parse_dir( + &cwd.join("target/surefire-reports"), + Some(since), + app_package, + ); + let fs = surefire_reports::parse_dir( + &cwd.join("target/failsafe-reports"), + Some(since), + app_package, + ); + + match (zero_tests, has_failures, &sf, &fs) { + (true, _, None, None) => { + "mvn test: 0 tests executed — surefire nie wykrył testów. 
\ + Sprawdź pom.xml (plugin surefire configuration) lub uruchom: \ + rtk proxy mvn test" + .to_string() + } + (_, true, None, None) => format!( + "{text_summary}\n(no XML reports found — check target/surefire-reports/ \ + or run: rtk proxy mvn test)" + ), + _ => render_enriched(text_summary, sf.as_ref(), fs.as_ref()), + } +} + +fn render_enriched( + text_summary: &str, + surefire: Option<&SurefireResult>, + failsafe: Option<&SurefireResult>, +) -> String { + let mut out = String::from(text_summary); + + if let Some(sf) = surefire { + if !sf.failures.is_empty() { + out.push_str("\n\nFailures (from surefire-reports/):\n"); + render_failure_block(&mut out, &sf.failures); + } + } + + if let Some(fs) = failsafe { + if !fs.failures.is_empty() { + out.push_str("\n\nIntegration failures (from failsafe-reports/):\n"); + render_failure_block(&mut out, &fs.failures); + } + } + + let footer = render_footer(surefire, failsafe); + if !footer.is_empty() { + out.push_str("\n\n"); + out.push_str(&footer); + } + + out +} + +fn render_failure_block(out: &mut String, failures: &[TestFailure]) { + let shown = failures.iter().take(MAX_FAILURES_PER_SOURCE); + for (i, f) in shown.enumerate() { + writeln!(out, "{}. {}.{}", i + 1, f.test_class, f.test_method).ok(); + if let Some(kind_label) = failure_kind_label(f) { + writeln!(out, " {kind_label}").ok(); + } + if let Some(trace) = &f.stack_trace { + for line in trace.lines() { + writeln!(out, " {line}").ok(); + } + } + if let Some(output) = f.test_output.as_deref().filter(|s| !s.is_empty()) { + writeln!(out, " captured output:").ok(); + for line in output.lines() { + writeln!(out, " {line}").ok(); + } + } + out.push('\n'); + } + if failures.len() > MAX_FAILURES_PER_SOURCE { + writeln!( + out, + "... 
+{} more failures", + failures.len() - MAX_FAILURES_PER_SOURCE + ) + .ok(); + } +} + +fn failure_kind_label(f: &TestFailure) -> Option<String> { + let msg = f.message.as_deref().unwrap_or("").trim(); + let ty = f + .failure_type + .as_deref() + .and_then(|t| t.rsplit('.').next()) + .unwrap_or(""); + match (ty.is_empty(), msg.is_empty()) { + (true, true) => None, + (true, false) => Some(msg.to_string()), + (false, true) => Some(ty.to_string()), + (false, false) => Some(format!("{ty}: {msg}")), + } + .map(|s| match f.kind { + FailureKind::Error => format!("[error] {s}"), + FailureKind::Failure => s, + }) +} + +fn render_footer( + surefire: Option<&SurefireResult>, + failsafe: Option<&SurefireResult>, ) -> String { - text.to_string() + let mut parts: Vec<String> = Vec::new(); + let (sf_read, sf_stale, sf_bad) = counts(surefire); + let (fs_read, fs_stale, fs_bad) = counts(failsafe); + + if sf_read > 0 { + parts.push(format!("{sf_read} surefire")); + } + if fs_read > 0 { + parts.push(format!("{fs_read} failsafe")); + } + let stale = sf_stale + fs_stale; + if stale > 0 { + parts.push(format!("{stale} stale files skipped")); + } + let malformed = sf_bad + fs_bad; + if malformed > 0 { + parts.push(format!("{malformed} malformed")); + } + if parts.is_empty() { + return String::new(); + } + format!("(reports: {})", parts.join(", ")) +} + +fn counts(r: Option<&SurefireResult>) -> (usize, usize, usize) { + r.map(|x| (x.files_read, x.files_skipped_stale, x.files_malformed)) + .unwrap_or((0, 0, 0)) } /// Filter `mvn test` output using a state machine parser. /// /// States: Preamble -> Testing -> Summary -> Done @@ -1884,4 +2035,92 @@ mod tests { output_tokens, ); } + + #[test] + fn enrich_happy_path_passes_through_without_io() { + let tmp = tempfile::tempdir().unwrap(); + // No target/ directory exists under tmp — ensures no I/O fallback would succeed. 
+ let text = "mvn test: 42 passed (1.234 s)"; + let out = super::enrich_with_reports( + text, + tmp.path(), + std::time::SystemTime::now(), + Some("com.example"), + ); + assert_eq!(out, text); + } + + #[test] + fn enrich_no_tests_with_no_reports_emits_red_flag() { + let tmp = tempfile::tempdir().unwrap(); + let text = "mvn test: no tests run"; + let out = super::enrich_with_reports( + text, + tmp.path(), + std::time::SystemTime::now(), + Some("com.example"), + ); + assert!(out.contains("0 tests executed")); + assert!(out.contains("rtk proxy mvn test") || out.contains("surefire")); + } + + #[test] + fn enrich_with_surefire_fixture_appends_failures_section() { + let tmp = tempfile::tempdir().unwrap(); + let reports_dir = tmp.path().join("target/surefire-reports"); + std::fs::create_dir_all(&reports_dir).unwrap(); + std::fs::copy( + "tests/fixtures/java/surefire-reports/TEST-com.example.FailingTest.xml", + reports_dir.join("TEST-com.example.FailingTest.xml"), + ) + .unwrap(); + + let since = std::time::SystemTime::now() - std::time::Duration::from_secs(60); + let text = "mvn test: 4 run, 2 failed (01:02 min)\nBUILD FAILURE"; + let out = super::enrich_with_reports(text, tmp.path(), since, Some("com.example")); + + assert!(out.contains("Failures (from surefire-reports/)")); + assert!(out.contains("com.example.FailingTest.shouldReturnUser")); + assert!(out.contains("reports:")); + } + + #[test] + fn enrich_with_both_report_dirs_appends_both_sections() { + let tmp = tempfile::tempdir().unwrap(); + let sf = tmp.path().join("target/surefire-reports"); + let fs = tmp.path().join("target/failsafe-reports"); + std::fs::create_dir_all(&sf).unwrap(); + std::fs::create_dir_all(&fs).unwrap(); + std::fs::copy( + "tests/fixtures/java/surefire-reports/TEST-com.example.FailingTest.xml", + sf.join("TEST-com.example.FailingTest.xml"), + ) + .unwrap(); + std::fs::copy( + "tests/fixtures/java/failsafe-reports/TEST-com.example.DbIntegrationIT.xml", + 
fs.join("TEST-com.example.DbIntegrationIT.xml"), + ) + .unwrap(); + + let since = std::time::SystemTime::now() - std::time::Duration::from_secs(60); + let text = "mvn verify: 10 run, 3 failed (03:30 min)\nBUILD FAILURE"; + let out = super::enrich_with_reports(text, tmp.path(), since, Some("com.example")); + assert!(out.contains("Failures (from surefire-reports/)")); + assert!(out.contains("Integration failures (from failsafe-reports/)")); + assert!(out.contains("Caused by: org.hibernate.HibernateException")); + } + + #[test] + fn enrich_failures_without_xml_appends_hint() { + let tmp = tempfile::tempdir().unwrap(); + let text = "mvn test: 5 run, 2 failed (0.500 s)\nBUILD FAILURE"; + let out = super::enrich_with_reports( + text, + tmp.path(), + std::time::SystemTime::now(), + Some("com.example"), + ); + assert!(out.contains("no XML reports")); + assert!(out.contains("rtk proxy mvn test")); + } } From eb3565a5fed499fe297a054cd535d5ea6dd955a2 Mon Sep 17 00:00:00 2001 From: mariuszs Date: Wed, 15 Apr 2026 22:55:42 +0200 Subject: [PATCH 40/44] =?UTF-8?q?fix(mvn):=20enrich=20zero=5Ftests=20check?= =?UTF-8?q?=20=E2=80=94=20prefix=20with=20":=20"=20to=20avoid=20substring?= =?UTF-8?q?=20match?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit "10 passed" previously triggered the zero_tests branch via substring match on "0 passed". Anchoring with ": 0 passed" scopes the check to literal zero. Also translates the red-flag message to English. 
--- src/cmds/java/mvn_cmd.rs | 20 +++++++++++++++++--- 1 file changed, 17 insertions(+), 3 deletions(-) diff --git a/src/cmds/java/mvn_cmd.rs b/src/cmds/java/mvn_cmd.rs index a5f269b66..93113ff26 100644 --- a/src/cmds/java/mvn_cmd.rs +++ b/src/cmds/java/mvn_cmd.rs @@ -384,7 +384,7 @@ pub(crate) fn enrich_with_reports( } let zero_tests = text_summary == "mvn test: no tests run" - || text_summary.contains("0 passed"); + || text_summary.contains(": 0 passed"); let has_failures = text_summary.contains("failed") || text_summary.contains("BUILD FAILURE"); let looks_clean = text_summary.contains("passed (") @@ -408,8 +408,8 @@ pub(crate) fn enrich_with_reports( match (zero_tests, has_failures, &sf, &fs) { (true, _, None, None) => { - "mvn test: 0 tests executed — surefire nie wykrył testów. \ - Sprawdź pom.xml (plugin surefire configuration) lub uruchom: \ + "mvn test: 0 tests executed — surefire detected no tests. \ + Check pom.xml (surefire plugin configuration) or run: \ rtk proxy mvn test" .to_string() } @@ -2123,4 +2123,18 @@ mod tests { assert!(out.contains("no XML reports")); assert!(out.contains("rtk proxy mvn test")); } + + #[test] + fn enrich_happy_path_with_10_passed_is_short_circuited() { + // Regression: "10 passed" must not trigger zero_tests via substring of "0 passed". + let tmp = tempfile::tempdir().unwrap(); + let text = "mvn test: 10 passed (0.500 s)"; + let out = super::enrich_with_reports( + text, + tmp.path(), + std::time::SystemTime::now(), + Some("com.example"), + ); + assert_eq!(out, text, "10 passed must short-circuit without enrichment"); + } } From 52ecec7db2bc1684f2667751f4f05c7bc09ca8fe Mon Sep 17 00:00:00 2001 From: mariuszs Date: Wed, 15 Apr 2026 22:59:27 +0200 Subject: [PATCH 41/44] test(mvn): snapshot tests for enriched surefire/failsafe rendering Pins output format for surefire-only, both-report-dirs, and the no-tests red-flag path. Adjust with 'cargo insta review' when output changes. 
--- src/cmds/java/mvn_cmd.rs | 63 +++++++++++++++++++ ...apshot_enriched_surefire_and_failsafe.snap | 48 ++++++++++++++ ...ests__snapshot_enriched_surefire_only.snap | 24 +++++++ ...md__tests__snapshot_red_flag_no_tests.snap | 5 ++ 4 files changed, 140 insertions(+) create mode 100644 src/cmds/java/snapshots/rtk__cmds__java__mvn_cmd__tests__snapshot_enriched_surefire_and_failsafe.snap create mode 100644 src/cmds/java/snapshots/rtk__cmds__java__mvn_cmd__tests__snapshot_enriched_surefire_only.snap create mode 100644 src/cmds/java/snapshots/rtk__cmds__java__mvn_cmd__tests__snapshot_red_flag_no_tests.snap diff --git a/src/cmds/java/mvn_cmd.rs b/src/cmds/java/mvn_cmd.rs index 93113ff26..f12bf80de 100644 --- a/src/cmds/java/mvn_cmd.rs +++ b/src/cmds/java/mvn_cmd.rs @@ -2137,4 +2137,67 @@ mod tests { ); assert_eq!(out, text, "10 passed must short-circuit without enrichment"); } + + #[test] + fn snapshot_enriched_surefire_only() { + let tmp = tempfile::tempdir().unwrap(); + let reports = tmp.path().join("target/surefire-reports"); + std::fs::create_dir_all(&reports).unwrap(); + for name in [ + "TEST-com.example.FailingTest.xml", + "TEST-com.example.PassingTest.xml", + ] { + std::fs::copy( + format!("tests/fixtures/java/surefire-reports/{name}"), + reports.join(name), + ) + .unwrap(); + } + + let since = std::time::SystemTime::now() - std::time::Duration::from_secs(60); + let text = "mvn test: 7 run, 2 failed (00:10 min)\nBUILD FAILURE"; + let out = super::enrich_with_reports(text, tmp.path(), since, Some("com.example")); + insta::assert_snapshot!(out); + } + + #[test] + fn snapshot_enriched_surefire_and_failsafe() { + let tmp = tempfile::tempdir().unwrap(); + let sf = tmp.path().join("target/surefire-reports"); + let fs = tmp.path().join("target/failsafe-reports"); + std::fs::create_dir_all(&sf).unwrap(); + std::fs::create_dir_all(&fs).unwrap(); + std::fs::copy( + "tests/fixtures/java/surefire-reports/TEST-com.example.FailingTest.xml", + 
sf.join("TEST-com.example.FailingTest.xml"), + ) + .unwrap(); + std::fs::copy( + "tests/fixtures/java/failsafe-reports/TEST-com.example.DbIntegrationIT.xml", + fs.join("TEST-com.example.DbIntegrationIT.xml"), + ) + .unwrap(); + std::fs::copy( + "tests/fixtures/java/failsafe-reports/TEST-com.example.PortConflictIT.xml", + fs.join("TEST-com.example.PortConflictIT.xml"), + ) + .unwrap(); + + let since = std::time::SystemTime::now() - std::time::Duration::from_secs(60); + let text = "mvn verify: 12 run, 4 failed (05:42 min)\nBUILD FAILURE"; + let out = super::enrich_with_reports(text, tmp.path(), since, Some("com.example")); + insta::assert_snapshot!(out); + } + + #[test] + fn snapshot_red_flag_no_tests() { + let tmp = tempfile::tempdir().unwrap(); + let out = super::enrich_with_reports( + "mvn test: no tests run", + tmp.path(), + std::time::SystemTime::now(), + Some("com.example"), + ); + insta::assert_snapshot!(out); + } } diff --git a/src/cmds/java/snapshots/rtk__cmds__java__mvn_cmd__tests__snapshot_enriched_surefire_and_failsafe.snap b/src/cmds/java/snapshots/rtk__cmds__java__mvn_cmd__tests__snapshot_enriched_surefire_and_failsafe.snap new file mode 100644 index 000000000..f631b2f83 --- /dev/null +++ b/src/cmds/java/snapshots/rtk__cmds__java__mvn_cmd__tests__snapshot_enriched_surefire_and_failsafe.snap @@ -0,0 +1,48 @@ +--- +source: src/cmds/java/mvn_cmd.rs +expression: out +--- +mvn verify: 12 run, 4 failed (05:42 min) +BUILD FAILURE + +Failures (from surefire-reports/): +1. com.example.FailingTest.shouldReturnUser + AssertionFailedError: expected:<200> but was:<404> + org.opentest4j.AssertionFailedError: expected:<200> but was:<404> + ... 1 framework frames omitted + at com.example.FailingTest.shouldReturnUser(FailingTest.java:42) + ... 1 framework frames omitted + +2. 
com.example.FailingTest.shouldHandleNull + AssertionError: Unexpected exception: NullPointerException + java.lang.AssertionError: Unexpected exception: NullPointerException + at com.example.FailingTest.shouldHandleNull(FailingTest.java:55) + ... 1 framework frames omitted + + + +Integration failures (from failsafe-reports/): +1. com.example.PortConflictIT.shouldStartServer + [error] BindException: Address already in use + java.net.BindException: Address already in use + ... 2 framework frames omitted + at com.example.PortConflictIT.shouldStartServer(PortConflictIT.java:42) + +2. com.example.DbIntegrationIT.shouldConnect + [error] IllegalStateException: Failed to load ApplicationContext + java.lang.IllegalStateException: Failed to load ApplicationContext + ... 3 framework frames omitted + Caused by: org.springframework.beans.factory.BeanCreationException: Error creating bean with name 'dataSource' + ... 3 framework frames omitted + Caused by: org.hibernate.HibernateException: Unable to acquire JDBC Connection; nested exception is java.sql.SQLTransientConnectionException: HikariPool-1 - Connection is not available, request timed ... + ... 1 framework frames omitted + at com.example.DbIntegrationIT.shouldConnect(DbIntegrationIT.java:88) + ... 
1 framework frames omitted + captured output: + [STDERR] + 2026-04-15 10:42:17 ERROR HikariDataSource - HikariPool-1 - Connection is not available + Connection refused (Connection refused) + + + +(reports: 1 surefire, 2 failsafe) diff --git a/src/cmds/java/snapshots/rtk__cmds__java__mvn_cmd__tests__snapshot_enriched_surefire_only.snap b/src/cmds/java/snapshots/rtk__cmds__java__mvn_cmd__tests__snapshot_enriched_surefire_only.snap new file mode 100644 index 000000000..fed4a62b5 --- /dev/null +++ b/src/cmds/java/snapshots/rtk__cmds__java__mvn_cmd__tests__snapshot_enriched_surefire_only.snap @@ -0,0 +1,24 @@ +--- +source: src/cmds/java/mvn_cmd.rs +expression: out +--- +mvn test: 7 run, 2 failed (00:10 min) +BUILD FAILURE + +Failures (from surefire-reports/): +1. com.example.FailingTest.shouldReturnUser + AssertionFailedError: expected:<200> but was:<404> + org.opentest4j.AssertionFailedError: expected:<200> but was:<404> + ... 1 framework frames omitted + at com.example.FailingTest.shouldReturnUser(FailingTest.java:42) + ... 1 framework frames omitted + +2. com.example.FailingTest.shouldHandleNull + AssertionError: Unexpected exception: NullPointerException + java.lang.AssertionError: Unexpected exception: NullPointerException + at com.example.FailingTest.shouldHandleNull(FailingTest.java:55) + ... 1 framework frames omitted + + + +(reports: 2 surefire) diff --git a/src/cmds/java/snapshots/rtk__cmds__java__mvn_cmd__tests__snapshot_red_flag_no_tests.snap b/src/cmds/java/snapshots/rtk__cmds__java__mvn_cmd__tests__snapshot_red_flag_no_tests.snap new file mode 100644 index 000000000..dfa55773d --- /dev/null +++ b/src/cmds/java/snapshots/rtk__cmds__java__mvn_cmd__tests__snapshot_red_flag_no_tests.snap @@ -0,0 +1,5 @@ +--- +source: src/cmds/java/mvn_cmd.rs +expression: out +--- +mvn test: 0 tests executed — surefire detected no tests. 
Check pom.xml (surefire plugin configuration) or run: rtk proxy mvn test From 3caa4d4a216feda07a05ea2bf81188c616a11250 Mon Sep 17 00:00:00 2001 From: mariuszs Date: Wed, 15 Apr 2026 23:03:47 +0200 Subject: [PATCH 42/44] =?UTF-8?q?test(mvn):=20token=20savings=20=E2=80=94?= =?UTF-8?q?=20happy=20path=20identity,=20failure=20path=20=E2=89=A585%?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Asserts happy-path enrichment is a no-op and that even on the enriched failure path with a multi-segment Caused-by chain we stay under 15% of the raw log size. --- src/cmds/java/mvn_cmd.rs | 48 ++++++++++++++++++++++++++++++++++++++++ 1 file changed, 48 insertions(+) diff --git a/src/cmds/java/mvn_cmd.rs b/src/cmds/java/mvn_cmd.rs index f12bf80de..4f60b56f3 100644 --- a/src/cmds/java/mvn_cmd.rs +++ b/src/cmds/java/mvn_cmd.rs @@ -2200,4 +2200,52 @@ mod tests { ); insta::assert_snapshot!(out); } + + #[test] + fn savings_happy_path_unchanged_by_enrichment() { + // Happy path short-circuits without I/O; savings must match pre-enrichment. + let text = "mvn test: 859 passed, 4 skipped (02:11 min)"; + let tmp = tempfile::tempdir().unwrap(); + let out = super::enrich_with_reports( + text, + tmp.path(), + std::time::SystemTime::now(), + Some("com.example"), + ); + assert_eq!(out, text, "happy path must not allocate or append"); + } + + #[test] + fn savings_enriched_failures_stays_under_15_percent() { + // Simulate a ~2000-line build log whose text filter produced a short + // summary, plus one big failsafe XML with system-err and a 3-segment + // Caused-by chain. Total enriched output must be ≥85% smaller than raw. 
+ let raw_log: String = std::iter::repeat_n( + "[INFO] Running com.example.some.Heavy.Test — lots of noisy build output\n", + 2000, + ) + .collect::<String>(); + + let tmp = tempfile::tempdir().unwrap(); + let fs = tmp.path().join("target/failsafe-reports"); + std::fs::create_dir_all(&fs).unwrap(); + std::fs::copy( + "tests/fixtures/java/failsafe-reports/TEST-com.example.DbIntegrationIT.xml", + fs.join("TEST-com.example.DbIntegrationIT.xml"), + ) + .unwrap(); + + let since = std::time::SystemTime::now() - std::time::Duration::from_secs(60); + let text_summary = "mvn verify: 4 run, 1 failed (01:23 min)\nBUILD FAILURE"; + let enriched = super::enrich_with_reports(text_summary, tmp.path(), since, Some("com.example")); + + let raw_tokens = count_tokens(&raw_log); + let enriched_tokens = count_tokens(&enriched); + let savings = 100.0 - (enriched_tokens as f64 / raw_tokens as f64 * 100.0); + assert!( + savings >= 85.0, + "expected ≥85% savings on enriched failure path, got {savings:.1}% \ + (raw={raw_tokens}, enriched={enriched_tokens})" + ); + } } From 5a71041f57350d03a7e08cbaab610fe22a9f2aa3 Mon Sep 17 00:00:00 2001 From: mariuszs Date: Wed, 15 Apr 2026 23:11:16 +0200 Subject: [PATCH 43/44] docs(mvn): document XML enrichment, appPackage detection, red-flag Describes the new post-filter XML read, groupId autodetect order, stale-file time-gate, and the rtk proxy escape hatch. 
--- CHANGELOG.md | 3 +++ src/cmds/java/README.md | 45 +++++++++++++++++++++++++++++++++++++++++ 2 files changed, 48 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 0d5c4e248..161233287 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -10,6 +10,9 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 ### Features * **mvn:** add Maven (Java) filter module — test, compile, checkstyle:check, dependency:tree ([#1089](https://github.com/rtk-ai/rtk/pull/1089)) +* **mvn:** enrich `mvn test` / `verify` / `integration-test` output with structured failure details read from `target/surefire-reports/TEST-*.xml` and `target/failsafe-reports/*.xml`. Stack traces are segmented on `Caused by:` with framework frames collapsed; the root-cause segment is always preserved. +* **mvn:** autodetect application package from `pom.xml` `<groupId>` (with `<parent>/<groupId>` fallback) for framework-frame classification. Override via `RTK_MVN_APP_PACKAGE`. +* **mvn:** red-flag heuristic — `no tests run` with no fresh XML reports emits a diagnostic pointing at surefire misconfiguration. 
### Bug Fixes diff --git a/src/cmds/java/README.md b/src/cmds/java/README.md index 3f7ffcd02..a52a6324e 100644 --- a/src/cmds/java/README.md +++ b/src/cmds/java/README.md @@ -12,3 +12,48 @@ - `mvn dependency:tree` strips "omitted for duplicate" lines, "version managed from" annotations, and collapses deep transitive branches - Unknown goals stream via `cmd.status()` passthrough (safe for long-running goals like `spring-boot:run`); rare lifecycle phases (`package`, `install`, `verify`, `clean`, `deploy`) also passthrough — filtered only when the output shape matches compile - Routing via Clap sub-enum with `#[command(external_subcommand)] Other` for unknown goals; compile-like and checkstyle goals received as `Other` are auto-re-dispatched by `route_goal` to the right filter + +## Output enrichment from Surefire/Failsafe XML reports + +When `mvn test` (or verify/integration-test) reports failures, rtk reads +`target/surefire-reports/TEST-*.xml` and `target/failsafe-reports/*.xml` +**after** the build finishes and appends a structured Failures section +with: + +- Full stack trace per failure, with framework frames collapsed and the + root-cause segment preserved (up to 50 lines per trace). +- Captured stdout + stderr from failing tests only, capped at 2000 chars + per test and 10000 chars total. +- File counters in the footer: `(reports: N surefire, M failsafe, K stale files skipped)`. + +### Application-package detection + +rtk classifies stack frames as *application* vs *framework* by comparing +frame class names against the Java `groupId` from `pom.xml`: + +1. `RTK_MVN_APP_PACKAGE` env var (if set, overrides everything). +2. `<project>/<groupId>` from the pom.xml in the current working directory. +3. Fallback: `<project>/<parent>/<groupId>`. +4. Otherwise: no filtering — full stack traces are preserved. + +### Time-gated report reads + +Stale XML reports from previous runs are skipped: only files with +`mtime >= started_at` (captured just before `mvn` executes) are parsed. 
+ +### Red-flag heuristic for "0 tests" + +If the summary says `no tests run` but surefire reports are empty or +absent, rtk emits a diagnostic instead of the silent summary: + +``` +mvn test: 0 tests executed — surefire detected no tests. Check pom.xml (surefire plugin configuration) or run: rtk proxy mvn test +``` + +### Bypass + +For the rare cases where you need the full raw Maven output: + +```bash +rtk proxy mvn test +``` From 2a1eea81f403ddeb58d9fb1b390d63d053099a0a Mon Sep 17 00:00:00 2001 From: mariuszs Date: Thu, 16 Apr 2026 08:05:14 +0200 Subject: [PATCH 44/44] =?UTF-8?q?refactor(mvn):=20simplify=20=E2=80=94=20u?= =?UTF-8?q?nify=20duplicate=20types,=20remove=20dead=20code?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit - Consolidate TestCounts into TestSummary (identical struct) - Unify add_collapsed_frames/add_root_cause_frames into add_frames - Remove dead params from parse_dir_with_limits - Change capture: Option to bool in pom_groupid - Clean up #[allow(dead_code)] with proper visibility - Remove WHAT comments --- Cargo.lock | 2 +- src/cmds/java/mvn_cmd.rs | 34 +++-------- src/cmds/java/pom_groupid.rs | 10 ++-- src/cmds/java/stack_trace.rs | 94 ++++++++++--------------------- src/cmds/java/surefire_reports.rs | 29 +--------- 5 files changed, 47 insertions(+), 122 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 71a91f8bb..b0ed806f5 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -950,7 +950,7 @@ dependencies = [ [[package]] name = "rtk" -version = "0.35.0" +version = "0.34.3" dependencies = [ "anyhow", "automod", diff --git a/src/cmds/java/mvn_cmd.rs b/src/cmds/java/mvn_cmd.rs index 4f60b56f3..b8e9274f9 100644 --- a/src/cmds/java/mvn_cmd.rs +++ b/src/cmds/java/mvn_cmd.rs @@ -4,7 +4,7 @@ //! Preamble -> Testing -> Summary -> Done. //! Strips thousands of noise lines to compact failure reports (99%+ savings). 
-use crate::cmds::java::surefire_reports::{self, FailureKind, SurefireResult, TestFailure}; +use crate::cmds::java::surefire_reports::{self, FailureKind, SurefireResult, TestFailure, TestSummary}; use crate::core::runner; use crate::core::tracking; use crate::core::utils::{exit_code_from_status, resolved_command, strip_ansi, truncate}; @@ -338,22 +338,6 @@ enum TestParseState { Done, } -#[derive(Default)] -struct TestCounts { - run: u32, - failures: u32, - errors: u32, - skipped: u32, -} - -impl TestCounts { - fn add(&mut self, other: &Self) { - self.run += other.run; - self.failures += other.failures; - self.errors += other.errors; - self.skipped += other.skipped; - } -} struct FailureEntry { name: String, @@ -362,8 +346,8 @@ struct FailureEntry { /// Parse the four count fields from a `TESTS_RUN_RE` captures. The regex /// guarantees four numeric groups so defaulting to 0 is only a safety net. -fn parse_counts(caps: ®ex::Captures) -> TestCounts { - TestCounts { +fn parse_counts(caps: ®ex::Captures) -> TestSummary { + TestSummary { run: caps.get(1).map_or(0, |m| m.as_str().parse().unwrap_or(0)), failures: caps.get(2).map_or(0, |m| m.as_str().parse().unwrap_or(0)), errors: caps.get(3).map_or(0, |m| m.as_str().parse().unwrap_or(0)), @@ -547,8 +531,8 @@ fn filter_mvn_test(output: &str) -> String { let mut failures: Vec = Vec::with_capacity(MAX_FAILURES_SHOWN); let mut current_failure: Option = None; - let mut cumulative = TestCounts::default(); - let mut section: Option = None; + let mut cumulative = TestSummary::default(); + let mut section: Option = None; let mut total_time: Option = None; let mut total_failures_seen: usize = 0; @@ -887,17 +871,14 @@ fn should_keep_compile_line(line: &str) -> bool { let stripped = strip_maven_prefix(line); - // Keep error lines if line.starts_with(ERROR_TAG) { return !is_maven_boilerplate(line); } - // Keep BUILD SUCCESS/FAILURE if stripped.contains("BUILD SUCCESS") || stripped.contains("BUILD FAILURE") { return true; } - // Keep 
Total time if TOTAL_TIME_RE.is_match(stripped) { return true; } @@ -935,7 +916,6 @@ fn should_keep_compile_line(line: &str) -> bool { return true; } - // Strip [WARNING] lines for build filter if line.starts_with(WARNING_TAG) { return false; } @@ -1182,13 +1162,13 @@ mod tests { #[test] fn test_test_counts_add() { - let mut a = TestCounts { + let mut a = TestSummary { run: 10, failures: 1, errors: 2, skipped: 3, }; - let b = TestCounts { + let b = TestSummary { run: 100, failures: 20, errors: 30, diff --git a/src/cmds/java/pom_groupid.rs b/src/cmds/java/pom_groupid.rs index a64bf7b80..f2785df91 100644 --- a/src/cmds/java/pom_groupid.rs +++ b/src/cmds/java/pom_groupid.rs @@ -37,7 +37,7 @@ pub(crate) fn extract_groupid(xml: &str) -> Option { let mut stack: Vec = Vec::new(); let mut top_level_groupid: Option = None; let mut parent_groupid: Option = None; - let mut capture: Option = None; + let mut capturing = false; loop { match reader.read_event_into(&mut buf) { @@ -47,14 +47,14 @@ pub(crate) fn extract_groupid(xml: &str) -> Option { .and_then(|s| s.rsplit(':').next()) .unwrap_or("") .to_string(); - stack.push(name.clone()); + stack.push(name); if is_top_level_groupid(&stack) || is_parent_groupid(&stack) { - capture = Some(name); + capturing = true; } } Ok(Event::Text(t)) => { - if capture.is_some() { + if capturing { if let Ok(text) = t.unescape() { let text = text.trim(); if !text.is_empty() { @@ -69,7 +69,7 @@ pub(crate) fn extract_groupid(xml: &str) -> Option { } Ok(Event::End(_)) => { stack.pop(); - capture = None; + capturing = false; if top_level_groupid.is_some() { break; } diff --git a/src/cmds/java/stack_trace.rs b/src/cmds/java/stack_trace.rs index f51adf891..ca3c7d042 100644 --- a/src/cmds/java/stack_trace.rs +++ b/src/cmds/java/stack_trace.rs @@ -8,10 +8,9 @@ const MAX_HEADER_LENGTH: usize = 200; const DEFAULT_ROOT_CAUSE_APP_FRAMES: usize = 10; #[derive(Debug, PartialEq)] -#[allow(dead_code)] -pub(crate) struct Segment { - pub(crate) header: String, - 
pub(crate) frames: Vec, +struct Segment { + header: String, + frames: Vec, } /// Split a stack trace into segments. @@ -24,8 +23,7 @@ pub(crate) struct Segment { /// Indented `"\tCaused by:"` inside Suppressed blocks stays as a frame and /// does NOT split segments — `is_structural_line` preserves it during frame /// collapsing. -#[allow(dead_code)] -pub(crate) fn parse_segments(trace: &str) -> Vec { +fn parse_segments(trace: &str) -> Vec { let trace = trace.trim(); if trace.is_empty() { return Vec::new(); @@ -61,7 +59,6 @@ pub(crate) fn parse_segments(trace: &str) -> Vec { /// Truncate a header to `MAX_HEADER_LENGTH` **Unicode characters** (not bytes), /// appending "..." if truncated. -#[allow(dead_code)] pub(crate) fn truncate_header(header: &str) -> String { let char_count = header.chars().count(); if char_count <= MAX_HEADER_LENGTH { @@ -77,8 +74,7 @@ pub(crate) fn truncate_header(header: &str) -> String { /// When `app_package` is `None` or empty, every frame is considered an app frame /// (framework collapsing disabled). Summary lines like `"\t... 42 more"` are /// always framework artifacts. -#[allow(dead_code)] -pub(crate) fn is_application_frame(frame: &str, app_package: Option<&str>) -> bool { +fn is_application_frame(frame: &str, app_package: Option<&str>) -> bool { let Some(pkg) = app_package.filter(|p| !p.is_empty()) else { return true; }; @@ -93,8 +89,7 @@ pub(crate) fn is_application_frame(frame: &str, app_package: Option<&str>) -> bo /// frames: Suppressed block headers and **indented** Caused-by lines (which /// appear inside Suppressed blocks; top-level Caused-by is already a segment /// boundary, not a frame). 
-#[allow(dead_code)] -pub(crate) fn is_structural_line(line: &str) -> bool { +fn is_structural_line(line: &str) -> bool { if line.is_empty() { return false; } @@ -118,50 +113,14 @@ pub(crate) fn is_structural_line(line: &str) -> bool { /// /// When `app_package` is `None`, all frames are considered app frames and no /// collapsing occurs — pass-through mode. -#[allow(dead_code)] -pub(crate) fn add_collapsed_frames( - output: &mut Vec, - frames: &[String], - app_package: Option<&str>, -) { - let filter = app_package.is_some_and(|p| !p.is_empty()); - if !filter { - for frame in frames { - output.push(frame.clone()); - } - return; - } - - let mut framework_count: usize = 0; - for frame in frames { - let structural = is_structural_line(frame); - if structural || is_application_frame(frame, app_package) { - if framework_count > 0 { - output.push(format!("\t... {framework_count} framework frames omitted")); - framework_count = 0; - } - if structural { - output.push(truncate_header(frame)); - } else { - output.push(frame.clone()); - } - } else { - framework_count += 1; - } - } - if framework_count > 0 { - output.push(format!("\t... {framework_count} framework frames omitted")); - } -} - -/// Like `add_collapsed_frames`, but caps the number of non-structural -/// application frames at `DEFAULT_ROOT_CAUSE_APP_FRAMES`. Structural lines -/// (Suppressed, nested Caused by) bypass the cap. -#[allow(dead_code)] -pub(crate) fn add_root_cause_frames( +/// +/// When `max_app_frames` is `Some(n)`, at most `n` non-structural application +/// frames are kept (root-cause mode). Structural lines bypass the cap. 
+fn add_frames( output: &mut Vec, frames: &[String], app_package: Option<&str>, + max_app_frames: Option, ) { let filter = app_package.is_some_and(|p| !p.is_empty()); if !filter { @@ -182,7 +141,7 @@ pub(crate) fn add_root_cause_frames( } if structural { output.push(truncate_header(frame)); - } else if app_count < DEFAULT_ROOT_CAUSE_APP_FRAMES { + } else if max_app_frames.is_none_or(|cap| app_count < cap) { output.push(frame.clone()); app_count += 1; } @@ -197,14 +156,13 @@ pub(crate) fn add_root_cause_frames( /// Process a Java stack trace: /// - Top-level header preserved (truncated to 200 chars). -/// - Non-root segments: header + `add_collapsed_frames`. -/// - Root (last) segment: header + `add_root_cause_frames`. +/// - Non-root segments: header + collapsed frames. +/// - Root (last) segment: header + capped root-cause frames. /// - If `max_lines > 0` and the collapsed output exceeds the cap, /// `apply_hard_cap` is called to truncate while preserving the root cause. /// /// Returns `None` iff `raw` is empty or whitespace-only. 
-#[allow(dead_code)] -pub fn process(raw: &str, app_package: Option<&str>, max_lines: usize) -> Option { +pub(crate) fn process(raw: &str, app_package: Option<&str>, max_lines: usize) -> Option { let trimmed = raw.trim(); if trimmed.is_empty() { return None; @@ -219,16 +177,21 @@ pub fn process(raw: &str, app_package: Option<&str>, max_lines: usize) -> Option out.push(truncate_header(&segments[0].header)); if segments.len() == 1 { - add_collapsed_frames(&mut out, &segments[0].frames, app_package); + add_frames(&mut out, &segments[0].frames, app_package, None); } else { - add_collapsed_frames(&mut out, &segments[0].frames, app_package); + add_frames(&mut out, &segments[0].frames, app_package, None); for seg in &segments[1..segments.len() - 1] { out.push(truncate_header(&seg.header)); - add_collapsed_frames(&mut out, &seg.frames, app_package); + add_frames(&mut out, &seg.frames, app_package, None); } let root = segments.last().unwrap(); out.push(truncate_header(&root.header)); - add_root_cause_frames(&mut out, &root.frames, app_package); + add_frames( + &mut out, + &root.frames, + app_package, + Some(DEFAULT_ROOT_CAUSE_APP_FRAMES), + ); } if max_lines > 0 && out.len() > max_lines { @@ -434,7 +397,12 @@ mod tests { fn collect_root_cause(frames: &[&str], app_package: Option<&str>) -> Vec { let frames: Vec = frames.iter().map(|s| s.to_string()).collect(); let mut out = Vec::new(); - add_root_cause_frames(&mut out, &frames, app_package); + add_frames( + &mut out, + &frames, + app_package, + Some(DEFAULT_ROOT_CAUSE_APP_FRAMES), + ); out } @@ -542,7 +510,7 @@ mod tests { fn collect_collapsed(frames: &[&str], app_package: Option<&str>) -> Vec { let frames: Vec = frames.iter().map(|s| s.to_string()).collect(); let mut out = Vec::new(); - add_collapsed_frames(&mut out, &frames, app_package); + add_frames(&mut out, &frames, app_package, None); out } diff --git a/src/cmds/java/surefire_reports.rs b/src/cmds/java/surefire_reports.rs index 326b03e2e..15e5190be 100644 --- 
a/src/cmds/java/surefire_reports.rs +++ b/src/cmds/java/surefire_reports.rs @@ -11,8 +11,7 @@ use std::time::SystemTime; pub const DEFAULT_STACK_TRACE_LINES: usize = 50; pub const DEFAULT_PER_TEST_OUTPUT_LIMIT: usize = 2000; -#[allow(dead_code)] -pub const DEFAULT_TOTAL_OUTPUT_LIMIT: usize = 10_000; +const DEFAULT_TOTAL_OUTPUT_LIMIT: usize = 10_000; #[derive(Debug, Default, PartialEq)] pub struct TestSummary { @@ -23,7 +22,7 @@ pub struct TestSummary { } impl TestSummary { - fn add(&mut self, other: &Self) { + pub(crate) fn add(&mut self, other: &Self) { self.run += other.run; self.failures += other.failures; self.errors += other.errors; @@ -88,7 +87,6 @@ fn parse_u32_attr(reader: &Reader<&[u8]>, start: &BytesStart<'_>, key: &[u8]) -> /// /// Returns `None` only if the XML is completely malformed; otherwise a /// best-effort result is returned. -#[allow(dead_code)] pub(crate) fn parse_content(xml: &str, app_package: Option<&str>) -> Option { #[derive(Clone, Copy, PartialEq)] enum CaptureField { @@ -269,30 +267,10 @@ fn truncate_test_output(output: &str, max_chars: usize) -> String { /// - Files whose `mtime < since` are skipped and counted in `files_skipped_stale`. /// - Files that parse to `None` (malformed) count in `files_malformed`. /// - Returns `None` only if the directory does not exist or is empty. 
-#[allow(dead_code)] pub fn parse_dir( dir: &Path, since: Option, app_package: Option<&str>, -) -> Option { - parse_dir_with_limits( - dir, - since, - app_package, - DEFAULT_PER_TEST_OUTPUT_LIMIT, - DEFAULT_TOTAL_OUTPUT_LIMIT, - DEFAULT_STACK_TRACE_LINES, - ) -} - -#[allow(dead_code)] -pub fn parse_dir_with_limits( - dir: &Path, - since: Option, - app_package: Option<&str>, - _per_test_output_limit: usize, - total_output_limit: usize, - _stack_trace_lines: usize, ) -> Option { if !dir.exists() || !dir.is_dir() { return None; @@ -350,11 +328,10 @@ pub fn parse_dir_with_limits( return None; } - apply_total_output_limit(&mut aggregate.failures, total_output_limit); + apply_total_output_limit(&mut aggregate.failures, DEFAULT_TOTAL_OUTPUT_LIMIT); Some(aggregate) } -#[allow(dead_code)] fn apply_total_output_limit(failures: &mut [TestFailure], total_limit: usize) { let mut budget = total_limit; let mut exhausted = false;