diff --git a/codex-rs/app-server/tests/suite/v2/turn_start.rs b/codex-rs/app-server/tests/suite/v2/turn_start.rs
index 441c5558bd5..e235ee6b61a 100644
--- a/codex-rs/app-server/tests/suite/v2/turn_start.rs
+++ b/codex-rs/app-server/tests/suite/v2/turn_start.rs
@@ -1857,6 +1857,187 @@ async fn turn_start_emits_spawn_agent_item_with_model_metadata_v2() -> Result<()
     Ok(())
 }
 
+#[tokio::test]
+async fn turn_start_emits_spawn_agent_item_with_effective_role_model_metadata_v2() -> Result<()> {
+    skip_if_no_network!(Ok(()));
+
+    const CHILD_PROMPT: &str = "child: do work";
+    const PARENT_PROMPT: &str = "spawn a child and continue";
+    const SPAWN_CALL_ID: &str = "spawn-call-1";
+    const REQUESTED_MODEL: &str = "gpt-5.1";
+    const REQUESTED_REASONING_EFFORT: ReasoningEffort = ReasoningEffort::Low;
+    const ROLE_MODEL: &str = "gpt-5.1-codex-max";
+    const ROLE_REASONING_EFFORT: ReasoningEffort = ReasoningEffort::High;
+
+    let server = responses::start_mock_server().await;
+    let spawn_args = serde_json::to_string(&json!({
+        "message": CHILD_PROMPT,
+        "agent_type": "custom",
+        "model": REQUESTED_MODEL,
+        "reasoning_effort": REQUESTED_REASONING_EFFORT,
+    }))?;
+    let _parent_turn = responses::mount_sse_once_match(
+        &server,
+        |req: &wiremock::Request| body_contains(req, PARENT_PROMPT),
+        responses::sse(vec![
+            responses::ev_response_created("resp-turn1-1"),
+            responses::ev_function_call(SPAWN_CALL_ID, "spawn_agent", &spawn_args),
+            responses::ev_completed("resp-turn1-1"),
+        ]),
+    )
+    .await;
+    let _child_turn = responses::mount_sse_once_match(
+        &server,
+        |req: &wiremock::Request| {
+            body_contains(req, CHILD_PROMPT) && !body_contains(req, SPAWN_CALL_ID)
+        },
+        responses::sse(vec![
+            responses::ev_response_created("resp-child-1"),
+            responses::ev_assistant_message("msg-child-1", "child done"),
+            responses::ev_completed("resp-child-1"),
+        ]),
+    )
+    .await;
+    let _parent_follow_up = responses::mount_sse_once_match(
+        &server,
+        |req: &wiremock::Request| body_contains(req, SPAWN_CALL_ID),
+        responses::sse(vec![
+            responses::ev_response_created("resp-turn1-2"),
+            responses::ev_assistant_message("msg-turn1-2", "parent done"),
+            responses::ev_completed("resp-turn1-2"),
+        ]),
+    )
+    .await;
+
+    let codex_home = TempDir::new()?;
+    create_config_toml(
+        codex_home.path(),
+        &server.uri(),
+        "never",
+        &BTreeMap::from([(Feature::Collab, true)]),
+    )?;
+    std::fs::write(
+        codex_home.path().join("custom-role.toml"),
+        format!("model = \"{ROLE_MODEL}\"\nmodel_reasoning_effort = \"{ROLE_REASONING_EFFORT}\"\n",),
+    )?;
+    let config_path = codex_home.path().join("config.toml");
+    let base_config = std::fs::read_to_string(&config_path)?;
+    std::fs::write(
+        &config_path,
+        format!(
+            r#"{base_config}
+
+[agents.custom]
+description = "Custom role"
+config_file = "./custom-role.toml"
+"#
+        ),
+    )?;
+
+    let mut mcp = McpProcess::new(codex_home.path()).await?;
+    timeout(DEFAULT_READ_TIMEOUT, mcp.initialize()).await??;
+
+    let thread_req = mcp
+        .send_thread_start_request(ThreadStartParams {
+            model: Some("gpt-5.2-codex".to_string()),
+            ..Default::default()
+        })
+        .await?;
+    let thread_resp: JSONRPCResponse = timeout(
+        DEFAULT_READ_TIMEOUT,
+        mcp.read_stream_until_response_message(RequestId::Integer(thread_req)),
+    )
+    .await??;
+    let ThreadStartResponse { thread, .. } = to_response::<ThreadStartResponse>(thread_resp)?;
+
+    let turn_req = mcp
+        .send_turn_start_request(TurnStartParams {
+            thread_id: thread.id.clone(),
+            input: vec![V2UserInput::Text {
+                text: PARENT_PROMPT.to_string(),
+                text_elements: Vec::new(),
+            }],
+            ..Default::default()
+        })
+        .await?;
+    let turn_resp: JSONRPCResponse = timeout(
+        DEFAULT_READ_TIMEOUT,
+        mcp.read_stream_until_response_message(RequestId::Integer(turn_req)),
+    )
+    .await??;
+    let turn: TurnStartResponse = to_response::<TurnStartResponse>(turn_resp)?;
+
+    let spawn_completed = timeout(DEFAULT_READ_TIMEOUT, async {
+        loop {
+            let completed_notif = mcp
+                .read_stream_until_notification_message("item/completed")
+                .await?;
+            let completed: ItemCompletedNotification =
+                serde_json::from_value(completed_notif.params.expect("item/completed params"))?;
+            if let ThreadItem::CollabAgentToolCall { id, .. } = &completed.item
+                && id == SPAWN_CALL_ID
+            {
+                return Ok::<ThreadItem, anyhow::Error>(completed.item);
+            }
+        }
+    })
+    .await??;
+    let ThreadItem::CollabAgentToolCall {
+        id,
+        tool,
+        status,
+        sender_thread_id,
+        receiver_thread_ids,
+        prompt,
+        model,
+        reasoning_effort,
+        agents_states,
+    } = spawn_completed
+    else {
+        unreachable!("loop ensures we break on collab agent tool call items");
+    };
+    let receiver_thread_id = receiver_thread_ids
+        .first()
+        .cloned()
+        .expect("spawn completion should include child thread id");
+    assert_eq!(id, SPAWN_CALL_ID);
+    assert_eq!(tool, CollabAgentTool::SpawnAgent);
+    assert_eq!(status, CollabAgentToolCallStatus::Completed);
+    assert_eq!(sender_thread_id, thread.id);
+    assert_eq!(receiver_thread_ids, vec![receiver_thread_id.clone()]);
+    assert_eq!(prompt, Some(CHILD_PROMPT.to_string()));
+    assert_eq!(model, Some(ROLE_MODEL.to_string()));
+    assert_eq!(reasoning_effort, Some(ROLE_REASONING_EFFORT));
+    assert_eq!(
+        agents_states,
+        HashMap::from([(
+            receiver_thread_id,
+            CollabAgentState {
+                status: CollabAgentStatus::PendingInit,
+                message: None,
+            },
+        )])
+    );
+
+    let turn_completed = timeout(DEFAULT_READ_TIMEOUT, async {
+        loop {
+            let turn_completed_notif = mcp
+                .read_stream_until_notification_message("turn/completed")
+                .await?;
+            let turn_completed: TurnCompletedNotification = serde_json::from_value(
+                turn_completed_notif.params.expect("turn/completed params"),
+            )?;
+            if turn_completed.thread_id == thread.id && turn_completed.turn.id == turn.turn.id {
+                return Ok::<TurnCompletedNotification, anyhow::Error>(turn_completed);
+            }
+        }
+    })
+    .await??;
+    assert_eq!(turn_completed.thread_id, thread.id);
+
+    Ok(())
+}
+
 #[tokio::test]
 async fn turn_start_file_change_approval_accept_for_session_persists_v2() -> Result<()> {
     skip_if_no_network!(Ok(()));
diff --git a/codex-rs/core/src/agent/control.rs b/codex-rs/core/src/agent/control.rs
index fc06fcddeaa..83e6a3a0449 100644
--- a/codex-rs/core/src/agent/control.rs
+++ b/codex-rs/core/src/agent/control.rs
@@ -3,6 +3,7 @@ use crate::agent::guards::Guards;
 use crate::agent::role::DEFAULT_ROLE_NAME;
 use crate::agent::role::resolve_role_config;
 use crate::agent::status::is_final;
+use crate::codex_thread::ThreadConfigSnapshot;
 use crate::error::CodexErr;
 use crate::error::Result as CodexResult;
 use crate::find_thread_path_by_id_str;
@@ -360,6 +361,19 @@ impl AgentControl {
         ))
     }
 
+    pub(crate) async fn get_agent_config_snapshot(
+        &self,
+        agent_id: ThreadId,
+    ) -> Option<ThreadConfigSnapshot> {
+        let Ok(state) = self.upgrade() else {
+            return None;
+        };
+        let Ok(thread) = state.get_thread(agent_id).await else {
+            return None;
+        };
+        Some(thread.config_snapshot().await)
+    }
+
     /// Subscribe to status updates for `agent_id`, yielding the latest value and changes.
     pub(crate) async fn subscribe_status(
         &self,
diff --git a/codex-rs/core/src/tools/handlers/multi_agents/spawn.rs b/codex-rs/core/src/tools/handlers/multi_agents/spawn.rs
index 26052c6d4f6..7a27cd94c8d 100644
--- a/codex-rs/core/src/tools/handlers/multi_agents/spawn.rs
+++ b/codex-rs/core/src/tools/handlers/multi_agents/spawn.rs
@@ -98,15 +98,37 @@ impl ToolHandler for Handler {
             ),
             Err(_) => (None, AgentStatus::NotFound),
         };
-        let (new_agent_nickname, new_agent_role) = match new_thread_id {
-            Some(thread_id) => session
+        let agent_snapshot = match new_thread_id {
+            Some(thread_id) => {
+                session
+                    .services
+                    .agent_control
+                    .get_agent_config_snapshot(thread_id)
+                    .await
+            }
+            None => None,
+        };
+        let (new_agent_nickname, new_agent_role) = match (&agent_snapshot, new_thread_id) {
+            (Some(snapshot), _) => (
+                snapshot.session_source.get_nickname(),
+                snapshot.session_source.get_agent_role(),
+            ),
+            (None, Some(thread_id)) => session
                 .services
                 .agent_control
                 .get_agent_nickname_and_role(thread_id)
                 .await
                 .unwrap_or((None, None)),
-            None => (None, None),
+            (None, None) => (None, None),
         };
+        let effective_model = agent_snapshot
+            .as_ref()
+            .map(|snapshot| snapshot.model.clone())
+            .unwrap_or_else(|| args.model.clone().unwrap_or_default());
+        let effective_reasoning_effort = agent_snapshot
+            .as_ref()
+            .and_then(|snapshot| snapshot.reasoning_effort)
+            .unwrap_or(args.reasoning_effort.unwrap_or_default());
         let nickname = new_agent_nickname.clone();
         session
             .send_event(
@@ -118,8 +140,8 @@ impl ToolHandler for Handler {
                 new_agent_nickname,
                 new_agent_role,
                 prompt,
-                model: args.model.clone().unwrap_or_default(),
-                reasoning_effort: args.reasoning_effort.unwrap_or_default(),
+                model: effective_model,
+                reasoning_effort: effective_reasoning_effort,
                 status,
             }
             .into(),
diff --git a/codex-rs/protocol/src/protocol.rs b/codex-rs/protocol/src/protocol.rs
index 152743b3e13..daf3b7d74a3 100644
--- a/codex-rs/protocol/src/protocol.rs
+++ b/codex-rs/protocol/src/protocol.rs
@@ -3263,9 +3263,9 @@ pub struct CollabAgentSpawnEndEvent {
     /// Initial prompt sent to the agent. Can be empty to prevent CoT leaking at the
     /// beginning.
     pub prompt: String,
-    /// Model requested for the spawned agent.
+    /// Effective model used by the spawned agent after inheritance and role overrides.
     pub model: String,
-    /// Reasoning effort requested for the spawned agent.
+    /// Effective reasoning effort used by the spawned agent after inheritance and role overrides.
     pub reasoning_effort: ReasoningEffortConfig,
     /// Last known status of the new agent reported to the sender agent.
     pub status: AgentStatus,