57 changes: 57 additions & 0 deletions src/auth/codex.rs
@@ -2,6 +2,7 @@ use anyhow::{Context, Result};
use base64::{Engine, engine::general_purpose::URL_SAFE_NO_PAD};
use serde::{Deserialize, Serialize};
use serde_json::Value;
use std::collections::BTreeMap;
use std::path::PathBuf;
use std::sync::RwLock;

@@ -56,6 +57,20 @@ struct LegacyTokens {
expires_at: Option<i64>,
}

#[derive(Debug, Deserialize)]
struct CodexCliConfig {
model_provider: Option<String>,
#[serde(default)]
model_providers: BTreeMap<String, CodexCliModelProvider>,
}

#[derive(Debug, Deserialize)]
struct CodexCliModelProvider {
base_url: Option<String>,
wire_api: Option<String>,
requires_openai_auth: Option<bool>,
}

static ACTIVE_ACCOUNT_OVERRIDE: RwLock<Option<String>> = RwLock::new(None);
const ACCOUNT_LABEL_PREFIX: &str = "openai";

@@ -118,6 +133,48 @@ fn legacy_auth_path() -> Result<PathBuf> {
crate::storage::user_home_path(".codex/auth.json")
}

fn legacy_config_path() -> Result<PathBuf> {
crate::storage::user_home_path(".codex/config.toml")
}

/// Return the active Codex CLI Responses API base URL when it is safe to reuse
/// with the same OpenAI/Codex credentials jcode already trusts.
///
/// Codex supports model-provider routing in `~/.codex/config.toml`; if the
/// selected provider explicitly uses the Responses wire API and OpenAI auth,
/// jcode should send its OpenAI provider traffic to that base instead of the
/// baked-in OpenAI/ChatGPT endpoints. This keeps jcode aligned with a local
/// Codex setup such as an authenticated gateway.
pub fn configured_responses_base_url() -> Option<String> {
let path = legacy_config_path().ok()?;
let content = std::fs::read_to_string(path).ok()?;
let config: CodexCliConfig = toml::from_str(&content).ok()?;
let provider_name = config.model_provider?.trim().to_string();
if provider_name.is_empty() {
return None;
}
let provider = config.model_providers.get(&provider_name)?;
let wire_api = provider.wire_api.as_deref()?.trim();
if !wire_api.eq_ignore_ascii_case("responses") {
return None;
}
if provider.requires_openai_auth != Some(true) {
return None;
}
normalize_responses_base_url(provider.base_url.as_deref()?)
}

fn normalize_responses_base_url(raw: &str) -> Option<String> {
let trimmed = raw.trim().trim_end_matches('/');
if trimmed.is_empty() {
return None;
}
if !(trimmed.starts_with("https://") || trimmed.starts_with("http://")) {
return None;
}
Some(trimmed.to_string())
}

pub fn legacy_auth_file_path() -> Result<PathBuf> {
legacy_auth_path()
}
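For reference, `configured_responses_base_url` only fires on a `~/.codex/config.toml` of the following shape — a minimal sketch mirroring the test fixtures below; the provider name and URL are illustrative, not defaults:

model_provider = "MyGateway"

[model_providers.MyGateway]
base_url = "https://gateway.example.com/v1"
wire_api = "responses"
requires_openai_auth = true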
49 changes: 49 additions & 0 deletions src/auth/codex_tests.rs
@@ -63,6 +63,55 @@ fn auth_file_with_api_key_only() {
assert_eq!(file.api_key, Some("sk-test-key-123".to_string()));
}

#[test]
fn configured_responses_base_url_uses_active_codex_provider() {
let _lock = crate::storage::lock_test_env();
let temp = tempfile::TempDir::new().unwrap();
let _home = EnvVarGuard::set_path("JCODE_HOME", temp.path());
let codex_dir = temp.path().join("external/.codex");
std::fs::create_dir_all(&codex_dir).unwrap();
std::fs::write(
codex_dir.join("config.toml"),
r#"
model_provider = "InputAI"

[model_providers.InputAI]
base_url = "https://ai.input.im/"
wire_api = "responses"
requires_openai_auth = true
"#,
)
.unwrap();

assert_eq!(
configured_responses_base_url().as_deref(),
Some("https://ai.input.im")
);
}

#[test]
fn configured_responses_base_url_ignores_non_responses_provider() {
let _lock = crate::storage::lock_test_env();
let temp = tempfile::TempDir::new().unwrap();
let _home = EnvVarGuard::set_path("JCODE_HOME", temp.path());
let codex_dir = temp.path().join("external/.codex");
std::fs::create_dir_all(&codex_dir).unwrap();
std::fs::write(
codex_dir.join("config.toml"),
r#"
model_provider = "Other"

[model_providers.Other]
base_url = "https://example.invalid/v1"
wire_api = "chat_completions"
requires_openai_auth = true
"#,
)
.unwrap();

assert!(configured_responses_base_url().is_none());
}

#[test]
fn auth_file_minimal_tokens() {
let json = r#"{
73 changes: 40 additions & 33 deletions src/provider/openai.rs
@@ -668,11 +668,14 @@ impl OpenAIProvider {
}

fn responses_url(credentials: &CodexCredentials) -> String {
-let base = if Self::is_chatgpt_mode(credentials) {
-    CHATGPT_API_BASE
-} else {
-    OPENAI_API_BASE
-};
+let configured_base = crate::auth::codex::configured_responses_base_url();
+let base = configured_base.as_deref().unwrap_or_else(|| {
+    if Self::is_chatgpt_mode(credentials) {
+        CHATGPT_API_BASE
+    } else {
+        OPENAI_API_BASE
+    }
+});
format!("{}/{}", base.trim_end_matches('/'), RESPONSES_PATH)
}

@@ -696,6 +699,7 @@ impl OpenAIProvider {
input: &[Value],
api_tools: &[Value],
is_chatgpt_mode: bool,
allow_optional_params: bool,
max_output_tokens: Option<u32>,
reasoning_effort: Option<&str>,
service_tier: Option<&str>,
@@ -713,42 +717,45 @@
"instructions": instructions,
"input": input,
"tools": tools,
"tool_choice": "auto",
"parallel_tool_calls": false,
"stream": true,
"store": false,
"include": ["reasoning.encrypted_content"],
});

-if !is_chatgpt_mode && let Some(max_output_tokens) = max_output_tokens {
-    request["max_output_tokens"] = serde_json::json!(max_output_tokens);
-}
-
-if let Some(effort) = reasoning_effort {
-    request["reasoning"] = serde_json::json!({ "effort": effort });
-}
-
-if let Some(service_tier) = service_tier {
-    request["service_tier"] = serde_json::json!(service_tier);
-}
-
-if let Some(compact_threshold) = native_compaction_threshold {
-    request["context_management"] = serde_json::json!([
-        {
-            "type": "compaction",
-            "compact_threshold": compact_threshold,
-        }
-    ]);
-}
-
-if !is_chatgpt_mode {
-    if let Some(key) = prompt_cache_key {
-        request["prompt_cache_key"] = serde_json::json!(key);
-    }
-    if let Some(retention) =
-        Self::effective_prompt_cache_retention(model_id, prompt_cache_retention)
-    {
-        request["prompt_cache_retention"] = serde_json::json!(retention);
-    }
-}
+if allow_optional_params {
+    request["tool_choice"] = serde_json::json!("auto");
+    request["parallel_tool_calls"] = serde_json::json!(false);
+    request["store"] = serde_json::json!(false);
+    request["include"] = serde_json::json!(["reasoning.encrypted_content"]);
+
+    if !is_chatgpt_mode && let Some(max_output_tokens) = max_output_tokens {
+        request["max_output_tokens"] = serde_json::json!(max_output_tokens);
+    }
+
+    if let Some(effort) = reasoning_effort {
+        request["reasoning"] = serde_json::json!({ "effort": effort });
+    }
+
+    if let Some(service_tier) = service_tier {
+        request["service_tier"] = serde_json::json!(service_tier);
+    }
+
+    if let Some(compact_threshold) = native_compaction_threshold {
+        request["context_management"] = serde_json::json!([
+            {
+                "type": "compaction",
+                "compact_threshold": compact_threshold,
+            }
+        ]);
+    }
+
+    if !is_chatgpt_mode {
+        if let Some(key) = prompt_cache_key {
+            request["prompt_cache_key"] = serde_json::json!(key);
+        }
+        if let Some(retention) =
+            Self::effective_prompt_cache_retention(model_id, prompt_cache_retention)
+        {
+            request["prompt_cache_retention"] = serde_json::json!(retention);
+        }
+    }
+}

2 changes: 2 additions & 0 deletions src/provider/openai_provider_impl.rs
@@ -26,6 +26,7 @@ impl Provider for OpenAIProvider {
};
(instructions, is_chatgpt)
};
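// Note: a third-party Responses gateway may not accept OpenAI-only request
// fields, so optional parameters are sent only for the built-in endpoints.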
let allow_optional_params = crate::auth::codex::configured_responses_base_url().is_none();
let reasoning_effort = self
.reasoning_effort
.read()
@@ -44,6 +45,7 @@
&input,
&api_tools,
is_chatgpt_mode,
allow_optional_params,
self.max_output_tokens,
reasoning_effort.as_deref(),
service_tier.as_deref(),
66 changes: 66 additions & 0 deletions src/provider/openai_tests.rs
@@ -159,6 +159,72 @@ async fn live_openai_smoke(model: &str, sentinel: &str) -> Result<Option<String>>
Ok(Some(response))
}

#[test]
fn responses_url_uses_active_codex_provider_base_url() {
let _lock = crate::storage::lock_test_env();
let temp = tempfile::TempDir::new().unwrap();
let _home = EnvVarGuard::set_path("JCODE_HOME", temp.path());
let codex_dir = temp.path().join("external/.codex");
std::fs::create_dir_all(&codex_dir).unwrap();
std::fs::write(
codex_dir.join("config.toml"),
r#"
model_provider = "InputAI"

[model_providers.InputAI]
base_url = "https://ai.input.im"
wire_api = "responses"
requires_openai_auth = true
"#,
)
.unwrap();
let credentials = CodexCredentials {
access_token: "sk-test".to_string(),
refresh_token: String::new(),
id_token: None,
account_id: None,
expires_at: None,
};

assert_eq!(
OpenAIProvider::responses_url(&credentials),
"https://ai.input.im/responses"
);
}

#[test]
fn configured_base_url_request_omits_openai_direct_options() {
let request = OpenAIProvider::build_response_request(
"gpt-5.5",
"system".to_string(),
&[],
&[],
false,
false,
Some(1234),
Some("xhigh"),
Some("fast"),
Some("cache-key"),
Some("24h"),
Some(4096),
);

for key in [
"max_output_tokens",
"reasoning",
"service_tier",
"prompt_cache_key",
"prompt_cache_retention",
"context_management",
"include",
"tool_choice",
"parallel_tool_calls",
"store",
] {
assert!(request.get(key).is_none(), "unexpected {key}: {request}");
}
}

include!("openai_tests/models_state.rs");
include!("openai_tests/responses_input.rs");
include!("openai_tests/transport_runtime.rs");
4 changes: 4 additions & 0 deletions src/provider/openai_tests/payloads.rs
@@ -6,6 +6,7 @@ fn test_build_response_request_includes_stream_for_http() {
&[],
&[],
false,
true,
Some(DEFAULT_MAX_OUTPUT_TOKENS),
None,
None,
@@ -25,6 +26,7 @@ fn test_websocket_payload_strips_stream_and_background() {
&[serde_json::json!({"role": "user", "content": "hello"})],
&[],
false,
true,
Some(DEFAULT_MAX_OUTPUT_TOKENS),
None,
None,
@@ -64,6 +66,7 @@ fn test_websocket_payload_preserves_required_fields() {
&[serde_json::json!({"role": "user", "content": "hello"})],
&[serde_json::json!({"type": "function", "name": "bash"})],
false,
true,
Some(16384),
Some("high"),
None,
@@ -98,6 +101,7 @@ fn test_websocket_continuation_request_excludes_transport_fields() {
&[],
&[serde_json::json!({"type": "function", "name": "bash"})],
false,
true,
Some(DEFAULT_MAX_OUTPUT_TOKENS),
None,
None,