From 59661c43101339ec467914164eaf71cb9f1868a9 Mon Sep 17 00:00:00 2001
From: Saatvik Arya
Date: Sun, 4 Jan 2026 13:44:39 +0530
Subject: [PATCH 1/2] feat(config): add OpenCode variants system support and split config files

- Add opencode-modern.json for v1.0.210+ using variants system (6 base models, ~150 lines)
- Rename full-opencode.json to opencode-legacy.json for v1.0.209 and below (20+ separate model entries, 572 lines)
- Add variants field to UserConfig type for modern config format
- Add providerOptions to RequestBody type to receive variant config from OpenCode
- Update getReasoningConfig() to accept externalConfig parameter for variant selection
- Enable variant cycling with Ctrl+T in OpenCode v1.0.210+
- Convert tabs to spaces and standardize quote style (no functional changes)

The modern config reduces size by 74% while maintaining the same functionality.
Both configs provide access to all GPT-5.2/5.1 model families with proper reasoning effort variants.
---
 ...ull-opencode.json => opencode-legacy.json} |    0
 config/opencode-modern.json                   |  239 ++
 lib/request/request-transformer.ts            |  966 ++---
 lib/types.ts                                  |   12 +
 test/config.test.ts                           |  318 +-
 test/request-transformer.test.ts              | 3168 ++++++++++-------
 6 files changed, 2817 insertions(+), 1886 deletions(-)
 rename config/{full-opencode.json => opencode-legacy.json} (100%)
 create mode 100644 config/opencode-modern.json

diff --git a/config/full-opencode.json b/config/opencode-legacy.json
similarity index 100%
rename from config/full-opencode.json
rename to config/opencode-legacy.json
diff --git a/config/opencode-modern.json b/config/opencode-modern.json
new file mode 100644
index 0000000..c274f47
--- /dev/null
+++ b/config/opencode-modern.json
@@ -0,0 +1,239 @@
+{
+  "$schema": "https://opencode.ai/config.json",
+  "plugin": [
+    "opencode-openai-codex-auth@4.2.0"
+  ],
+  "provider": {
+    "openai": {
+      "options": {
+        "reasoningEffort": "medium",
+        "reasoningSummary": "auto",
+        "textVerbosity": "medium",
+        "include": [
+          "reasoning.encrypted_content"
+        ],
+        "store": false
+      },
+      "models": {
+        "gpt-5.2": {
+          "name": "GPT 5.2 (OAuth)",
+          "limit": {
+            "context": 272000,
+            "output": 128000
+          },
+          "modalities": {
+            "input": [
+              "text",
+              "image"
+            ],
+            "output": [
+              "text"
+            ]
+          },
+          "variants": {
+            "none": {
+              "reasoningEffort": "none",
+              "reasoningSummary": "auto",
+              "textVerbosity": "medium"
+            },
+            "low": {
+              "reasoningEffort": "low",
+              "reasoningSummary": "auto",
+              "textVerbosity": "medium"
+            },
+            "medium": {
+              "reasoningEffort": "medium",
+              "reasoningSummary": "auto",
+              "textVerbosity": "medium"
+            },
+            "high": {
+              "reasoningEffort": "high",
+              "reasoningSummary": "detailed",
+              "textVerbosity": "medium"
+            },
+            "xhigh": {
+              "reasoningEffort": "xhigh",
+              "reasoningSummary": "detailed",
+              "textVerbosity": "medium"
+            }
+          }
+        },
+        "gpt-5.2-codex": {
+          "name": "GPT 5.2 Codex (OAuth)",
+          "limit": {
+            "context": 272000,
+            "output": 128000
+          },
+          "modalities": {
+            "input": [
+              "text",
+              "image"
+            ],
+            "output": [
+              "text"
+            ]
+          },
+          "variants": {
+            "low": {
+              "reasoningEffort": "low",
+              "reasoningSummary": "auto",
+              "textVerbosity": "medium"
+            },
+            "medium": {
+              "reasoningEffort": "medium",
+              "reasoningSummary": "auto",
+              "textVerbosity": "medium"
+            },
+            "high": {
+              "reasoningEffort": "high",
+              "reasoningSummary": "detailed",
+              "textVerbosity": "medium"
+            },
+            "xhigh": {
+              "reasoningEffort": "xhigh",
+              "reasoningSummary": "detailed",
+              "textVerbosity": "medium"
+            }
+          }
+        },
+        "gpt-5.1-codex-max": {
+          "name": "GPT 5.1 
Codex Max (OAuth)", + "limit": { + "context": 272000, + "output": 128000 + }, + "modalities": { + "input": [ + "text", + "image" + ], + "output": [ + "text" + ] + }, + "variants": { + "low": { + "reasoningEffort": "low", + "reasoningSummary": "detailed", + "textVerbosity": "medium" + }, + "medium": { + "reasoningEffort": "medium", + "reasoningSummary": "detailed", + "textVerbosity": "medium" + }, + "high": { + "reasoningEffort": "high", + "reasoningSummary": "detailed", + "textVerbosity": "medium" + }, + "xhigh": { + "reasoningEffort": "xhigh", + "reasoningSummary": "detailed", + "textVerbosity": "medium" + } + } + }, + "gpt-5.1-codex": { + "name": "GPT 5.1 Codex (OAuth)", + "limit": { + "context": 272000, + "output": 128000 + }, + "modalities": { + "input": [ + "text", + "image" + ], + "output": [ + "text" + ] + }, + "variants": { + "low": { + "reasoningEffort": "low", + "reasoningSummary": "auto", + "textVerbosity": "medium" + }, + "medium": { + "reasoningEffort": "medium", + "reasoningSummary": "auto", + "textVerbosity": "medium" + }, + "high": { + "reasoningEffort": "high", + "reasoningSummary": "detailed", + "textVerbosity": "medium" + } + } + }, + "gpt-5.1-codex-mini": { + "name": "GPT 5.1 Codex Mini (OAuth)", + "limit": { + "context": 272000, + "output": 128000 + }, + "modalities": { + "input": [ + "text", + "image" + ], + "output": [ + "text" + ] + }, + "variants": { + "medium": { + "reasoningEffort": "medium", + "reasoningSummary": "auto", + "textVerbosity": "medium" + }, + "high": { + "reasoningEffort": "high", + "reasoningSummary": "detailed", + "textVerbosity": "medium" + } + } + }, + "gpt-5.1": { + "name": "GPT 5.1 (OAuth)", + "limit": { + "context": 272000, + "output": 128000 + }, + "modalities": { + "input": [ + "text", + "image" + ], + "output": [ + "text" + ] + }, + "variants": { + "none": { + "reasoningEffort": "none", + "reasoningSummary": "auto", + "textVerbosity": "medium" + }, + "low": { + "reasoningEffort": "low", + "reasoningSummary": "auto", + "textVerbosity": "low" + }, + "medium": { + "reasoningEffort": "medium", + "reasoningSummary": "auto", + "textVerbosity": "medium" + }, + "high": { + "reasoningEffort": "high", + "reasoningSummary": "detailed", + "textVerbosity": "high" + } + } + } + } + } + } +} diff --git a/lib/request/request-transformer.ts b/lib/request/request-transformer.ts index 05705ed..22355fa 100644 --- a/lib/request/request-transformer.ts +++ b/lib/request/request-transformer.ts @@ -4,11 +4,11 @@ import { CODEX_OPENCODE_BRIDGE } from "../prompts/codex-opencode-bridge.js"; import { getOpenCodeCodexPrompt } from "../prompts/opencode-codex.js"; import { getNormalizedModel } from "./helpers/model-map.js"; import type { - ConfigOptions, - InputItem, - ReasoningConfig, - RequestBody, - UserConfig, + ConfigOptions, + InputItem, + ReasoningConfig, + RequestBody, + UserConfig, } from "../types.js"; /** @@ -21,86 +21,86 @@ import type { * @returns Normalized model name (e.g., "gpt-5.1-codex", "gpt-5-codex") */ export function normalizeModel(model: string | undefined): string { - if (!model) return "gpt-5.1"; - - // Strip provider prefix if present (e.g., "openai/gpt-5-codex" → "gpt-5-codex") - const modelId = model.includes("/") ? model.split("/").pop()! 
: model; - - // Try explicit model map first (handles all known model variants) - const mappedModel = getNormalizedModel(modelId); - if (mappedModel) { - return mappedModel; - } - - // Fallback: Pattern-based matching for unknown/custom model names - // This preserves backwards compatibility with old verbose names - // like "GPT 5 Codex Low (ChatGPT Subscription)" - const normalized = modelId.toLowerCase(); - - // Priority order for pattern matching (most specific first): - // 1. GPT-5.2 Codex (newest codex model) - if ( - normalized.includes("gpt-5.2-codex") || - normalized.includes("gpt 5.2 codex") - ) { - return "gpt-5.2-codex"; - } - - // 2. GPT-5.2 (general purpose) - if (normalized.includes("gpt-5.2") || normalized.includes("gpt 5.2")) { - return "gpt-5.2"; - } - - // 3. GPT-5.1 Codex Max - if ( - normalized.includes("gpt-5.1-codex-max") || - normalized.includes("gpt 5.1 codex max") - ) { - return "gpt-5.1-codex-max"; - } - - // 4. GPT-5.1 Codex Mini - if ( - normalized.includes("gpt-5.1-codex-mini") || - normalized.includes("gpt 5.1 codex mini") - ) { - return "gpt-5.1-codex-mini"; - } - - // 5. Legacy Codex Mini - if ( - normalized.includes("codex-mini-latest") || - normalized.includes("gpt-5-codex-mini") || - normalized.includes("gpt 5 codex mini") - ) { - return "codex-mini-latest"; - } - - // 6. GPT-5.1 Codex - if ( - normalized.includes("gpt-5.1-codex") || - normalized.includes("gpt 5.1 codex") - ) { - return "gpt-5.1-codex"; - } - - // 7. GPT-5.1 (general-purpose) - if (normalized.includes("gpt-5.1") || normalized.includes("gpt 5.1")) { - return "gpt-5.1"; - } - - // 8. GPT-5 Codex family (any variant with "codex") - if (normalized.includes("codex")) { - return "gpt-5.1-codex"; - } - - // 9. GPT-5 family (any variant) - default to 5.1 as 5 is being phased out - if (normalized.includes("gpt-5") || normalized.includes("gpt 5")) { - return "gpt-5.1"; - } - - // Default fallback - use gpt-5.1 as gpt-5 is being phased out - return "gpt-5.1"; + if (!model) return "gpt-5.1"; + + // Strip provider prefix if present (e.g., "openai/gpt-5-codex" → "gpt-5-codex") + const modelId = model.includes("/") ? model.split("/").pop()! : model; + + // Try explicit model map first (handles all known model variants) + const mappedModel = getNormalizedModel(modelId); + if (mappedModel) { + return mappedModel; + } + + // Fallback: Pattern-based matching for unknown/custom model names + // This preserves backwards compatibility with old verbose names + // like "GPT 5 Codex Low (ChatGPT Subscription)" + const normalized = modelId.toLowerCase(); + + // Priority order for pattern matching (most specific first): + // 1. GPT-5.2 Codex (newest codex model) + if ( + normalized.includes("gpt-5.2-codex") || + normalized.includes("gpt 5.2 codex") + ) { + return "gpt-5.2-codex"; + } + + // 2. GPT-5.2 (general purpose) + if (normalized.includes("gpt-5.2") || normalized.includes("gpt 5.2")) { + return "gpt-5.2"; + } + + // 3. GPT-5.1 Codex Max + if ( + normalized.includes("gpt-5.1-codex-max") || + normalized.includes("gpt 5.1 codex max") + ) { + return "gpt-5.1-codex-max"; + } + + // 4. GPT-5.1 Codex Mini + if ( + normalized.includes("gpt-5.1-codex-mini") || + normalized.includes("gpt 5.1 codex mini") + ) { + return "gpt-5.1-codex-mini"; + } + + // 5. Legacy Codex Mini + if ( + normalized.includes("codex-mini-latest") || + normalized.includes("gpt-5-codex-mini") || + normalized.includes("gpt 5 codex mini") + ) { + return "codex-mini-latest"; + } + + // 6. 
GPT-5.1 Codex + if ( + normalized.includes("gpt-5.1-codex") || + normalized.includes("gpt 5.1 codex") + ) { + return "gpt-5.1-codex"; + } + + // 7. GPT-5.1 (general-purpose) + if (normalized.includes("gpt-5.1") || normalized.includes("gpt 5.1")) { + return "gpt-5.1"; + } + + // 8. GPT-5 Codex family (any variant with "codex") + if (normalized.includes("codex")) { + return "gpt-5.1-codex"; + } + + // 9. GPT-5 family (any variant) - default to 5.1 as 5 is being phased out + if (normalized.includes("gpt-5") || normalized.includes("gpt 5")) { + return "gpt-5.1"; + } + + // Default fallback - use gpt-5.1 as gpt-5 is being phased out + return "gpt-5.1"; } /** @@ -111,14 +111,14 @@ export function normalizeModel(model: string | undefined): string { * @returns Merged configuration for this model */ export function getModelConfig( - modelName: string, - userConfig: UserConfig = { global: {}, models: {} }, + modelName: string, + userConfig: UserConfig = { global: {}, models: {} }, ): ConfigOptions { - const globalOptions = userConfig.global || {}; - const modelOptions = userConfig.models?.[modelName]?.options || {}; + const globalOptions = userConfig.global || {}; + const modelOptions = userConfig.models?.[modelName]?.options || {}; - // Model-specific options override global options - return { ...globalOptions, ...modelOptions }; + // Model-specific options override global options + return { ...globalOptions, ...modelOptions }; } /** @@ -130,103 +130,115 @@ export function getModelConfig( * - Codex CLI has been thoroughly tested against this backend * * @param originalModel - Original model name before normalization - * @param userConfig - User configuration object + * @param userConfig - User configuration from plugin config file + * @param externalConfig - External configuration from OpenCode's providerOptions (variant selection) * @returns Reasoning configuration */ export function getReasoningConfig( - modelName: string | undefined, - userConfig: ConfigOptions = {}, + modelName: string | undefined, + userConfig: ConfigOptions = {}, + externalConfig: ConfigOptions = {}, ): ReasoningConfig { - const normalizedName = modelName?.toLowerCase() ?? 
""; - - // GPT-5.2 Codex is the newest codex model (supports xhigh, but not "none") - const isGpt52Codex = - normalizedName.includes("gpt-5.2-codex") || - normalizedName.includes("gpt 5.2 codex"); - - // GPT-5.2 general purpose (not codex variant) - const isGpt52General = - (normalizedName.includes("gpt-5.2") || normalizedName.includes("gpt 5.2")) && - !isGpt52Codex; - const isCodexMax = - normalizedName.includes("codex-max") || - normalizedName.includes("codex max"); - const isCodexMini = - normalizedName.includes("codex-mini") || - normalizedName.includes("codex mini") || - normalizedName.includes("codex_mini") || - normalizedName.includes("codex-mini-latest"); - const isCodex = normalizedName.includes("codex") && !isCodexMini; - const isLightweight = - !isCodexMini && - (normalizedName.includes("nano") || - normalizedName.includes("mini")); - - // GPT-5.1 general purpose (not codex variants) - supports "none" per OpenAI API docs - const isGpt51General = - (normalizedName.includes("gpt-5.1") || normalizedName.includes("gpt 5.1")) && - !isCodex && - !isCodexMax && - !isCodexMini; - - // GPT 5.2, GPT 5.2 Codex, and Codex Max support xhigh reasoning - const supportsXhigh = isGpt52General || isGpt52Codex || isCodexMax; - - // GPT 5.1 general and GPT 5.2 general support "none" reasoning per: - // - OpenAI API docs: "gpt-5.1 defaults to none, supports: none, low, medium, high" - // - Codex CLI: ReasoningEffort enum includes None variant (codex-rs/protocol/src/openai_models.rs) - // - Codex CLI: docs/config.md lists "none" as valid for model_reasoning_effort - // - gpt-5.2 (being newer) also supports: none, low, medium, high, xhigh - // - Codex models (including GPT-5.2 Codex) do NOT support "none" - const supportsNone = isGpt52General || isGpt51General; - - // Default based on model type (Codex CLI defaults) - // Note: OpenAI docs say gpt-5.1 defaults to "none", but we default to "medium" - // for better coding assistance unless user explicitly requests "none" - const defaultEffort: ReasoningConfig["effort"] = isCodexMini - ? "medium" - : supportsXhigh - ? "high" - : isLightweight - ? "minimal" - : "medium"; - - // Get user-requested effort - let effort = userConfig.reasoningEffort || defaultEffort; - - if (isCodexMini) { - if (effort === "minimal" || effort === "low" || effort === "none") { - effort = "medium"; - } - if (effort === "xhigh") { - effort = "high"; - } - if (effort !== "high" && effort !== "medium") { - effort = "medium"; - } - } - - // For models that don't support xhigh, downgrade to high - if (!supportsXhigh && effort === "xhigh") { - effort = "high"; - } - - // For models that don't support "none", upgrade to "low" - // (Codex models don't support "none" - only GPT-5.1 and GPT-5.2 general purpose do) - if (!supportsNone && effort === "none") { - effort = "low"; - } - - // Normalize "minimal" to "low" for Codex families - // Codex CLI presets are low/medium/high (or xhigh for Codex Max / GPT-5.2 Codex) - if (isCodex && effort === "minimal") { - effort = "low"; - } - - return { - effort, - summary: userConfig.reasoningSummary || "auto", // Changed from "detailed" to match Codex CLI - }; + const normalizedName = modelName?.toLowerCase() ?? 
""; + + // GPT-5.2 Codex is the newest codex model (supports xhigh, but not "none") + const isGpt52Codex = + normalizedName.includes("gpt-5.2-codex") || + normalizedName.includes("gpt 5.2 codex"); + + // GPT-5.2 general purpose (not codex variant) + const isGpt52General = + (normalizedName.includes("gpt-5.2") || + normalizedName.includes("gpt 5.2")) && + !isGpt52Codex; + const isCodexMax = + normalizedName.includes("codex-max") || + normalizedName.includes("codex max"); + const isCodexMini = + normalizedName.includes("codex-mini") || + normalizedName.includes("codex mini") || + normalizedName.includes("codex_mini") || + normalizedName.includes("codex-mini-latest"); + const isCodex = normalizedName.includes("codex") && !isCodexMini; + const isLightweight = + !isCodexMini && + (normalizedName.includes("nano") || normalizedName.includes("mini")); + + // GPT-5.1 general purpose (not codex variants) - supports "none" per OpenAI API docs + const isGpt51General = + (normalizedName.includes("gpt-5.1") || + normalizedName.includes("gpt 5.1")) && + !isCodex && + !isCodexMax && + !isCodexMini; + + // GPT 5.2, GPT 5.2 Codex, and Codex Max support xhigh reasoning + const supportsXhigh = isGpt52General || isGpt52Codex || isCodexMax; + + // GPT 5.1 general and GPT 5.2 general support "none" reasoning per: + // - OpenAI API docs: "gpt-5.1 defaults to none, supports: none, low, medium, high" + // - Codex CLI: ReasoningEffort enum includes None variant (codex-rs/protocol/src/openai_models.rs) + // - Codex CLI: docs/config.md lists "none" as valid for model_reasoning_effort + // - gpt-5.2 (being newer) also supports: none, low, medium, high, xhigh + // - Codex models (including GPT-5.2 Codex) do NOT support "none" + const supportsNone = isGpt52General || isGpt51General; + + // Default based on model type (Codex CLI defaults) + // Note: OpenAI docs say gpt-5.1 defaults to "none", but we default to "medium" + // for better coding assistance unless user explicitly requests "none" + const defaultEffort: ReasoningConfig["effort"] = isCodexMini + ? "medium" + : supportsXhigh + ? "high" + : isLightweight + ? "minimal" + : "medium"; + + // Priority: externalConfig (from OpenCode variant) > userConfig > computed defaults + // This allows variant selection via Ctrl+T to override plugin defaults + let effort = + externalConfig.reasoningEffort ?? + userConfig.reasoningEffort ?? + defaultEffort; + + if (isCodexMini) { + if (effort === "minimal" || effort === "low" || effort === "none") { + effort = "medium"; + } + if (effort === "xhigh") { + effort = "high"; + } + if (effort !== "high" && effort !== "medium") { + effort = "medium"; + } + } + + // For models that don't support xhigh, downgrade to high + if (!supportsXhigh && effort === "xhigh") { + effort = "high"; + } + + // For models that don't support "none", upgrade to "low" + // (Codex models don't support "none" - only GPT-5.1 and GPT-5.2 general purpose do) + if (!supportsNone && effort === "none") { + effort = "low"; + } + + // Normalize "minimal" to "low" for ALL GPT-5.1/5.2 models + // "minimal" is NOT supported by any gpt-5.1 or gpt-5.2 model (general or codex) + // It was deprecated when transitioning from gpt-5 to gpt-5.1 + if (effort === "minimal") { + effort = "low"; + } + + // Priority for summary: externalConfig > userConfig > "auto" (Codex CLI default) + const summary = + externalConfig.reasoningSummary ?? userConfig.reasoningSummary ?? 
"auto"; + + return { + effort, + summary, + }; } /** @@ -252,26 +264,26 @@ export function getReasoningConfig( * @returns Filtered input array compatible with Codex API */ export function filterInput( - input: InputItem[] | undefined, + input: InputItem[] | undefined, ): InputItem[] | undefined { - if (!Array.isArray(input)) return input; - - return input - .filter((item) => { - // Remove AI SDK constructs not supported by Codex API - if (item.type === "item_reference") { - return false; // AI SDK only - references server state - } - return true; // Keep all other items - }) - .map((item) => { - // Strip IDs from all items (Codex API stateless mode) - if (item.id) { - const { id, ...itemWithoutId } = item; - return itemWithoutId as InputItem; - } - return item; - }); + if (!Array.isArray(input)) return input; + + return input + .filter((item) => { + // Remove AI SDK constructs not supported by Codex API + if (item.type === "item_reference") { + return false; // AI SDK only - references server state + } + return true; // Keep all other items + }) + .map((item) => { + // Strip IDs from all items (Codex API stateless mode) + if (item.id) { + const { id, ...itemWithoutId } = item; + return itemWithoutId as InputItem; + } + return item; + }); } /** @@ -282,46 +294,46 @@ export function filterInput( * @returns True if this is the OpenCode system prompt */ export function isOpenCodeSystemPrompt( - item: InputItem, - cachedPrompt: string | null, + item: InputItem, + cachedPrompt: string | null, ): boolean { - const isSystemRole = item.role === "developer" || item.role === "system"; - if (!isSystemRole) return false; - - const getContentText = (item: InputItem): string => { - if (typeof item.content === "string") { - return item.content; - } - if (Array.isArray(item.content)) { - return item.content - .filter((c) => c.type === "input_text" && c.text) - .map((c) => c.text) - .join("\n"); - } - return ""; - }; - - const contentText = getContentText(item); - if (!contentText) return false; - - // Primary check: Compare against cached OpenCode prompt - if (cachedPrompt) { - // Exact match (trim whitespace for comparison) - if (contentText.trim() === cachedPrompt.trim()) { - return true; - } - - // Partial match: Check if first 200 chars match (handles minor variations) - const contentPrefix = contentText.trim().substring(0, 200); - const cachedPrefix = cachedPrompt.trim().substring(0, 200); - if (contentPrefix === cachedPrefix) { - return true; - } - } - - // Fallback check: Known OpenCode prompt signature (for safety) - // This catches the prompt even if cache fails - return contentText.startsWith("You are a coding agent running in"); + const isSystemRole = item.role === "developer" || item.role === "system"; + if (!isSystemRole) return false; + + const getContentText = (item: InputItem): string => { + if (typeof item.content === "string") { + return item.content; + } + if (Array.isArray(item.content)) { + return item.content + .filter((c) => c.type === "input_text" && c.text) + .map((c) => c.text) + .join("\n"); + } + return ""; + }; + + const contentText = getContentText(item); + if (!contentText) return false; + + // Primary check: Compare against cached OpenCode prompt + if (cachedPrompt) { + // Exact match (trim whitespace for comparison) + if (contentText.trim() === cachedPrompt.trim()) { + return true; + } + + // Partial match: Check if first 200 chars match (handles minor variations) + const contentPrefix = contentText.trim().substring(0, 200); + const cachedPrefix = 
cachedPrompt.trim().substring(0, 200); + if (contentPrefix === cachedPrefix) { + return true; + } + } + + // Fallback check: Known OpenCode prompt signature (for safety) + // This catches the prompt even if cache fails + return contentText.startsWith("You are a coding agent running in"); } /** @@ -331,25 +343,25 @@ export function isOpenCodeSystemPrompt( * @returns Input array without OpenCode system prompts */ export async function filterOpenCodeSystemPrompts( - input: InputItem[] | undefined, + input: InputItem[] | undefined, ): Promise { - if (!Array.isArray(input)) return input; - - // Fetch cached OpenCode prompt for verification - let cachedPrompt: string | null = null; - try { - cachedPrompt = await getOpenCodeCodexPrompt(); - } catch { - // If fetch fails, fallback to text-based detection only - // This is safe because we still have the "starts with" check - } - - return input.filter((item) => { - // Keep user messages - if (item.role === "user") return true; - // Filter out OpenCode system prompts - return !isOpenCodeSystemPrompt(item, cachedPrompt); - }); + if (!Array.isArray(input)) return input; + + // Fetch cached OpenCode prompt for verification + let cachedPrompt: string | null = null; + try { + cachedPrompt = await getOpenCodeCodexPrompt(); + } catch { + // If fetch fails, fallback to text-based detection only + // This is safe because we still have the "starts with" check + } + + return input.filter((item) => { + // Keep user messages + if (item.role === "user") return true; + // Filter out OpenCode system prompts + return !isOpenCodeSystemPrompt(item, cachedPrompt); + }); } /** @@ -359,23 +371,23 @@ export async function filterOpenCodeSystemPrompts( * @returns Input array with bridge message prepended if needed */ export function addCodexBridgeMessage( - input: InputItem[] | undefined, - hasTools: boolean, + input: InputItem[] | undefined, + hasTools: boolean, ): InputItem[] | undefined { - if (!hasTools || !Array.isArray(input)) return input; - - const bridgeMessage: InputItem = { - type: "message", - role: "developer", - content: [ - { - type: "input_text", - text: CODEX_OPENCODE_BRIDGE, - }, - ], - }; - - return [bridgeMessage, ...input]; + if (!hasTools || !Array.isArray(input)) return input; + + const bridgeMessage: InputItem = { + type: "message", + role: "developer", + content: [ + { + type: "input_text", + text: CODEX_OPENCODE_BRIDGE, + }, + ], + }; + + return [bridgeMessage, ...input]; } /** @@ -385,23 +397,79 @@ export function addCodexBridgeMessage( * @returns Input array with tool remap message prepended if needed */ export function addToolRemapMessage( - input: InputItem[] | undefined, - hasTools: boolean, + input: InputItem[] | undefined, + hasTools: boolean, ): InputItem[] | undefined { - if (!hasTools || !Array.isArray(input)) return input; - - const toolRemapMessage: InputItem = { - type: "message", - role: "developer", - content: [ - { - type: "input_text", - text: TOOL_REMAP_MESSAGE, - }, - ], - }; - - return [toolRemapMessage, ...input]; + if (!hasTools || !Array.isArray(input)) return input; + + const toolRemapMessage: InputItem = { + type: "message", + role: "developer", + content: [ + { + type: "input_text", + text: TOOL_REMAP_MESSAGE, + }, + ], + }; + + return [toolRemapMessage, ...input]; +} + +/** + * Validate and normalize reasoning effort based on model-specific constraints + * This is used when the AI SDK already provides a reasoning effort from variant selection, + * but we need to ensure it's valid for the specific model (e.g., downgrade 
xhigh to high) + * + * @param effort - The reasoning effort to validate + * @param modelName - The normalized model name + * @returns Validated effort with model-specific fixes applied + */ +function validateReasoningEffort( + effort: string, + modelName: string, +): ReasoningConfig["effort"] { + const normalizedName = modelName.toLowerCase(); + const isCodexMini = normalizedName.includes("codex-mini"); + const isCodex = normalizedName.includes("codex") && !isCodexMini; + const supportsXhigh = + normalizedName.includes("gpt-5.2") || normalizedName.includes("codex-max"); + const supportsNone = + (normalizedName.includes("gpt-5.2") || + normalizedName.includes("gpt-5.1")) && + !isCodex; + + let result = effort as ReasoningConfig["effort"]; + + // Codex Mini only supports medium and high + if (isCodexMini) { + if (result === "none" || result === "minimal" || result === "low") { + result = "medium"; + } else if (result === "xhigh") { + result = "high"; + } else if (result !== "high" && result !== "medium") { + result = "medium"; + } + } + + // For models that don't support xhigh, downgrade to high + if (!supportsXhigh && result === "xhigh") { + result = "high"; + } + + // For models that don't support "none", upgrade to "low" + if (!supportsNone && result === "none") { + result = "low"; + } + + // Normalize "minimal" to "low" for ALL GPT-5.1/5.2 models + // "minimal" is NOT supported by any gpt-5.1 or gpt-5.2 model (general or codex) + // It was deprecated when transitioning from gpt-5 to gpt-5.1 + if (result === "minimal") { + result = "low"; + } + + return result; } /** @@ -412,6 +480,13 @@ export function addToolRemapMessage( * - opencode excludes gpt-5-codex from reasoning configuration * - This plugin uses store=false (stateless), requiring encrypted reasoning content * + * VARIANT SELECTION FLOW: + * - When user selects a reasoning variant via Ctrl+T or --model=openai/gpt-5.2:high, + * OpenCode passes this through the AI SDK which sets body.reasoning.effort/summary + * - We check if body.reasoning already exists (from AI SDK/variant selection) + * - If yes, we RESPECT it (don't override user's variant selection) + * - If no, we apply plugin computed defaults + * * @param body - Original request body * @param codexInstructions - Codex system instructions * @param userConfig - User configuration from loader @@ -419,134 +494,183 @@ export function addToolRemapMessage( * @returns Transformed request body */ export async function transformRequestBody( - body: RequestBody, - codexInstructions: string, - userConfig: UserConfig = { global: {}, models: {} }, - codexMode = true, + body: RequestBody, + codexInstructions: string, + userConfig: UserConfig = { global: {}, models: {} }, + codexMode = true, ): Promise { - const originalModel = body.model; - const normalizedModel = normalizeModel(body.model); - - // Get model-specific configuration using ORIGINAL model name (config key) - // This allows per-model options like "gpt-5-codex-low" to work correctly - const lookupModel = originalModel || normalizedModel; - const modelConfig = getModelConfig(lookupModel, userConfig); - - // Debug: Log which config was resolved - logDebug( - `Model config lookup: "${lookupModel}" → normalized to "${normalizedModel}" for API`, - { - hasModelSpecificConfig: !!userConfig.models?.[lookupModel], - resolvedConfig: modelConfig, - }, - ); - - // Normalize model name for API call - body.model = normalizedModel; - - // Codex required fields - // ChatGPT backend REQUIRES store=false (confirmed via testing) - body.store = 
false; - // Always set stream=true for API - response handling detects original intent - body.stream = true; - body.instructions = codexInstructions; - - // Prompt caching relies on the host providing a stable prompt_cache_key - // (OpenCode passes its session identifier). We no longer synthesize one here. - - // Filter and transform input - if (body.input && Array.isArray(body.input)) { - // Debug: Log original input message IDs before filtering - const originalIds = body.input - .filter((item) => item.id) - .map((item) => item.id); - if (originalIds.length > 0) { - logDebug( - `Filtering ${originalIds.length} message IDs from input:`, - originalIds, - ); - } - - body.input = filterInput(body.input); - - // Debug: Verify all IDs were removed - const remainingIds = (body.input || []) - .filter((item) => item.id) - .map((item) => item.id); - if (remainingIds.length > 0) { - logWarn( - `WARNING: ${remainingIds.length} IDs still present after filtering:`, - remainingIds, - ); - } else if (originalIds.length > 0) { - logDebug(`Successfully removed all ${originalIds.length} message IDs`); - } - - if (codexMode) { - // CODEX_MODE: Remove OpenCode system prompt, add bridge prompt - body.input = await filterOpenCodeSystemPrompts(body.input); - body.input = addCodexBridgeMessage(body.input, !!body.tools); - } else { - // DEFAULT MODE: Keep original behavior with tool remap message - body.input = addToolRemapMessage(body.input, !!body.tools); - } - - // Handle orphaned function_call_output items (where function_call was an item_reference that got filtered) - // Instead of removing orphans (which causes infinite loops as LLM loses tool results), - // convert them to messages to preserve context while avoiding API errors - if (body.input) { - const functionCallIds = new Set( - body.input - .filter((item) => item.type === "function_call" && item.call_id) - .map((item) => item.call_id), - ); - body.input = body.input.map((item) => { - if (item.type === "function_call_output" && !functionCallIds.has(item.call_id)) { - const toolName = typeof (item as any).name === "string" ? (item as any).name : "tool"; - const callId = (item as any).call_id ?? ""; - let text: string; - try { - const out = (item as any).output; - text = typeof out === "string" ? out : JSON.stringify(out); - } catch { - text = String((item as any).output ?? 
""); - } - if (text.length > 16000) { - text = text.slice(0, 16000) + "\n...[truncated]"; - } - return { - type: "message", - role: "assistant", - content: `[Previous ${toolName} result; call_id=${callId}]: ${text}`, - } as InputItem; - } - return item; - }); - } - } - - // Configure reasoning (use normalized model family + model-specific config) - const reasoningConfig = getReasoningConfig(normalizedModel, modelConfig); - body.reasoning = { - ...body.reasoning, - ...reasoningConfig, - }; - - // Configure text verbosity (support user config) - // Default: "medium" (matches Codex CLI default for all GPT-5 models) - body.text = { - ...body.text, - verbosity: modelConfig.textVerbosity || "medium", - }; - - // Add include for encrypted reasoning content - // Default: ["reasoning.encrypted_content"] (required for stateless operation with store=false) - // This allows reasoning context to persist across turns without server-side storage - body.include = modelConfig.include || ["reasoning.encrypted_content"]; - - // Remove unsupported parameters - body.max_output_tokens = undefined; - body.max_completion_tokens = undefined; - - return body; + const originalModel = body.model; + const normalizedModel = normalizeModel(body.model); + + // Get model-specific configuration using ORIGINAL model name (config key) + // This allows per-model options like "gpt-5-codex-low" to work correctly + const lookupModel = originalModel || normalizedModel; + const modelConfig = getModelConfig(lookupModel, userConfig); + + // Debug: Log which config was resolved + logDebug( + `Model config lookup: "${lookupModel}" → normalized to "${normalizedModel}" for API`, + { + hasModelSpecificConfig: !!userConfig.models?.[lookupModel], + resolvedConfig: modelConfig, + }, + ); + + // Normalize model name for API call + body.model = normalizedModel; + + // Codex required fields + // ChatGPT backend REQUIRES store=false (confirmed via testing) + body.store = false; + // Always set stream=true for API - response handling detects original intent + body.stream = true; + body.instructions = codexInstructions; + + // Prompt caching relies on the host providing a stable prompt_cache_key + // (OpenCode passes its session identifier). We no longer synthesize one here. 
+ + // Filter and transform input + if (body.input && Array.isArray(body.input)) { + // Debug: Log original input message IDs before filtering + const originalIds = body.input + .filter((item) => item.id) + .map((item) => item.id); + if (originalIds.length > 0) { + logDebug( + `Filtering ${originalIds.length} message IDs from input:`, + originalIds, + ); + } + + body.input = filterInput(body.input); + + // Debug: Verify all IDs were removed + const remainingIds = (body.input || []) + .filter((item) => item.id) + .map((item) => item.id); + if (remainingIds.length > 0) { + logWarn( + `WARNING: ${remainingIds.length} IDs still present after filtering:`, + remainingIds, + ); + } else if (originalIds.length > 0) { + logDebug(`Successfully removed all ${originalIds.length} message IDs`); + } + + if (codexMode) { + // CODEX_MODE: Remove OpenCode system prompt, add bridge prompt + body.input = await filterOpenCodeSystemPrompts(body.input); + body.input = addCodexBridgeMessage(body.input, !!body.tools); + } else { + // DEFAULT MODE: Keep original behavior with tool remap message + body.input = addToolRemapMessage(body.input, !!body.tools); + } + + // Handle orphaned function_call_output items (where function_call was an item_reference that got filtered) + // Instead of removing orphans (which causes infinite loops as LLM loses tool results), + // convert them to messages to preserve context while avoiding API errors + if (body.input) { + const functionCallIds = new Set( + body.input + .filter((item) => item.type === "function_call" && item.call_id) + .map((item) => item.call_id), + ); + body.input = body.input.map((item) => { + if ( + item.type === "function_call_output" && + !functionCallIds.has(item.call_id) + ) { + const toolName = + typeof (item as any).name === "string" + ? (item as any).name + : "tool"; + const callId = (item as any).call_id ?? ""; + let text: string; + try { + const out = (item as any).output; + text = typeof out === "string" ? out : JSON.stringify(out); + } catch { + text = String((item as any).output ?? ""); + } + if (text.length > 16000) { + text = text.slice(0, 16000) + "\n...[truncated]"; + } + return { + type: "message", + role: "assistant", + content: `[Previous ${toolName} result; call_id=${callId}]: ${text}`, + } as InputItem; + } + return item; + }); + } + } + + // Check if reasoning config is already set by AI SDK (from Ctrl+T variant selection) + // or by providerOptions (legacy/opencode variant config) + // Priority: body.reasoning > providerOptions > user config > plugin defaults + const existingEffort = + body.reasoning?.effort ?? body.providerOptions?.openai?.reasoningEffort; + const existingSummary = + body.reasoning?.summary ?? 
body.providerOptions?.openai?.reasoningSummary; + + logDebug("Checking for existing reasoning config from AI SDK/variant", { + existingEffort, + existingSummary, + fromBodyReasoning: !!body.reasoning?.effort, + fromProviderOptions: !!body.providerOptions?.openai?.reasoningEffort, + normalizedModel, + }); + + // If AI SDK or providerOptions already set reasoning config, respect it + // but validate it against model-specific constraints (e.g., xhigh → high for non-supporting models) + if (existingEffort) { + logDebug("Respected existing reasoning effort from AI SDK/variant", { + effort: existingEffort, + }); + + // Validate and fix the existing effort for model-specific constraints + const validatedEffort = validateReasoningEffort( + existingEffort, + normalizedModel, + ); + + body.reasoning = { + ...body.reasoning, + effort: validatedEffort, + summary: existingSummary ?? "auto", + }; + logDebug("Validated existing reasoning config for model constraints", { + original: existingEffort, + validated: validatedEffort, + }); + } else { + // No existing config, apply plugin computed defaults + const reasoningConfig = getReasoningConfig(normalizedModel, modelConfig); + body.reasoning = { + ...body.reasoning, + ...reasoningConfig, + }; + logDebug("Applied plugin computed reasoning config", body.reasoning); + } + + // Configure text verbosity + // Priority: body.text.verbosity > providerOptions.textVerbosity > user config > plugin default + const existingTextVerbosity = + body.text?.verbosity ?? body.providerOptions?.openai?.textVerbosity; + body.text = { + ...body.text, + verbosity: existingTextVerbosity ?? modelConfig.textVerbosity ?? "medium", + }; + + // Add include for encrypted reasoning content + // Default: ["reasoning.encrypted_content"] (required for stateless operation with store=false) + // This allows reasoning context to persist across turns without server-side storage + body.include = modelConfig.include || ["reasoning.encrypted_content"]; + + // Remove unsupported parameters + body.max_output_tokens = undefined; + body.max_completion_tokens = undefined; + + return body; } diff --git a/lib/types.ts b/lib/types.ts index 80c8b02..8373a50 100644 --- a/lib/types.ts +++ b/lib/types.ts @@ -18,7 +18,11 @@ export interface UserConfig { global: ConfigOptions; models: { [modelName: string]: { + id?: string; // Optional model ID (may come from config) options?: ConfigOptions; + variants?: { + [variantName: string]: ConfigOptions; + }; }; }; } @@ -137,6 +141,14 @@ export interface RequestBody { prompt_cache_key?: string; max_output_tokens?: number; max_completion_tokens?: number; + /** Provider options from OpenCode (includes variant configuration from modern config format) */ + providerOptions?: { + openai?: { + reasoningEffort?: "none" | "minimal" | "low" | "medium" | "high" | "xhigh"; + reasoningSummary?: "auto" | "concise" | "detailed" | "off" | "on"; + textVerbosity?: "low" | "medium" | "high"; + }; + }; [key: string]: unknown; } diff --git a/test/config.test.ts b/test/config.test.ts index e3ccfda..cb76ff0 100644 --- a/test/config.test.ts +++ b/test/config.test.ts @@ -1,151 +1,169 @@ -import { describe, it, expect } from 'vitest'; -import { getModelConfig, getReasoningConfig } from '../lib/request/request-transformer.js'; -import type { UserConfig } from '../lib/types.js'; - -describe('Configuration Parsing', () => { - const providerConfig = { - options: { - reasoningEffort: 'medium' as const, - reasoningSummary: 'auto' as const, - textVerbosity: 'medium' as const, - }, - models: { - 
'gpt-5-codex': { - options: { - reasoningSummary: 'concise' as const, - }, - }, - 'gpt-5': { - options: { - reasoningEffort: 'high' as const, - }, - }, - }, - }; - - const userConfig: UserConfig = { - global: providerConfig.options || {}, - models: providerConfig.models || {}, - }; - - describe('getModelConfig', () => { - it('should merge global and model-specific config for gpt-5-codex', () => { - const codexConfig = getModelConfig('gpt-5-codex', userConfig); - - expect(codexConfig.reasoningEffort).toBe('medium'); // from global - expect(codexConfig.reasoningSummary).toBe('concise'); // from model override - expect(codexConfig.textVerbosity).toBe('medium'); // from global - }); - - it('should merge global and model-specific config for gpt-5', () => { - const gpt5Config = getModelConfig('gpt-5', userConfig); - - expect(gpt5Config.reasoningEffort).toBe('high'); // from model override - expect(gpt5Config.reasoningSummary).toBe('auto'); // from global - expect(gpt5Config.textVerbosity).toBe('medium'); // from global - }); - - it('should return empty config when no config provided', () => { - const emptyConfig = getModelConfig('gpt-5-codex', { global: {}, models: {} }); - - expect(emptyConfig).toEqual({}); - }); - }); - - describe('getReasoningConfig', () => { - it('should use user settings from merged config for gpt-5-codex', () => { - const codexConfig = getModelConfig('gpt-5-codex', userConfig); - const reasoningConfig = getReasoningConfig('gpt-5-codex', codexConfig); - - expect(reasoningConfig.effort).toBe('medium'); - expect(reasoningConfig.summary).toBe('concise'); - }); - - it('should return defaults when no config provided', () => { - const emptyConfig = getModelConfig('gpt-5-codex', { global: {}, models: {} }); - const defaultReasoning = getReasoningConfig('gpt-5-codex', emptyConfig); - - expect(defaultReasoning.effort).toBe('medium'); - expect(defaultReasoning.summary).toBe('auto'); - }); - - it('should use minimal effort for lightweight models (nano/mini)', () => { - const nanoReasoning = getReasoningConfig('gpt-5-nano', {}); - - expect(nanoReasoning.effort).toBe('minimal'); - expect(nanoReasoning.summary).toBe('auto'); - }); - - it('should normalize "minimal" to "low" for gpt-5-codex', () => { - const codexMinimalConfig = { reasoningEffort: 'minimal' as const }; - const codexMinimalReasoning = getReasoningConfig('gpt-5-codex', codexMinimalConfig); - - expect(codexMinimalReasoning.effort).toBe('low'); - expect(codexMinimalReasoning.summary).toBe('auto'); - }); - - it('should preserve "minimal" effort for non-codex models', () => { - const gpt5MinimalConfig = { reasoningEffort: 'minimal' as const }; - const gpt5MinimalReasoning = getReasoningConfig('gpt-5', gpt5MinimalConfig); - - expect(gpt5MinimalReasoning.effort).toBe('minimal'); - }); - - it('should handle high effort setting', () => { - const highConfig = { reasoningEffort: 'high' as const }; - const highReasoning = getReasoningConfig('gpt-5', highConfig); - - expect(highReasoning.effort).toBe('high'); - expect(highReasoning.summary).toBe('auto'); - }); - - it('should respect custom summary setting', () => { - const detailedConfig = { reasoningSummary: 'detailed' as const }; - const detailedReasoning = getReasoningConfig('gpt-5-codex', detailedConfig); - - expect(detailedReasoning.summary).toBe('detailed'); - }); - - it('should default codex-mini to medium effort', () => { - const codexMiniReasoning = getReasoningConfig('gpt-5-codex-mini', {}); - expect(codexMiniReasoning.effort).toBe('medium'); - }); - - it('should clamp 
codex-mini minimal/low to medium', () => { - const minimal = getReasoningConfig('gpt-5-codex-mini', { - reasoningEffort: 'minimal', - }); - const low = getReasoningConfig('gpt-5-codex-mini-high', { - reasoningEffort: 'low', - }); - - expect(minimal.effort).toBe('medium'); - expect(low.effort).toBe('medium'); - }); - - it('should keep codex-mini high effort when requested', () => { - const high = getReasoningConfig('codex-mini-latest', { - reasoningEffort: 'high', - }); - expect(high.effort).toBe('high'); - }); - }); - - describe('Model-specific behavior', () => { - it('should detect lightweight models correctly', () => { - const miniReasoning = getReasoningConfig('gpt-5-mini', {}); - expect(miniReasoning.effort).toBe('minimal'); - }); - - it('should detect codex models correctly', () => { - const codexConfig = { reasoningEffort: 'minimal' as const }; - const codexReasoning = getReasoningConfig('gpt-5-codex', codexConfig); - expect(codexReasoning.effort).toBe('low'); // normalized - }); - - it('should handle standard gpt-5 model', () => { - const gpt5Reasoning = getReasoningConfig('gpt-5', {}); - expect(gpt5Reasoning.effort).toBe('medium'); - }); - }); +import { describe, it, expect } from "vitest"; +import { + getModelConfig, + getReasoningConfig, +} from "../lib/request/request-transformer.js"; +import type { UserConfig } from "../lib/types.js"; + +describe("Configuration Parsing", () => { + const providerConfig = { + options: { + reasoningEffort: "medium" as const, + reasoningSummary: "auto" as const, + textVerbosity: "medium" as const, + }, + models: { + "gpt-5-codex": { + options: { + reasoningSummary: "concise" as const, + }, + }, + "gpt-5": { + options: { + reasoningEffort: "high" as const, + }, + }, + }, + }; + + const userConfig: UserConfig = { + global: providerConfig.options || {}, + models: providerConfig.models || {}, + }; + + describe("getModelConfig", () => { + it("should merge global and model-specific config for gpt-5-codex", () => { + const codexConfig = getModelConfig("gpt-5-codex", userConfig); + + expect(codexConfig.reasoningEffort).toBe("medium"); // from global + expect(codexConfig.reasoningSummary).toBe("concise"); // from model override + expect(codexConfig.textVerbosity).toBe("medium"); // from global + }); + + it("should merge global and model-specific config for gpt-5", () => { + const gpt5Config = getModelConfig("gpt-5", userConfig); + + expect(gpt5Config.reasoningEffort).toBe("high"); // from model override + expect(gpt5Config.reasoningSummary).toBe("auto"); // from global + expect(gpt5Config.textVerbosity).toBe("medium"); // from global + }); + + it("should return empty config when no config provided", () => { + const emptyConfig = getModelConfig("gpt-5-codex", { + global: {}, + models: {}, + }); + + expect(emptyConfig).toEqual({}); + }); + }); + + describe("getReasoningConfig", () => { + it("should use user settings from merged config for gpt-5-codex", () => { + const codexConfig = getModelConfig("gpt-5-codex", userConfig); + const reasoningConfig = getReasoningConfig("gpt-5-codex", codexConfig); + + expect(reasoningConfig.effort).toBe("medium"); + expect(reasoningConfig.summary).toBe("concise"); + }); + + it("should return defaults when no config provided", () => { + const emptyConfig = getModelConfig("gpt-5-codex", { + global: {}, + models: {}, + }); + const defaultReasoning = getReasoningConfig("gpt-5-codex", emptyConfig); + + expect(defaultReasoning.effort).toBe("medium"); + expect(defaultReasoning.summary).toBe("auto"); + }); + + it("should normalize 
minimal to low for lightweight models (nano/mini)", () => { + const nanoReasoning = getReasoningConfig("gpt-5-nano", {}); + + expect(nanoReasoning.effort).toBe("low"); + expect(nanoReasoning.summary).toBe("auto"); + }); + + it('should normalize "minimal" to "low" for gpt-5-codex', () => { + const codexMinimalConfig = { reasoningEffort: "minimal" as const }; + const codexMinimalReasoning = getReasoningConfig( + "gpt-5-codex", + codexMinimalConfig, + ); + + expect(codexMinimalReasoning.effort).toBe("low"); + expect(codexMinimalReasoning.summary).toBe("auto"); + }); + + it('should normalize "minimal" to "low" for all models (minimal not supported by gpt-5.1/5.2)', () => { + const gpt5MinimalConfig = { reasoningEffort: "minimal" as const }; + const gpt5MinimalReasoning = getReasoningConfig( + "gpt-5", + gpt5MinimalConfig, + ); + + expect(gpt5MinimalReasoning.effort).toBe("low"); + }); + + it("should handle high effort setting", () => { + const highConfig = { reasoningEffort: "high" as const }; + const highReasoning = getReasoningConfig("gpt-5", highConfig); + + expect(highReasoning.effort).toBe("high"); + expect(highReasoning.summary).toBe("auto"); + }); + + it("should respect custom summary setting", () => { + const detailedConfig = { reasoningSummary: "detailed" as const }; + const detailedReasoning = getReasoningConfig( + "gpt-5-codex", + detailedConfig, + ); + + expect(detailedReasoning.summary).toBe("detailed"); + }); + + it("should default codex-mini to medium effort", () => { + const codexMiniReasoning = getReasoningConfig("gpt-5-codex-mini", {}); + expect(codexMiniReasoning.effort).toBe("medium"); + }); + + it("should clamp codex-mini minimal/low to medium", () => { + const minimal = getReasoningConfig("gpt-5-codex-mini", { + reasoningEffort: "minimal", + }); + const low = getReasoningConfig("gpt-5-codex-mini-high", { + reasoningEffort: "low", + }); + + expect(minimal.effort).toBe("medium"); + expect(low.effort).toBe("medium"); + }); + + it("should keep codex-mini high effort when requested", () => { + const high = getReasoningConfig("codex-mini-latest", { + reasoningEffort: "high", + }); + expect(high.effort).toBe("high"); + }); + }); + + describe("Model-specific behavior", () => { + it("should normalize minimal to low for lightweight models", () => { + const miniReasoning = getReasoningConfig("gpt-5-mini", {}); + expect(miniReasoning.effort).toBe("low"); + }); + + it("should detect codex models correctly", () => { + const codexConfig = { reasoningEffort: "minimal" as const }; + const codexReasoning = getReasoningConfig("gpt-5-codex", codexConfig); + expect(codexReasoning.effort).toBe("low"); // normalized + }); + + it("should handle standard gpt-5 model", () => { + const gpt5Reasoning = getReasoningConfig("gpt-5", {}); + expect(gpt5Reasoning.effort).toBe("medium"); + }); + }); }); diff --git a/test/request-transformer.test.ts b/test/request-transformer.test.ts index e0fced8..7dd341e 100644 --- a/test/request-transformer.test.ts +++ b/test/request-transformer.test.ts @@ -1,1317 +1,1855 @@ -import { describe, it, expect, beforeEach, afterEach } from 'vitest'; +import { describe, it, expect, beforeEach, afterEach } from "vitest"; import { - normalizeModel, - getModelConfig, - filterInput, - addToolRemapMessage, - isOpenCodeSystemPrompt, - filterOpenCodeSystemPrompts, - addCodexBridgeMessage, - transformRequestBody, -} from '../lib/request/request-transformer.js'; -import { TOOL_REMAP_MESSAGE } from '../lib/prompts/codex.js'; -import { CODEX_OPENCODE_BRIDGE } from 
'../lib/prompts/codex-opencode-bridge.js'; -import type { RequestBody, UserConfig, InputItem } from '../lib/types.js'; - -describe('Request Transformer Module', () => { - describe('normalizeModel', () => { - // NOTE: All gpt-5 models now normalize to gpt-5.1 as gpt-5 is being phased out - it('should normalize gpt-5-codex to gpt-5.1-codex', async () => { - expect(normalizeModel('gpt-5-codex')).toBe('gpt-5.1-codex'); - }); - - it('should normalize gpt-5 to gpt-5.1', async () => { - expect(normalizeModel('gpt-5')).toBe('gpt-5.1'); - }); - - it('should normalize variants containing "codex" to gpt-5.1-codex', async () => { - expect(normalizeModel('openai/gpt-5-codex')).toBe('gpt-5.1-codex'); - expect(normalizeModel('custom-gpt-5-codex-variant')).toBe('gpt-5.1-codex'); - }); - - it('should normalize variants containing "gpt-5" to gpt-5.1', async () => { - expect(normalizeModel('gpt-5-mini')).toBe('gpt-5.1'); - expect(normalizeModel('gpt-5-nano')).toBe('gpt-5.1'); - }); - - it('should return gpt-5.1 as default for unknown models', async () => { - expect(normalizeModel('unknown-model')).toBe('gpt-5.1'); - expect(normalizeModel('gpt-4')).toBe('gpt-5.1'); - }); - - it('should return gpt-5.1 for undefined', async () => { - expect(normalizeModel(undefined)).toBe('gpt-5.1'); - }); - - // Codex CLI preset name tests - legacy gpt-5 models now map to gpt-5.1 - describe('Codex CLI preset names', () => { - it('should normalize all gpt-5-codex presets to gpt-5.1-codex', async () => { - expect(normalizeModel('gpt-5-codex-low')).toBe('gpt-5.1-codex'); - expect(normalizeModel('gpt-5-codex-medium')).toBe('gpt-5.1-codex'); - expect(normalizeModel('gpt-5-codex-high')).toBe('gpt-5.1-codex'); - }); - - it('should normalize all gpt-5 presets to gpt-5.1', async () => { - expect(normalizeModel('gpt-5-minimal')).toBe('gpt-5.1'); - expect(normalizeModel('gpt-5-low')).toBe('gpt-5.1'); - expect(normalizeModel('gpt-5-medium')).toBe('gpt-5.1'); - expect(normalizeModel('gpt-5-high')).toBe('gpt-5.1'); - }); - - it('should prioritize codex over gpt-5 in model name', async () => { - // Model name contains BOTH "codex" and "gpt-5" - // Should return "gpt-5.1-codex" (codex checked first, maps to 5.1) - expect(normalizeModel('gpt-5-codex-low')).toBe('gpt-5.1-codex'); - expect(normalizeModel('my-gpt-5-codex-model')).toBe('gpt-5.1-codex'); - }); - - it('should normalize codex mini presets to gpt-5.1-codex-mini', async () => { - expect(normalizeModel('gpt-5-codex-mini')).toBe('gpt-5.1-codex-mini'); - expect(normalizeModel('gpt-5-codex-mini-medium')).toBe('gpt-5.1-codex-mini'); - expect(normalizeModel('gpt-5-codex-mini-high')).toBe('gpt-5.1-codex-mini'); - expect(normalizeModel('openai/gpt-5-codex-mini-high')).toBe('gpt-5.1-codex-mini'); - expect(normalizeModel('codex-mini-latest')).toBe('gpt-5.1-codex-mini'); - expect(normalizeModel('openai/codex-mini-latest')).toBe('gpt-5.1-codex-mini'); - }); - - it('should normalize gpt-5.1 codex max presets', async () => { - expect(normalizeModel('gpt-5.1-codex-max')).toBe('gpt-5.1-codex-max'); - expect(normalizeModel('gpt-5.1-codex-max-high')).toBe('gpt-5.1-codex-max'); - expect(normalizeModel('gpt-5.1-codex-max-xhigh')).toBe('gpt-5.1-codex-max'); - expect(normalizeModel('openai/gpt-5.1-codex-max-medium')).toBe('gpt-5.1-codex-max'); - }); - - it('should normalize gpt-5.2 codex presets', async () => { - expect(normalizeModel('gpt-5.2-codex')).toBe('gpt-5.2-codex'); - expect(normalizeModel('gpt-5.2-codex-low')).toBe('gpt-5.2-codex'); - 
expect(normalizeModel('gpt-5.2-codex-medium')).toBe('gpt-5.2-codex'); - expect(normalizeModel('gpt-5.2-codex-high')).toBe('gpt-5.2-codex'); - expect(normalizeModel('gpt-5.2-codex-xhigh')).toBe('gpt-5.2-codex'); - expect(normalizeModel('openai/gpt-5.2-codex-xhigh')).toBe('gpt-5.2-codex'); - }); - - it('should normalize gpt-5.1 codex and mini slugs', async () => { - expect(normalizeModel('gpt-5.1-codex')).toBe('gpt-5.1-codex'); - expect(normalizeModel('openai/gpt-5.1-codex')).toBe('gpt-5.1-codex'); - expect(normalizeModel('gpt-5.1-codex-mini')).toBe('gpt-5.1-codex-mini'); - expect(normalizeModel('gpt-5.1-codex-mini-high')).toBe('gpt-5.1-codex-mini'); - expect(normalizeModel('openai/gpt-5.1-codex-mini-medium')).toBe('gpt-5.1-codex-mini'); - }); - - it('should normalize gpt-5.1 general-purpose slugs', async () => { - expect(normalizeModel('gpt-5.1')).toBe('gpt-5.1'); - expect(normalizeModel('openai/gpt-5.1')).toBe('gpt-5.1'); - expect(normalizeModel('GPT 5.1 High')).toBe('gpt-5.1'); - }); - }); - - // Edge case tests - legacy gpt-5 models now map to gpt-5.1 - describe('Edge cases', () => { - it('should handle uppercase model names', async () => { - expect(normalizeModel('GPT-5-CODEX')).toBe('gpt-5.1-codex'); - expect(normalizeModel('GPT-5-HIGH')).toBe('gpt-5.1'); - expect(normalizeModel('CODEx-MINI-LATEST')).toBe('gpt-5.1-codex-mini'); - }); - - it('should handle mixed case', async () => { - expect(normalizeModel('Gpt-5-Codex-Low')).toBe('gpt-5.1-codex'); - expect(normalizeModel('GpT-5-MeDiUm')).toBe('gpt-5.1'); - }); - - it('should handle special characters', async () => { - expect(normalizeModel('my_gpt-5_codex')).toBe('gpt-5.1-codex'); - expect(normalizeModel('gpt.5.high')).toBe('gpt-5.1'); - }); - - it('should handle old verbose names', async () => { - expect(normalizeModel('GPT 5 Codex Low (ChatGPT Subscription)')).toBe('gpt-5.1-codex'); - expect(normalizeModel('GPT 5 High (ChatGPT Subscription)')).toBe('gpt-5.1'); - }); - - it('should handle empty string', async () => { - expect(normalizeModel('')).toBe('gpt-5.1'); - }); - }); - }); - - describe('getModelConfig', () => { - describe('Per-model options (Bug Fix Verification)', () => { - it('should find per-model options using config key', async () => { - const userConfig: UserConfig = { - global: { reasoningEffort: 'medium' }, - models: { - 'gpt-5-codex-low': { - options: { reasoningEffort: 'low', textVerbosity: 'low' } - } - } - }; - - const result = getModelConfig('gpt-5-codex-low', userConfig); - expect(result.reasoningEffort).toBe('low'); - expect(result.textVerbosity).toBe('low'); - }); - - it('should merge global and per-model options (per-model wins)', async () => { - const userConfig: UserConfig = { - global: { - reasoningEffort: 'medium', - textVerbosity: 'medium', - include: ['reasoning.encrypted_content'] - }, - models: { - 'gpt-5-codex-high': { - options: { reasoningEffort: 'high' } // Override only effort - } - } - }; - - const result = getModelConfig('gpt-5-codex-high', userConfig); - expect(result.reasoningEffort).toBe('high'); // From per-model - expect(result.textVerbosity).toBe('medium'); // From global - expect(result.include).toEqual(['reasoning.encrypted_content']); // From global - }); - - it('should return global options when model not in config', async () => { - const userConfig: UserConfig = { - global: { reasoningEffort: 'medium' }, - models: { - 'gpt-5-codex-low': { options: { reasoningEffort: 'low' } } - } - }; - - // Looking up different model - const result = getModelConfig('gpt-5-codex', userConfig); - 
expect(result.reasoningEffort).toBe('medium'); // Global only - }); - - it('should handle empty config', async () => { - const result = getModelConfig('gpt-5-codex', { global: {}, models: {} }); - expect(result).toEqual({}); - }); - - it('should handle missing models object', async () => { - const userConfig: UserConfig = { - global: { reasoningEffort: 'low' }, - models: undefined as any - }; - const result = getModelConfig('gpt-5', userConfig); - expect(result.reasoningEffort).toBe('low'); - }); - }); - - describe('Backwards compatibility', () => { - it('should work with old verbose config keys', async () => { - const userConfig: UserConfig = { - global: {}, - models: { - 'GPT 5 Codex Low (ChatGPT Subscription)': { - options: { reasoningEffort: 'low' } - } - } - }; - - const result = getModelConfig('GPT 5 Codex Low (ChatGPT Subscription)', userConfig); - expect(result.reasoningEffort).toBe('low'); - }); - - it('should work with old configs that have id field', async () => { - const userConfig: UserConfig = { - global: {}, - models: { - 'gpt-5-codex-low': { - id: 'gpt-5-codex', // id field present but should be ignored - options: { reasoningEffort: 'low' } - } - } - }; - - const result = getModelConfig('gpt-5-codex-low', userConfig); - expect(result.reasoningEffort).toBe('low'); - }); - }); - - describe('Default models (no custom config)', () => { - it('should return global options for default gpt-5-codex', async () => { - const userConfig: UserConfig = { - global: { reasoningEffort: 'high' }, - models: {} - }; - - const result = getModelConfig('gpt-5-codex', userConfig); - expect(result.reasoningEffort).toBe('high'); - }); - - it('should return empty when no config at all', async () => { - const result = getModelConfig('gpt-5', undefined); - expect(result).toEqual({}); - }); - }); - }); - - describe('filterInput', () => { - it('should keep items without IDs unchanged', async () => { - const input: InputItem[] = [ - { type: 'message', role: 'user', content: 'hello' }, - ]; - const result = filterInput(input); - expect(result).toEqual(input); - expect(result![0]).not.toHaveProperty('id'); - }); - - it('should remove ALL message IDs (rs_, msg_, etc.) 
for store:false compatibility', async () => { - const input: InputItem[] = [ - { id: 'rs_123', type: 'message', role: 'assistant', content: 'hello' }, - { id: 'msg_456', type: 'message', role: 'user', content: 'world' }, - { id: 'assistant_789', type: 'message', role: 'assistant', content: 'test' }, - ]; - const result = filterInput(input); - - // All items should remain (no filtering), but ALL IDs removed - expect(result).toHaveLength(3); - expect(result![0]).not.toHaveProperty('id'); - expect(result![1]).not.toHaveProperty('id'); - expect(result![2]).not.toHaveProperty('id'); - expect(result![0].content).toBe('hello'); - expect(result![1].content).toBe('world'); - expect(result![2].content).toBe('test'); - }); - - it('should strip ID field but preserve all other properties', async () => { - const input: InputItem[] = [ - { - id: 'msg_123', - type: 'message', - role: 'user', - content: 'test', - metadata: { some: 'data' } - }, - ]; - const result = filterInput(input); - - expect(result).toHaveLength(1); - expect(result![0]).not.toHaveProperty('id'); - expect(result![0].type).toBe('message'); - expect(result![0].role).toBe('user'); - expect(result![0].content).toBe('test'); - expect(result![0]).toHaveProperty('metadata'); - }); - - it('should handle mixed items with and without IDs', async () => { - const input: InputItem[] = [ - { type: 'message', role: 'user', content: '1' }, - { id: 'rs_stored', type: 'message', role: 'assistant', content: '2' }, - { id: 'msg_123', type: 'message', role: 'user', content: '3' }, - ]; - const result = filterInput(input); - - // All items kept, IDs removed from items that had them - expect(result).toHaveLength(3); - expect(result![0]).not.toHaveProperty('id'); - expect(result![1]).not.toHaveProperty('id'); - expect(result![2]).not.toHaveProperty('id'); - expect(result![0].content).toBe('1'); - expect(result![1].content).toBe('2'); - expect(result![2].content).toBe('3'); - }); - - it('should handle custom ID formats (future-proof)', async () => { - const input: InputItem[] = [ - { id: 'custom_id_format', type: 'message', role: 'user', content: 'test' }, - { id: 'another-format-123', type: 'message', role: 'user', content: 'test2' }, - ]; - const result = filterInput(input); - - expect(result).toHaveLength(2); - expect(result![0]).not.toHaveProperty('id'); - expect(result![1]).not.toHaveProperty('id'); - }); - - it('should return undefined for undefined input', async () => { - expect(filterInput(undefined)).toBeUndefined(); - }); - - it('should return non-array input as-is', async () => { - const notArray = { notAnArray: true }; - expect(filterInput(notArray as any)).toBe(notArray); - }); - - it('should handle empty array', async () => { - const input: InputItem[] = []; - const result = filterInput(input); - expect(result).toEqual([]); - }); - }); - - describe('addToolRemapMessage', () => { - it('should prepend tool remap message when tools present', async () => { - const input: InputItem[] = [ - { type: 'message', role: 'user', content: 'hello' }, - ]; - const result = addToolRemapMessage(input, true); - - expect(result).toHaveLength(2); - expect(result![0].role).toBe('developer'); - expect(result![0].type).toBe('message'); - expect((result![0].content as any)[0].text).toContain('apply_patch'); - }); - - it('should not modify input when tools not present', async () => { - const input: InputItem[] = [ - { type: 'message', role: 'user', content: 'hello' }, - ]; - const result = addToolRemapMessage(input, false); - expect(result).toEqual(input); - }); - - 
it('should return undefined for undefined input', async () => { - expect(addToolRemapMessage(undefined, true)).toBeUndefined(); - }); - - it('should handle non-array input', async () => { - const notArray = { notAnArray: true }; - expect(addToolRemapMessage(notArray as any, true)).toBe(notArray); - }); - }); - - describe('isOpenCodeSystemPrompt', () => { - it('should detect OpenCode system prompt with string content', async () => { - const item: InputItem = { - type: 'message', - role: 'developer', - content: 'You are a coding agent running in OpenCode', - }; - expect(isOpenCodeSystemPrompt(item, null)).toBe(true); - }); - - it('should detect OpenCode system prompt with array content', async () => { - const item: InputItem = { - type: 'message', - role: 'developer', - content: [ - { - type: 'input_text', - text: 'You are a coding agent running in OpenCode', - }, - ], - }; - expect(isOpenCodeSystemPrompt(item, null)).toBe(true); - }); - - it('should detect with system role', async () => { - const item: InputItem = { - type: 'message', - role: 'system', - content: 'You are a coding agent running in OpenCode', - }; - expect(isOpenCodeSystemPrompt(item, null)).toBe(true); - }); - - it('should not detect non-system roles', async () => { - const item: InputItem = { - type: 'message', - role: 'user', - content: 'You are a coding agent running in OpenCode', - }; - expect(isOpenCodeSystemPrompt(item, null)).toBe(false); - }); - - it('should not detect different content', async () => { - const item: InputItem = { - type: 'message', - role: 'developer', - content: 'Different message', - }; - expect(isOpenCodeSystemPrompt(item, null)).toBe(false); - }); - - it('should NOT detect AGENTS.md content', async () => { - const item: InputItem = { - type: 'message', - role: 'developer', - content: '# Project Guidelines\n\nThis is custom AGENTS.md content for the project.', - }; - expect(isOpenCodeSystemPrompt(item, null)).toBe(false); - }); - - it('should NOT detect environment info concatenated with AGENTS.md', async () => { - const item: InputItem = { - type: 'message', - role: 'developer', - content: 'Environment: /path/to/project\nDate: 2025-01-01\n\n# AGENTS.md\n\nCustom instructions here.', - }; - expect(isOpenCodeSystemPrompt(item, null)).toBe(false); - }); - - it('should NOT detect content with codex signature in the middle', async () => { - const cachedPrompt = 'You are a coding agent running in OpenCode.'; - const item: InputItem = { - type: 'message', - role: 'developer', - // Has codex.txt content but with environment prepended (like OpenCode does) - content: 'Environment info here\n\nYou are a coding agent running in OpenCode.', - }; - // First 200 chars won't match because of prepended content - expect(isOpenCodeSystemPrompt(item, cachedPrompt)).toBe(false); - }); - - it('should detect with cached prompt exact match', async () => { - const cachedPrompt = 'You are a coding agent running in OpenCode'; - const item: InputItem = { - type: 'message', - role: 'developer', - content: 'You are a coding agent running in OpenCode', - }; - expect(isOpenCodeSystemPrompt(item, cachedPrompt)).toBe(true); - }); - }); - - describe('filterOpenCodeSystemPrompts', () => { - it('should filter out OpenCode system prompts', async () => { - const input: InputItem[] = [ - { - type: 'message', - role: 'developer', - content: 'You are a coding agent running in OpenCode', - }, - { type: 'message', role: 'user', content: 'hello' }, - ]; - const result = await filterOpenCodeSystemPrompts(input); - 
expect(result).toHaveLength(1); - expect(result![0].role).toBe('user'); - }); - - it('should keep user messages', async () => { - const input: InputItem[] = [ - { type: 'message', role: 'user', content: 'message 1' }, - { type: 'message', role: 'user', content: 'message 2' }, - ]; - const result = await filterOpenCodeSystemPrompts(input); - expect(result).toHaveLength(2); - }); - - it('should keep non-OpenCode developer messages', async () => { - const input: InputItem[] = [ - { type: 'message', role: 'developer', content: 'Custom instruction' }, - { type: 'message', role: 'user', content: 'hello' }, - ]; - const result = await filterOpenCodeSystemPrompts(input); - expect(result).toHaveLength(2); - }); - - it('should keep AGENTS.md content (not filter it)', async () => { - const input: InputItem[] = [ - { - type: 'message', - role: 'developer', - content: 'You are a coding agent running in OpenCode', // This is codex.txt - }, - { - type: 'message', - role: 'developer', - content: '# Project Guidelines\n\nThis is AGENTS.md content.', // This is AGENTS.md - }, - { type: 'message', role: 'user', content: 'hello' }, - ]; - const result = await filterOpenCodeSystemPrompts(input); - // Should filter codex.txt but keep AGENTS.md - expect(result).toHaveLength(2); - expect(result![0].content).toContain('AGENTS.md'); - expect(result![1].role).toBe('user'); - }); - - it('should keep environment+AGENTS.md concatenated message', async () => { - const input: InputItem[] = [ - { - type: 'message', - role: 'developer', - content: 'You are a coding agent running in OpenCode', // codex.txt alone - }, - { - type: 'message', - role: 'developer', - // environment + AGENTS.md joined (like OpenCode does) - content: 'Working directory: /path/to/project\nDate: 2025-01-01\n\n# AGENTS.md\n\nCustom instructions.', - }, - { type: 'message', role: 'user', content: 'hello' }, - ]; - const result = await filterOpenCodeSystemPrompts(input); - // Should filter first message (codex.txt) but keep second (env+AGENTS.md) - expect(result).toHaveLength(2); - expect(result![0].content).toContain('AGENTS.md'); - expect(result![1].role).toBe('user'); - }); - - it('should return undefined for undefined input', async () => { - expect(await filterOpenCodeSystemPrompts(undefined)).toBeUndefined(); - }); - }); - - describe('addCodexBridgeMessage', () => { - it('should prepend bridge message when tools present', async () => { - const input: InputItem[] = [ - { type: 'message', role: 'user', content: 'hello' }, - ]; - const result = addCodexBridgeMessage(input, true); - - expect(result).toHaveLength(2); - expect(result![0].role).toBe('developer'); - expect(result![0].type).toBe('message'); - expect((result![0].content as any)[0].text).toContain('Codex Running in OpenCode'); - }); - - it('should not modify input when tools not present', async () => { - const input: InputItem[] = [ - { type: 'message', role: 'user', content: 'hello' }, - ]; - const result = addCodexBridgeMessage(input, false); - expect(result).toEqual(input); - }); - - it('should return undefined for undefined input', async () => { - expect(addCodexBridgeMessage(undefined, true)).toBeUndefined(); - }); - }); - - describe('transformRequestBody', () => { - const codexInstructions = 'Test Codex Instructions'; - - it('preserves existing prompt_cache_key passed by host (OpenCode)', async () => { - const body: RequestBody = { - model: 'gpt-5-codex', - input: [], - // Host-provided key (OpenCode session id) - // @ts-expect-error extra field allowed - prompt_cache_key: 
'ses_host_key_123', - }; - const result: any = await transformRequestBody(body, codexInstructions); - expect(result.prompt_cache_key).toBe('ses_host_key_123'); - }); - - it('leaves prompt_cache_key unset when host does not supply one', async () => { - const body: RequestBody = { - model: 'gpt-5', - input: [], - }; - const result: any = await transformRequestBody(body, codexInstructions); - expect(result.prompt_cache_key).toBeUndefined(); - }); - - it('should set required Codex fields', async () => { - const body: RequestBody = { - model: 'gpt-5', - input: [], - }; - const result = await transformRequestBody(body, codexInstructions); - - expect(result.store).toBe(false); - expect(result.stream).toBe(true); - expect(result.instructions).toBe(codexInstructions); - }); - - it('should normalize model name', async () => { - const body: RequestBody = { - model: 'gpt-5-mini', - input: [], - }; - const result = await transformRequestBody(body, codexInstructions); - expect(result.model).toBe('gpt-5.1'); // gpt-5 now maps to gpt-5.1 - }); - - it('should apply default reasoning config', async () => { - const body: RequestBody = { - model: 'gpt-5', - input: [], - }; - const result = await transformRequestBody(body, codexInstructions); - - expect(result.reasoning?.effort).toBe('medium'); - expect(result.reasoning?.summary).toBe('auto'); - }); - - it('should apply user reasoning config', async () => { - const body: RequestBody = { - model: 'gpt-5', - input: [], - }; - const userConfig: UserConfig = { - global: { - reasoningEffort: 'high', - reasoningSummary: 'detailed', - }, - models: {}, - }; - const result = await transformRequestBody(body, codexInstructions, userConfig); - - expect(result.reasoning?.effort).toBe('high'); - expect(result.reasoning?.summary).toBe('detailed'); - }); - - it('should apply default text verbosity', async () => { - const body: RequestBody = { - model: 'gpt-5', - input: [], - }; - const result = await transformRequestBody(body, codexInstructions); - expect(result.text?.verbosity).toBe('medium'); - }); - - it('should apply user text verbosity', async () => { - const body: RequestBody = { - model: 'gpt-5', - input: [], - }; - const userConfig: UserConfig = { - global: { textVerbosity: 'low' }, - models: {}, - }; - const result = await transformRequestBody(body, codexInstructions, userConfig); - expect(result.text?.verbosity).toBe('low'); - }); - - it('should set default include for encrypted reasoning', async () => { - const body: RequestBody = { - model: 'gpt-5', - input: [], - }; - const result = await transformRequestBody(body, codexInstructions); - expect(result.include).toEqual(['reasoning.encrypted_content']); - }); - - it('should use user-configured include', async () => { - const body: RequestBody = { - model: 'gpt-5', - input: [], - }; - const userConfig: UserConfig = { - global: { include: ['custom_field', 'reasoning.encrypted_content'] }, - models: {}, - }; - const result = await transformRequestBody(body, codexInstructions, userConfig); - expect(result.include).toEqual(['custom_field', 'reasoning.encrypted_content']); - }); - - it('should remove IDs from input array (keep all items, strip IDs)', async () => { - const body: RequestBody = { - model: 'gpt-5', - input: [ - { id: 'rs_123', type: 'message', role: 'assistant', content: 'old' }, - { type: 'message', role: 'user', content: 'new' }, - ], - }; - const result = await transformRequestBody(body, codexInstructions); - - // All items kept, IDs removed - expect(result.input).toHaveLength(2); - 
expect(result.input![0]).not.toHaveProperty('id'); - expect(result.input![1]).not.toHaveProperty('id'); - expect(result.input![0].content).toBe('old'); - expect(result.input![1].content).toBe('new'); - }); - - it('should add tool remap message when tools present', async () => { - const body: RequestBody = { - model: 'gpt-5', - input: [{ type: 'message', role: 'user', content: 'hello' }], - tools: [{ name: 'test_tool' }], - }; - const result = await transformRequestBody(body, codexInstructions); - expect(result.input![0].role).toBe('developer'); - }); - - it('should not add tool remap message when tools absent', async () => { - const body: RequestBody = { - model: 'gpt-5', - input: [{ type: 'message', role: 'user', content: 'hello' }], - }; - const result = await transformRequestBody(body, codexInstructions); - expect(result.input![0].role).toBe('user'); - }); - - it('should remove unsupported parameters', async () => { - const body: RequestBody = { - model: 'gpt-5', - input: [], - max_output_tokens: 1000, - max_completion_tokens: 2000, - }; - const result = await transformRequestBody(body, codexInstructions); - expect(result.max_output_tokens).toBeUndefined(); - expect(result.max_completion_tokens).toBeUndefined(); - }); - - it('should normalize minimal to low for gpt-5-codex', async () => { - const body: RequestBody = { - model: 'gpt-5-codex', - input: [], - }; - const userConfig: UserConfig = { - global: { reasoningEffort: 'minimal' }, - models: {}, - }; - const result = await transformRequestBody(body, codexInstructions, userConfig); - expect(result.reasoning?.effort).toBe('low'); - }); - - it('should clamp xhigh to high for codex-mini', async () => { - const body: RequestBody = { - model: 'gpt-5.1-codex-mini-high', - input: [], - }; - const userConfig: UserConfig = { - global: { reasoningEffort: 'xhigh' }, - models: {}, - }; - const result = await transformRequestBody(body, codexInstructions, userConfig); - expect(result.reasoning?.effort).toBe('high'); - }); - - it('should clamp none to medium for codex-mini', async () => { - const body: RequestBody = { - model: 'gpt-5.1-codex-mini-medium', - input: [], - }; - const userConfig: UserConfig = { - global: { reasoningEffort: 'none' }, - models: {}, - }; - const result = await transformRequestBody(body, codexInstructions, userConfig); - expect(result.reasoning?.effort).toBe('medium'); - }); - - it('should default codex-max to high effort', async () => { - const body: RequestBody = { - model: 'gpt-5.1-codex-max', - input: [], - }; - const result = await transformRequestBody(body, codexInstructions); - expect(result.reasoning?.effort).toBe('high'); - }); - - it('should default gpt-5.2-codex to high effort', async () => { - const body: RequestBody = { - model: 'gpt-5.2-codex', - input: [], - }; - const result = await transformRequestBody(body, codexInstructions); - expect(result.model).toBe('gpt-5.2-codex'); - expect(result.reasoning?.effort).toBe('high'); - }); - - it('should preserve xhigh for codex-max when requested', async () => { - const body: RequestBody = { - model: 'gpt-5.1-codex-max-xhigh', - input: [], - }; - const userConfig: UserConfig = { - global: { reasoningSummary: 'auto' }, - models: { - 'gpt-5.1-codex-max-xhigh': { - options: { reasoningEffort: 'xhigh', reasoningSummary: 'detailed' }, - }, - }, - }; - const result = await transformRequestBody(body, codexInstructions, userConfig); - expect(result.model).toBe('gpt-5.1-codex-max'); - expect(result.reasoning?.effort).toBe('xhigh'); - 
expect(result.reasoning?.summary).toBe('detailed'); - }); - - it('should preserve xhigh for gpt-5.2-codex when requested', async () => { - const body: RequestBody = { - model: 'gpt-5.2-codex-xhigh', - input: [], - }; - const userConfig: UserConfig = { - global: { reasoningSummary: 'auto' }, - models: { - 'gpt-5.2-codex-xhigh': { - options: { reasoningEffort: 'xhigh', reasoningSummary: 'detailed' }, - }, - }, - }; - const result = await transformRequestBody(body, codexInstructions, userConfig); - expect(result.model).toBe('gpt-5.2-codex'); - expect(result.reasoning?.effort).toBe('xhigh'); - expect(result.reasoning?.summary).toBe('detailed'); - }); - - it('should downgrade xhigh to high for non-max codex', async () => { - const body: RequestBody = { - model: 'gpt-5.1-codex-high', - input: [], - }; - const userConfig: UserConfig = { - global: { reasoningEffort: 'xhigh' }, - models: {}, - }; - const result = await transformRequestBody(body, codexInstructions, userConfig); - expect(result.model).toBe('gpt-5.1-codex'); - expect(result.reasoning?.effort).toBe('high'); - }); - - it('should downgrade xhigh to high for non-max general models', async () => { - const body: RequestBody = { - model: 'gpt-5.1-high', - input: [], - }; - const userConfig: UserConfig = { - global: { reasoningEffort: 'xhigh' }, - models: {}, - }; - const result = await transformRequestBody(body, codexInstructions, userConfig); - expect(result.model).toBe('gpt-5.1'); - expect(result.reasoning?.effort).toBe('high'); - }); - - it('should preserve none for GPT-5.2', async () => { - const body: RequestBody = { - model: 'gpt-5.2-none', - input: [], - }; - const userConfig: UserConfig = { - global: { reasoningEffort: 'none' }, - models: {}, - }; - const result = await transformRequestBody(body, codexInstructions, userConfig); - expect(result.model).toBe('gpt-5.2'); - expect(result.reasoning?.effort).toBe('none'); - }); - - it('should upgrade none to low for GPT-5.2-codex (codex does not support none)', async () => { - const body: RequestBody = { - model: 'gpt-5.2-codex', - input: [], - }; - const userConfig: UserConfig = { - global: { reasoningEffort: 'none' }, - models: {}, - }; - const result = await transformRequestBody(body, codexInstructions, userConfig); - expect(result.model).toBe('gpt-5.2-codex'); - expect(result.reasoning?.effort).toBe('low'); - }); - - it('should normalize minimal to low for gpt-5.2-codex', async () => { - const body: RequestBody = { - model: 'gpt-5.2-codex', - input: [], - }; - const userConfig: UserConfig = { - global: { reasoningEffort: 'minimal' }, - models: {}, - }; - const result = await transformRequestBody(body, codexInstructions, userConfig); - expect(result.model).toBe('gpt-5.2-codex'); - expect(result.reasoning?.effort).toBe('low'); - }); - - it('should preserve none for GPT-5.1 general purpose', async () => { - const body: RequestBody = { - model: 'gpt-5.1-none', - input: [], - }; - const userConfig: UserConfig = { - global: { reasoningEffort: 'none' }, - models: {}, - }; - const result = await transformRequestBody(body, codexInstructions, userConfig); - expect(result.model).toBe('gpt-5.1'); - expect(result.reasoning?.effort).toBe('none'); - }); - - it('should upgrade none to low for GPT-5.1-codex (codex does not support none)', async () => { - const body: RequestBody = { - model: 'gpt-5.1-codex', - input: [], - }; - const userConfig: UserConfig = { - global: { reasoningEffort: 'none' }, - models: {}, - }; - const result = await transformRequestBody(body, codexInstructions, userConfig); - 
expect(result.model).toBe('gpt-5.1-codex'); - expect(result.reasoning?.effort).toBe('low'); - }); - - it('should upgrade none to low for GPT-5.1-codex-max (codex max does not support none)', async () => { - const body: RequestBody = { - model: 'gpt-5.1-codex-max', - input: [], - }; - const userConfig: UserConfig = { - global: { reasoningEffort: 'none' }, - models: {}, - }; - const result = await transformRequestBody(body, codexInstructions, userConfig); - expect(result.model).toBe('gpt-5.1-codex-max'); - expect(result.reasoning?.effort).toBe('low'); - }); - - it('should preserve minimal for non-codex models', async () => { - const body: RequestBody = { - model: 'gpt-5', - input: [], - }; - const userConfig: UserConfig = { - global: { reasoningEffort: 'minimal' }, - models: {}, - }; - const result = await transformRequestBody(body, codexInstructions, userConfig); - expect(result.reasoning?.effort).toBe('minimal'); - }); - - it('should use minimal effort for lightweight models', async () => { - const body: RequestBody = { - model: 'gpt-5-nano', - input: [], - }; - const result = await transformRequestBody(body, codexInstructions); - expect(result.reasoning?.effort).toBe('medium'); - }); - - it('should convert orphaned function_call_output to message to preserve context', async () => { - const body: RequestBody = { - model: 'gpt-5-codex', - input: [ - { type: 'message', role: 'user', content: 'hello' }, - { type: 'function_call_output', role: 'assistant', call_id: 'orphan_call', name: 'read', output: '{}' } as any, - ], - }; - - const result = await transformRequestBody(body, codexInstructions); - - expect(result.tools).toBeUndefined(); - expect(result.input).toHaveLength(2); - expect(result.input![0].type).toBe('message'); - expect(result.input![1].type).toBe('message'); - expect(result.input![1].role).toBe('assistant'); - expect(result.input![1].content).toContain('[Previous read result; call_id=orphan_call]'); - }); - - it('should keep matched function_call pairs when no tools present (for compaction)', async () => { - const body: RequestBody = { - model: 'gpt-5-codex', - input: [ - { type: 'message', role: 'user', content: 'hello' }, - { type: 'function_call', call_id: 'call_1', name: 'write', arguments: '{}' } as any, - { type: 'function_call_output', call_id: 'call_1', output: 'success' } as any, - ], - }; - - const result = await transformRequestBody(body, codexInstructions); - - expect(result.tools).toBeUndefined(); - expect(result.input).toHaveLength(3); - expect(result.input![1].type).toBe('function_call'); - expect(result.input![2].type).toBe('function_call_output'); - }); - - describe('CODEX_MODE parameter', () => { - it('should use bridge message when codexMode=true and tools present (default)', async () => { - const body: RequestBody = { - model: 'gpt-5', - input: [{ type: 'message', role: 'user', content: 'hello' }], - tools: [{ name: 'test_tool' }], - }; - const result = await transformRequestBody(body, codexInstructions, undefined, true); - - expect(result.input).toHaveLength(2); - expect(result.input![0].role).toBe('developer'); - expect((result.input![0].content as any)[0].text).toContain('Codex Running in OpenCode'); - }); - - it('should filter OpenCode prompts when codexMode=true', async () => { - const body: RequestBody = { - model: 'gpt-5', - input: [ - { - type: 'message', - role: 'developer', - content: 'You are a coding agent running in OpenCode', - }, - { type: 'message', role: 'user', content: 'hello' }, - ], - tools: [{ name: 'test_tool' }], - }; - const result = 
await transformRequestBody(body, codexInstructions, undefined, true); - - // Should have bridge message + user message (OpenCode prompt filtered out) - expect(result.input).toHaveLength(2); - expect(result.input![0].role).toBe('developer'); - expect((result.input![0].content as any)[0].text).toContain('Codex Running in OpenCode'); - expect(result.input![1].role).toBe('user'); - }); - - it('should not add bridge message when codexMode=true but no tools', async () => { - const body: RequestBody = { - model: 'gpt-5', - input: [{ type: 'message', role: 'user', content: 'hello' }], - }; - const result = await transformRequestBody(body, codexInstructions, undefined, true); - - expect(result.input).toHaveLength(1); - expect(result.input![0].role).toBe('user'); - }); - - it('should use tool remap message when codexMode=false', async () => { - const body: RequestBody = { - model: 'gpt-5', - input: [{ type: 'message', role: 'user', content: 'hello' }], - tools: [{ name: 'test_tool' }], - }; - const result = await transformRequestBody(body, codexInstructions, undefined, false); - - expect(result.input).toHaveLength(2); - expect(result.input![0].role).toBe('developer'); - expect((result.input![0].content as any)[0].text).toContain('apply_patch'); - }); - - it('should not filter OpenCode prompts when codexMode=false', async () => { - const body: RequestBody = { - model: 'gpt-5', - input: [ - { - type: 'message', - role: 'developer', - content: 'You are a coding agent running in OpenCode', - }, - { type: 'message', role: 'user', content: 'hello' }, - ], - tools: [{ name: 'test_tool' }], - }; - const result = await transformRequestBody(body, codexInstructions, undefined, false); - - // Should have tool remap + opencode prompt + user message - expect(result.input).toHaveLength(3); - expect(result.input![0].role).toBe('developer'); - expect((result.input![0].content as any)[0].text).toContain('apply_patch'); - expect(result.input![1].role).toBe('developer'); - expect(result.input![2].role).toBe('user'); - }); - - it('should default to codexMode=true when parameter not provided', async () => { - const body: RequestBody = { - model: 'gpt-5', - input: [{ type: 'message', role: 'user', content: 'hello' }], - tools: [{ name: 'test_tool' }], - }; - // Not passing codexMode parameter - should default to true - const result = await transformRequestBody(body, codexInstructions); - - // Should use bridge message (codexMode=true by default) - expect(result.input![0].role).toBe('developer'); - expect((result.input![0].content as any)[0].text).toContain('Codex Running in OpenCode'); - }); - }); - - // NEW: Integration tests for all config scenarios - describe('Integration: Complete Config Scenarios', () => { - describe('Scenario 1: Default models (no custom config)', () => { - it('should handle gpt-5-codex with global options only', async () => { - const body: RequestBody = { - model: 'gpt-5-codex', - input: [] - }; - const userConfig: UserConfig = { - global: { reasoningEffort: 'high' }, - models: {} - }; - - const result = await transformRequestBody(body, codexInstructions, userConfig); - - expect(result.model).toBe('gpt-5.1-codex'); // gpt-5-codex now maps to gpt-5.1-codex - expect(result.reasoning?.effort).toBe('high'); // From global - expect(result.store).toBe(false); - }); - - it('should handle gpt-5-mini normalizing to gpt-5.1', async () => { - const body: RequestBody = { - model: 'gpt-5-mini', - input: [] - }; - - const result = await transformRequestBody(body, codexInstructions); - - 
expect(result.model).toBe('gpt-5.1'); // gpt-5 now maps to gpt-5.1 - expect(result.reasoning?.effort).toBe('medium'); // Default for normalized gpt-5.1 - }); - }); - - describe('Scenario 2: Custom preset names (new style)', () => { - const userConfig: UserConfig = { - global: { reasoningEffort: 'medium', include: ['reasoning.encrypted_content'] }, - models: { - 'gpt-5-codex-low': { - options: { reasoningEffort: 'low' } - }, - 'gpt-5-codex-high': { - options: { reasoningEffort: 'high', reasoningSummary: 'detailed' } - } - } - }; - - it('should apply per-model options for gpt-5-codex-low', async () => { - const body: RequestBody = { - model: 'gpt-5-codex-low', - input: [] - }; - - const result = await transformRequestBody(body, codexInstructions, userConfig); - - expect(result.model).toBe('gpt-5.1-codex'); // gpt-5-codex now maps to gpt-5.1-codex - expect(result.reasoning?.effort).toBe('low'); // From per-model - expect(result.include).toEqual(['reasoning.encrypted_content']); // From global - }); - - it('should apply per-model options for gpt-5-codex-high', async () => { - const body: RequestBody = { - model: 'gpt-5-codex-high', - input: [] - }; - - const result = await transformRequestBody(body, codexInstructions, userConfig); - - expect(result.model).toBe('gpt-5.1-codex'); // gpt-5-codex now maps to gpt-5.1-codex - expect(result.reasoning?.effort).toBe('high'); // From per-model - expect(result.reasoning?.summary).toBe('detailed'); // From per-model - }); - - it('should use global options for default gpt-5-codex', async () => { - const body: RequestBody = { - model: 'gpt-5-codex', - input: [] - }; - - const result = await transformRequestBody(body, codexInstructions, userConfig); - - expect(result.model).toBe('gpt-5.1-codex'); // gpt-5-codex now maps to gpt-5.1-codex - expect(result.reasoning?.effort).toBe('medium'); // From global (no per-model) - }); - }); - - describe('Scenario 3: Backwards compatibility (old verbose names)', () => { - const userConfig: UserConfig = { - global: {}, - models: { - 'GPT 5 Codex Low (ChatGPT Subscription)': { - options: { reasoningEffort: 'low', textVerbosity: 'low' } - } - } - }; - - it('should find and apply old config format', async () => { - const body: RequestBody = { - model: 'GPT 5 Codex Low (ChatGPT Subscription)', - input: [] - }; - - const result = await transformRequestBody(body, codexInstructions, userConfig); - - expect(result.model).toBe('gpt-5.1-codex'); // gpt-5-codex now maps to gpt-5.1-codex - expect(result.reasoning?.effort).toBe('low'); // From per-model (old format) - expect(result.text?.verbosity).toBe('low'); - }); - }); - - describe('Scenario 4: Mixed default + custom models', () => { - const userConfig: UserConfig = { - global: { reasoningEffort: 'medium' }, - models: { - 'gpt-5-codex-low': { - options: { reasoningEffort: 'low' } - } - } - }; - - it('should use per-model for custom variant', async () => { - const body: RequestBody = { - model: 'gpt-5-codex-low', - input: [] - }; - - const result = await transformRequestBody(body, codexInstructions, userConfig); - - expect(result.reasoning?.effort).toBe('low'); // Per-model - }); - - it('should use global for default model', async () => { - const body: RequestBody = { - model: 'gpt-5', - input: [] - }; - - const result = await transformRequestBody(body, codexInstructions, userConfig); - - expect(result.reasoning?.effort).toBe('medium'); // Global - }); - }); - - describe('Scenario 5: Message ID filtering with multi-turn', () => { - it('should remove ALL IDs in multi-turn 
conversation', async () => { - const body: RequestBody = { - model: 'gpt-5-codex', - input: [ - { id: 'msg_turn1', type: 'message', role: 'user', content: 'first' }, - { id: 'rs_response1', type: 'message', role: 'assistant', content: 'response' }, - { id: 'msg_turn2', type: 'message', role: 'user', content: 'second' }, - { id: 'assistant_123', type: 'message', role: 'assistant', content: 'reply' }, - ] - }; - - const result = await transformRequestBody(body, codexInstructions); - - // All items kept, ALL IDs removed - expect(result.input).toHaveLength(4); - expect(result.input!.every(item => !item.id)).toBe(true); - expect(result.store).toBe(false); // Stateless mode - expect(result.include).toEqual(['reasoning.encrypted_content']); - }); - }); - - describe('Scenario 6: Complete end-to-end transformation', () => { - it('should handle full transformation: custom model + IDs + tools', async () => { - const userConfig: UserConfig = { - global: { include: ['reasoning.encrypted_content'] }, - models: { - 'gpt-5-codex-low': { - options: { - reasoningEffort: 'low', - textVerbosity: 'low', - reasoningSummary: 'auto' - } - } - } - }; - - const body: RequestBody = { - model: 'gpt-5-codex-low', - input: [ - { id: 'msg_1', type: 'message', role: 'user', content: 'test' }, - { id: 'rs_2', type: 'message', role: 'assistant', content: 'reply' } - ], - tools: [{ name: 'edit' }] - }; - - const result = await transformRequestBody(body, codexInstructions, userConfig); - - // Model normalized (gpt-5-codex now maps to gpt-5.1-codex) - expect(result.model).toBe('gpt-5.1-codex'); - - // IDs removed - expect(result.input!.every(item => !item.id)).toBe(true); - - // Per-model options applied - expect(result.reasoning?.effort).toBe('low'); - expect(result.reasoning?.summary).toBe('auto'); - expect(result.text?.verbosity).toBe('low'); - - // Codex fields set - expect(result.store).toBe(false); - expect(result.stream).toBe(true); - expect(result.instructions).toBe(codexInstructions); - expect(result.include).toEqual(['reasoning.encrypted_content']); - }); - }); - }); - }); + normalizeModel, + getModelConfig, + filterInput, + addToolRemapMessage, + isOpenCodeSystemPrompt, + filterOpenCodeSystemPrompts, + addCodexBridgeMessage, + transformRequestBody, +} from "../lib/request/request-transformer.js"; +import { TOOL_REMAP_MESSAGE } from "../lib/prompts/codex.js"; +import { CODEX_OPENCODE_BRIDGE } from "../lib/prompts/codex-opencode-bridge.js"; +import type { RequestBody, UserConfig, InputItem } from "../lib/types.js"; + +describe("Request Transformer Module", () => { + describe("normalizeModel", () => { + // NOTE: All gpt-5 models now normalize to gpt-5.1 as gpt-5 is being phased out + it("should normalize gpt-5-codex to gpt-5.1-codex", async () => { + expect(normalizeModel("gpt-5-codex")).toBe("gpt-5.1-codex"); + }); + + it("should normalize gpt-5 to gpt-5.1", async () => { + expect(normalizeModel("gpt-5")).toBe("gpt-5.1"); + }); + + it('should normalize variants containing "codex" to gpt-5.1-codex', async () => { + expect(normalizeModel("openai/gpt-5-codex")).toBe("gpt-5.1-codex"); + expect(normalizeModel("custom-gpt-5-codex-variant")).toBe( + "gpt-5.1-codex" + ); + }); + + it('should normalize variants containing "gpt-5" to gpt-5.1', async () => { + expect(normalizeModel("gpt-5-mini")).toBe("gpt-5.1"); + expect(normalizeModel("gpt-5-nano")).toBe("gpt-5.1"); + }); + + it("should return gpt-5.1 as default for unknown models", async () => { + expect(normalizeModel("unknown-model")).toBe("gpt-5.1"); + 
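// gpt-4 matches neither the "codex" nor the "gpt-5" patterns, so it also falls back to the gpt-5.1 default. +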
expect(normalizeModel("gpt-4")).toBe("gpt-5.1"); + }); + + it("should return gpt-5.1 for undefined", async () => { + expect(normalizeModel(undefined)).toBe("gpt-5.1"); + }); + + // Codex CLI preset name tests - legacy gpt-5 models now map to gpt-5.1 + describe("Codex CLI preset names", () => { + it("should normalize all gpt-5-codex presets to gpt-5.1-codex", async () => { + expect(normalizeModel("gpt-5-codex-low")).toBe("gpt-5.1-codex"); + expect(normalizeModel("gpt-5-codex-medium")).toBe("gpt-5.1-codex"); + expect(normalizeModel("gpt-5-codex-high")).toBe("gpt-5.1-codex"); + }); + + it("should normalize all gpt-5 presets to gpt-5.1", async () => { + expect(normalizeModel("gpt-5-minimal")).toBe("gpt-5.1"); + expect(normalizeModel("gpt-5-low")).toBe("gpt-5.1"); + expect(normalizeModel("gpt-5-medium")).toBe("gpt-5.1"); + expect(normalizeModel("gpt-5-high")).toBe("gpt-5.1"); + }); + + it("should prioritize codex over gpt-5 in model name", async () => { + // Model name contains BOTH "codex" and "gpt-5" + // Should return "gpt-5.1-codex" (codex checked first, maps to 5.1) + expect(normalizeModel("gpt-5-codex-low")).toBe("gpt-5.1-codex"); + expect(normalizeModel("my-gpt-5-codex-model")).toBe("gpt-5.1-codex"); + }); + + it("should normalize codex mini presets to gpt-5.1-codex-mini", async () => { + expect(normalizeModel("gpt-5-codex-mini")).toBe("gpt-5.1-codex-mini"); + expect(normalizeModel("gpt-5-codex-mini-medium")).toBe( + "gpt-5.1-codex-mini" + ); + expect(normalizeModel("gpt-5-codex-mini-high")).toBe( + "gpt-5.1-codex-mini" + ); + expect(normalizeModel("openai/gpt-5-codex-mini-high")).toBe( + "gpt-5.1-codex-mini" + ); + expect(normalizeModel("codex-mini-latest")).toBe("gpt-5.1-codex-mini"); + expect(normalizeModel("openai/codex-mini-latest")).toBe( + "gpt-5.1-codex-mini" + ); + }); + + it("should normalize gpt-5.1 codex max presets", async () => { + expect(normalizeModel("gpt-5.1-codex-max")).toBe("gpt-5.1-codex-max"); + expect(normalizeModel("gpt-5.1-codex-max-high")).toBe( + "gpt-5.1-codex-max" + ); + expect(normalizeModel("gpt-5.1-codex-max-xhigh")).toBe( + "gpt-5.1-codex-max" + ); + expect(normalizeModel("openai/gpt-5.1-codex-max-medium")).toBe( + "gpt-5.1-codex-max" + ); + }); + + it("should normalize gpt-5.2 codex presets", async () => { + expect(normalizeModel("gpt-5.2-codex")).toBe("gpt-5.2-codex"); + expect(normalizeModel("gpt-5.2-codex-low")).toBe("gpt-5.2-codex"); + expect(normalizeModel("gpt-5.2-codex-medium")).toBe("gpt-5.2-codex"); + expect(normalizeModel("gpt-5.2-codex-high")).toBe("gpt-5.2-codex"); + expect(normalizeModel("gpt-5.2-codex-xhigh")).toBe("gpt-5.2-codex"); + expect(normalizeModel("openai/gpt-5.2-codex-xhigh")).toBe( + "gpt-5.2-codex" + ); + }); + + it("should normalize gpt-5.1 codex and mini slugs", async () => { + expect(normalizeModel("gpt-5.1-codex")).toBe("gpt-5.1-codex"); + expect(normalizeModel("openai/gpt-5.1-codex")).toBe("gpt-5.1-codex"); + expect(normalizeModel("gpt-5.1-codex-mini")).toBe("gpt-5.1-codex-mini"); + expect(normalizeModel("gpt-5.1-codex-mini-high")).toBe( + "gpt-5.1-codex-mini" + ); + expect(normalizeModel("openai/gpt-5.1-codex-mini-medium")).toBe( + "gpt-5.1-codex-mini" + ); + }); + + it("should normalize gpt-5.1 general-purpose slugs", async () => { + expect(normalizeModel("gpt-5.1")).toBe("gpt-5.1"); + expect(normalizeModel("openai/gpt-5.1")).toBe("gpt-5.1"); + expect(normalizeModel("GPT 5.1 High")).toBe("gpt-5.1"); + }); + }); + + // Edge case tests - legacy gpt-5 models now map to gpt-5.1 + describe("Edge cases", () => { + it("should 
handle uppercase model names", async () => { + expect(normalizeModel("GPT-5-CODEX")).toBe("gpt-5.1-codex"); + expect(normalizeModel("GPT-5-HIGH")).toBe("gpt-5.1"); + expect(normalizeModel("CODEx-MINI-LATEST")).toBe("gpt-5.1-codex-mini"); + }); + + it("should handle mixed case", async () => { + expect(normalizeModel("Gpt-5-Codex-Low")).toBe("gpt-5.1-codex"); + expect(normalizeModel("GpT-5-MeDiUm")).toBe("gpt-5.1"); + }); + + it("should handle special characters", async () => { + expect(normalizeModel("my_gpt-5_codex")).toBe("gpt-5.1-codex"); + expect(normalizeModel("gpt.5.high")).toBe("gpt-5.1"); + }); + + it("should handle old verbose names", async () => { + expect(normalizeModel("GPT 5 Codex Low (ChatGPT Subscription)")).toBe( + "gpt-5.1-codex" + ); + expect(normalizeModel("GPT 5 High (ChatGPT Subscription)")).toBe( + "gpt-5.1" + ); + }); + + it("should handle empty string", async () => { + expect(normalizeModel("")).toBe("gpt-5.1"); + }); + }); + }); + + describe("getModelConfig", () => { + describe("Per-model options (Bug Fix Verification)", () => { + it("should find per-model options using config key", async () => { + const userConfig: UserConfig = { + global: { reasoningEffort: "medium" }, + models: { + "gpt-5-codex-low": { + options: { reasoningEffort: "low", textVerbosity: "low" }, + }, + }, + }; + + const result = getModelConfig("gpt-5-codex-low", userConfig); + expect(result.reasoningEffort).toBe("low"); + expect(result.textVerbosity).toBe("low"); + }); + + it("should merge global and per-model options (per-model wins)", async () => { + const userConfig: UserConfig = { + global: { + reasoningEffort: "medium", + textVerbosity: "medium", + include: ["reasoning.encrypted_content"], + }, + models: { + "gpt-5-codex-high": { + options: { reasoningEffort: "high" }, // Override only effort + }, + }, + }; + + const result = getModelConfig("gpt-5-codex-high", userConfig); + expect(result.reasoningEffort).toBe("high"); // From per-model + expect(result.textVerbosity).toBe("medium"); // From global + expect(result.include).toEqual(["reasoning.encrypted_content"]); // From global + }); + + it("should return global options when model not in config", async () => { + const userConfig: UserConfig = { + global: { reasoningEffort: "medium" }, + models: { + "gpt-5-codex-low": { options: { reasoningEffort: "low" } }, + }, + }; + + // Looking up different model + const result = getModelConfig("gpt-5-codex", userConfig); + expect(result.reasoningEffort).toBe("medium"); // Global only + }); + + it("should handle empty config", async () => { + const result = getModelConfig("gpt-5-codex", { + global: {}, + models: {}, + }); + expect(result).toEqual({}); + }); + + it("should handle missing models object", async () => { + const userConfig: UserConfig = { + global: { reasoningEffort: "low" }, + models: undefined as any, + }; + const result = getModelConfig("gpt-5", userConfig); + expect(result.reasoningEffort).toBe("low"); + }); + }); + + describe("Backwards compatibility", () => { + it("should work with old verbose config keys", async () => { + const userConfig: UserConfig = { + global: {}, + models: { + "GPT 5 Codex Low (ChatGPT Subscription)": { + options: { reasoningEffort: "low" }, + }, + }, + }; + + const result = getModelConfig( + "GPT 5 Codex Low (ChatGPT Subscription)", + userConfig + ); + expect(result.reasoningEffort).toBe("low"); + }); + + it("should work with old configs that have id field", async () => { + const userConfig: UserConfig = { + global: {}, + models: { + "gpt-5-codex-low": { + id: 
"gpt-5-codex", // id field present but should be ignored + options: { reasoningEffort: "low" }, + }, + }, + }; + + const result = getModelConfig("gpt-5-codex-low", userConfig); + expect(result.reasoningEffort).toBe("low"); + }); + }); + + describe("Default models (no custom config)", () => { + it("should return global options for default gpt-5-codex", async () => { + const userConfig: UserConfig = { + global: { reasoningEffort: "high" }, + models: {}, + }; + + const result = getModelConfig("gpt-5-codex", userConfig); + expect(result.reasoningEffort).toBe("high"); + }); + + it("should return empty when no config at all", async () => { + const result = getModelConfig("gpt-5", undefined); + expect(result).toEqual({}); + }); + }); + }); + + describe("filterInput", () => { + it("should keep items without IDs unchanged", async () => { + const input: InputItem[] = [ + { type: "message", role: "user", content: "hello" }, + ]; + const result = filterInput(input); + expect(result).toEqual(input); + expect(result![0]).not.toHaveProperty("id"); + }); + + it("should remove ALL message IDs (rs_, msg_, etc.) for store:false compatibility", async () => { + const input: InputItem[] = [ + { id: "rs_123", type: "message", role: "assistant", content: "hello" }, + { id: "msg_456", type: "message", role: "user", content: "world" }, + { + id: "assistant_789", + type: "message", + role: "assistant", + content: "test", + }, + ]; + const result = filterInput(input); + + // All items should remain (no filtering), but ALL IDs removed + expect(result).toHaveLength(3); + expect(result![0]).not.toHaveProperty("id"); + expect(result![1]).not.toHaveProperty("id"); + expect(result![2]).not.toHaveProperty("id"); + expect(result![0].content).toBe("hello"); + expect(result![1].content).toBe("world"); + expect(result![2].content).toBe("test"); + }); + + it("should strip ID field but preserve all other properties", async () => { + const input: InputItem[] = [ + { + id: "msg_123", + type: "message", + role: "user", + content: "test", + metadata: { some: "data" }, + }, + ]; + const result = filterInput(input); + + expect(result).toHaveLength(1); + expect(result![0]).not.toHaveProperty("id"); + expect(result![0].type).toBe("message"); + expect(result![0].role).toBe("user"); + expect(result![0].content).toBe("test"); + expect(result![0]).toHaveProperty("metadata"); + }); + + it("should handle mixed items with and without IDs", async () => { + const input: InputItem[] = [ + { type: "message", role: "user", content: "1" }, + { id: "rs_stored", type: "message", role: "assistant", content: "2" }, + { id: "msg_123", type: "message", role: "user", content: "3" }, + ]; + const result = filterInput(input); + + // All items kept, IDs removed from items that had them + expect(result).toHaveLength(3); + expect(result![0]).not.toHaveProperty("id"); + expect(result![1]).not.toHaveProperty("id"); + expect(result![2]).not.toHaveProperty("id"); + expect(result![0].content).toBe("1"); + expect(result![1].content).toBe("2"); + expect(result![2].content).toBe("3"); + }); + + it("should handle custom ID formats (future-proof)", async () => { + const input: InputItem[] = [ + { + id: "custom_id_format", + type: "message", + role: "user", + content: "test", + }, + { + id: "another-format-123", + type: "message", + role: "user", + content: "test2", + }, + ]; + const result = filterInput(input); + + expect(result).toHaveLength(2); + expect(result![0]).not.toHaveProperty("id"); + expect(result![1]).not.toHaveProperty("id"); + }); + + it("should return 
undefined for undefined input", async () => { + expect(filterInput(undefined)).toBeUndefined(); + }); + + it("should return non-array input as-is", async () => { + const notArray = { notAnArray: true }; + expect(filterInput(notArray as any)).toBe(notArray); + }); + + it("should handle empty array", async () => { + const input: InputItem[] = []; + const result = filterInput(input); + expect(result).toEqual([]); + }); + }); + + describe("addToolRemapMessage", () => { + it("should prepend tool remap message when tools present", async () => { + const input: InputItem[] = [ + { type: "message", role: "user", content: "hello" }, + ]; + const result = addToolRemapMessage(input, true); + + expect(result).toHaveLength(2); + expect(result![0].role).toBe("developer"); + expect(result![0].type).toBe("message"); + expect((result![0].content as any)[0].text).toContain("apply_patch"); + }); + + it("should not modify input when tools not present", async () => { + const input: InputItem[] = [ + { type: "message", role: "user", content: "hello" }, + ]; + const result = addToolRemapMessage(input, false); + expect(result).toEqual(input); + }); + + it("should return undefined for undefined input", async () => { + expect(addToolRemapMessage(undefined, true)).toBeUndefined(); + }); + + it("should handle non-array input", async () => { + const notArray = { notAnArray: true }; + expect(addToolRemapMessage(notArray as any, true)).toBe(notArray); + }); + }); + + describe("isOpenCodeSystemPrompt", () => { + it("should detect OpenCode system prompt with string content", async () => { + const item: InputItem = { + type: "message", + role: "developer", + content: "You are a coding agent running in OpenCode", + }; + expect(isOpenCodeSystemPrompt(item, null)).toBe(true); + }); + + it("should detect OpenCode system prompt with array content", async () => { + const item: InputItem = { + type: "message", + role: "developer", + content: [ + { + type: "input_text", + text: "You are a coding agent running in OpenCode", + }, + ], + }; + expect(isOpenCodeSystemPrompt(item, null)).toBe(true); + }); + + it("should detect with system role", async () => { + const item: InputItem = { + type: "message", + role: "system", + content: "You are a coding agent running in OpenCode", + }; + expect(isOpenCodeSystemPrompt(item, null)).toBe(true); + }); + + it("should not detect non-system roles", async () => { + const item: InputItem = { + type: "message", + role: "user", + content: "You are a coding agent running in OpenCode", + }; + expect(isOpenCodeSystemPrompt(item, null)).toBe(false); + }); + + it("should not detect different content", async () => { + const item: InputItem = { + type: "message", + role: "developer", + content: "Different message", + }; + expect(isOpenCodeSystemPrompt(item, null)).toBe(false); + }); + + it("should NOT detect AGENTS.md content", async () => { + const item: InputItem = { + type: "message", + role: "developer", + content: + "# Project Guidelines\n\nThis is custom AGENTS.md content for the project.", + }; + expect(isOpenCodeSystemPrompt(item, null)).toBe(false); + }); + + it("should NOT detect environment info concatenated with AGENTS.md", async () => { + const item: InputItem = { + type: "message", + role: "developer", + content: + "Environment: /path/to/project\nDate: 2025-01-01\n\n# AGENTS.md\n\nCustom instructions here.", + }; + expect(isOpenCodeSystemPrompt(item, null)).toBe(false); + }); + + it("should NOT detect content with codex signature in the middle", async () => { + const cachedPrompt = "You are a 
coding agent running in OpenCode."; + const item: InputItem = { + type: "message", + role: "developer", + // Has codex.txt content but with environment prepended (like OpenCode does) + content: + "Environment info here\n\nYou are a coding agent running in OpenCode.", + }; + // First 200 chars won't match because of prepended content + expect(isOpenCodeSystemPrompt(item, cachedPrompt)).toBe(false); + }); + + it("should detect with cached prompt exact match", async () => { + const cachedPrompt = "You are a coding agent running in OpenCode"; + const item: InputItem = { + type: "message", + role: "developer", + content: "You are a coding agent running in OpenCode", + }; + expect(isOpenCodeSystemPrompt(item, cachedPrompt)).toBe(true); + }); + }); + + describe("filterOpenCodeSystemPrompts", () => { + it("should filter out OpenCode system prompts", async () => { + const input: InputItem[] = [ + { + type: "message", + role: "developer", + content: "You are a coding agent running in OpenCode", + }, + { type: "message", role: "user", content: "hello" }, + ]; + const result = await filterOpenCodeSystemPrompts(input); + expect(result).toHaveLength(1); + expect(result![0].role).toBe("user"); + }); + + it("should keep user messages", async () => { + const input: InputItem[] = [ + { type: "message", role: "user", content: "message 1" }, + { type: "message", role: "user", content: "message 2" }, + ]; + const result = await filterOpenCodeSystemPrompts(input); + expect(result).toHaveLength(2); + }); + + it("should keep non-OpenCode developer messages", async () => { + const input: InputItem[] = [ + { type: "message", role: "developer", content: "Custom instruction" }, + { type: "message", role: "user", content: "hello" }, + ]; + const result = await filterOpenCodeSystemPrompts(input); + expect(result).toHaveLength(2); + }); + + it("should keep AGENTS.md content (not filter it)", async () => { + const input: InputItem[] = [ + { + type: "message", + role: "developer", + content: "You are a coding agent running in OpenCode", // This is codex.txt + }, + { + type: "message", + role: "developer", + content: "# Project Guidelines\n\nThis is AGENTS.md content.", // This is AGENTS.md + }, + { type: "message", role: "user", content: "hello" }, + ]; + const result = await filterOpenCodeSystemPrompts(input); + // Should filter codex.txt but keep AGENTS.md + expect(result).toHaveLength(2); + expect(result![0].content).toContain("AGENTS.md"); + expect(result![1].role).toBe("user"); + }); + + it("should keep environment+AGENTS.md concatenated message", async () => { + const input: InputItem[] = [ + { + type: "message", + role: "developer", + content: "You are a coding agent running in OpenCode", // codex.txt alone + }, + { + type: "message", + role: "developer", + // environment + AGENTS.md joined (like OpenCode does) + content: + "Working directory: /path/to/project\nDate: 2025-01-01\n\n# AGENTS.md\n\nCustom instructions.", + }, + { type: "message", role: "user", content: "hello" }, + ]; + const result = await filterOpenCodeSystemPrompts(input); + // Should filter first message (codex.txt) but keep second (env+AGENTS.md) + expect(result).toHaveLength(2); + expect(result![0].content).toContain("AGENTS.md"); + expect(result![1].role).toBe("user"); + }); + + it("should return undefined for undefined input", async () => { + expect(await filterOpenCodeSystemPrompts(undefined)).toBeUndefined(); + }); + }); + + describe("addCodexBridgeMessage", () => { + it("should prepend bridge message when tools present", async () => { + 
const input: InputItem[] = [ + { type: "message", role: "user", content: "hello" }, + ]; + const result = addCodexBridgeMessage(input, true); + + expect(result).toHaveLength(2); + expect(result![0].role).toBe("developer"); + expect(result![0].type).toBe("message"); + expect((result![0].content as any)[0].text).toContain( + "Codex Running in OpenCode" + ); + }); + + it("should not modify input when tools not present", async () => { + const input: InputItem[] = [ + { type: "message", role: "user", content: "hello" }, + ]; + const result = addCodexBridgeMessage(input, false); + expect(result).toEqual(input); + }); + + it("should return undefined for undefined input", async () => { + expect(addCodexBridgeMessage(undefined, true)).toBeUndefined(); + }); + }); + + describe("transformRequestBody", () => { + const codexInstructions = "Test Codex Instructions"; + + it("preserves existing prompt_cache_key passed by host (OpenCode)", async () => { + const body: RequestBody = { + model: "gpt-5-codex", + input: [], + // Host-provided key (OpenCode session id) + prompt_cache_key: "ses_host_key_123", + }; + const result: any = await transformRequestBody(body, codexInstructions); + expect(result.prompt_cache_key).toBe("ses_host_key_123"); + }); + + it("leaves prompt_cache_key unset when host does not supply one", async () => { + const body: RequestBody = { + model: "gpt-5", + input: [], + }; + const result: any = await transformRequestBody(body, codexInstructions); + expect(result.prompt_cache_key).toBeUndefined(); + }); + + it("should set required Codex fields", async () => { + const body: RequestBody = { + model: "gpt-5", + input: [], + }; + const result = await transformRequestBody(body, codexInstructions); + + expect(result.store).toBe(false); + expect(result.stream).toBe(true); + expect(result.instructions).toBe(codexInstructions); + }); + + it("should normalize model name", async () => { + const body: RequestBody = { + model: "gpt-5-mini", + input: [], + }; + const result = await transformRequestBody(body, codexInstructions); + expect(result.model).toBe("gpt-5.1"); // gpt-5 now maps to gpt-5.1 + }); + + it("should apply default reasoning config", async () => { + const body: RequestBody = { + model: "gpt-5", + input: [], + }; + const result = await transformRequestBody(body, codexInstructions); + + expect(result.reasoning?.effort).toBe("medium"); + expect(result.reasoning?.summary).toBe("auto"); + }); + + it("should apply user reasoning config", async () => { + const body: RequestBody = { + model: "gpt-5", + input: [], + }; + const userConfig: UserConfig = { + global: { + reasoningEffort: "high", + reasoningSummary: "detailed", + }, + models: {}, + }; + const result = await transformRequestBody( + body, + codexInstructions, + userConfig + ); + + expect(result.reasoning?.effort).toBe("high"); + expect(result.reasoning?.summary).toBe("detailed"); + }); + + it("should apply default text verbosity", async () => { + const body: RequestBody = { + model: "gpt-5", + input: [], + }; + const result = await transformRequestBody(body, codexInstructions); + expect(result.text?.verbosity).toBe("medium"); + }); + + it("should apply user text verbosity", async () => { + const body: RequestBody = { + model: "gpt-5", + input: [], + }; + const userConfig: UserConfig = { + global: { textVerbosity: "low" }, + models: {}, + }; + const result = await transformRequestBody( + body, + codexInstructions, + userConfig + ); + expect(result.text?.verbosity).toBe("low"); + }); + + it("should set default include for encrypted 
reasoning", async () => { + const body: RequestBody = { + model: "gpt-5", + input: [], + }; + const result = await transformRequestBody(body, codexInstructions); + expect(result.include).toEqual(["reasoning.encrypted_content"]); + }); + + it("should use user-configured include", async () => { + const body: RequestBody = { + model: "gpt-5", + input: [], + }; + const userConfig: UserConfig = { + global: { include: ["custom_field", "reasoning.encrypted_content"] }, + models: {}, + }; + const result = await transformRequestBody( + body, + codexInstructions, + userConfig + ); + expect(result.include).toEqual([ + "custom_field", + "reasoning.encrypted_content", + ]); + }); + + it("should remove IDs from input array (keep all items, strip IDs)", async () => { + const body: RequestBody = { + model: "gpt-5", + input: [ + { id: "rs_123", type: "message", role: "assistant", content: "old" }, + { type: "message", role: "user", content: "new" }, + ], + }; + const result = await transformRequestBody(body, codexInstructions); + + // All items kept, IDs removed + expect(result.input).toHaveLength(2); + expect(result.input![0]).not.toHaveProperty("id"); + expect(result.input![1]).not.toHaveProperty("id"); + expect(result.input![0].content).toBe("old"); + expect(result.input![1].content).toBe("new"); + }); + + it("should add tool remap message when tools present", async () => { + const body: RequestBody = { + model: "gpt-5", + input: [{ type: "message", role: "user", content: "hello" }], + tools: [{ name: "test_tool" }], + }; + const result = await transformRequestBody(body, codexInstructions); + expect(result.input![0].role).toBe("developer"); + }); + + it("should not add tool remap message when tools absent", async () => { + const body: RequestBody = { + model: "gpt-5", + input: [{ type: "message", role: "user", content: "hello" }], + }; + const result = await transformRequestBody(body, codexInstructions); + expect(result.input![0].role).toBe("user"); + }); + + it("should remove unsupported parameters", async () => { + const body: RequestBody = { + model: "gpt-5", + input: [], + max_output_tokens: 1000, + max_completion_tokens: 2000, + }; + const result = await transformRequestBody(body, codexInstructions); + expect(result.max_output_tokens).toBeUndefined(); + expect(result.max_completion_tokens).toBeUndefined(); + }); + + it("should normalize minimal to low for gpt-5-codex", async () => { + const body: RequestBody = { + model: "gpt-5-codex", + input: [], + }; + const userConfig: UserConfig = { + global: { reasoningEffort: "minimal" }, + models: {}, + }; + const result = await transformRequestBody( + body, + codexInstructions, + userConfig + ); + expect(result.reasoning?.effort).toBe("low"); + }); + + it("should clamp xhigh to high for codex-mini", async () => { + const body: RequestBody = { + model: "gpt-5.1-codex-mini-high", + input: [], + }; + const userConfig: UserConfig = { + global: { reasoningEffort: "xhigh" }, + models: {}, + }; + const result = await transformRequestBody( + body, + codexInstructions, + userConfig + ); + expect(result.reasoning?.effort).toBe("high"); + }); + + it("should clamp none to medium for codex-mini", async () => { + const body: RequestBody = { + model: "gpt-5.1-codex-mini-medium", + input: [], + }; + const userConfig: UserConfig = { + global: { reasoningEffort: "none" }, + models: {}, + }; + const result = await transformRequestBody( + body, + codexInstructions, + userConfig + ); + expect(result.reasoning?.effort).toBe("medium"); + }); + + it("should default codex-max to 
high effort", async () => { + const body: RequestBody = { + model: "gpt-5.1-codex-max", + input: [], + }; + const result = await transformRequestBody(body, codexInstructions); + expect(result.reasoning?.effort).toBe("high"); + }); + + it("should default gpt-5.2-codex to high effort", async () => { + const body: RequestBody = { + model: "gpt-5.2-codex", + input: [], + }; + const result = await transformRequestBody(body, codexInstructions); + expect(result.model).toBe("gpt-5.2-codex"); + expect(result.reasoning?.effort).toBe("high"); + }); + + it("should preserve xhigh for codex-max when requested", async () => { + const body: RequestBody = { + model: "gpt-5.1-codex-max-xhigh", + input: [], + }; + const userConfig: UserConfig = { + global: { reasoningSummary: "auto" }, + models: { + "gpt-5.1-codex-max-xhigh": { + options: { reasoningEffort: "xhigh", reasoningSummary: "detailed" }, + }, + }, + }; + const result = await transformRequestBody( + body, + codexInstructions, + userConfig + ); + expect(result.model).toBe("gpt-5.1-codex-max"); + expect(result.reasoning?.effort).toBe("xhigh"); + expect(result.reasoning?.summary).toBe("detailed"); + }); + + it("should preserve xhigh for gpt-5.2-codex when requested", async () => { + const body: RequestBody = { + model: "gpt-5.2-codex-xhigh", + input: [], + }; + const userConfig: UserConfig = { + global: { reasoningSummary: "auto" }, + models: { + "gpt-5.2-codex-xhigh": { + options: { reasoningEffort: "xhigh", reasoningSummary: "detailed" }, + }, + }, + }; + const result = await transformRequestBody( + body, + codexInstructions, + userConfig + ); + expect(result.model).toBe("gpt-5.2-codex"); + expect(result.reasoning?.effort).toBe("xhigh"); + expect(result.reasoning?.summary).toBe("detailed"); + }); + + it("should downgrade xhigh to high for non-max codex", async () => { + const body: RequestBody = { + model: "gpt-5.1-codex-high", + input: [], + }; + const userConfig: UserConfig = { + global: { reasoningEffort: "xhigh" }, + models: {}, + }; + const result = await transformRequestBody( + body, + codexInstructions, + userConfig + ); + expect(result.model).toBe("gpt-5.1-codex"); + expect(result.reasoning?.effort).toBe("high"); + }); + + it("should downgrade xhigh to high for non-max general models", async () => { + const body: RequestBody = { + model: "gpt-5.1-high", + input: [], + }; + const userConfig: UserConfig = { + global: { reasoningEffort: "xhigh" }, + models: {}, + }; + const result = await transformRequestBody( + body, + codexInstructions, + userConfig + ); + expect(result.model).toBe("gpt-5.1"); + expect(result.reasoning?.effort).toBe("high"); + }); + + it("should preserve none for GPT-5.2", async () => { + const body: RequestBody = { + model: "gpt-5.2-none", + input: [], + }; + const userConfig: UserConfig = { + global: { reasoningEffort: "none" }, + models: {}, + }; + const result = await transformRequestBody( + body, + codexInstructions, + userConfig + ); + expect(result.model).toBe("gpt-5.2"); + expect(result.reasoning?.effort).toBe("none"); + }); + + it("should upgrade none to low for GPT-5.2-codex (codex does not support none)", async () => { + const body: RequestBody = { + model: "gpt-5.2-codex", + input: [], + }; + const userConfig: UserConfig = { + global: { reasoningEffort: "none" }, + models: {}, + }; + const result = await transformRequestBody( + body, + codexInstructions, + userConfig + ); + expect(result.model).toBe("gpt-5.2-codex"); + expect(result.reasoning?.effort).toBe("low"); + }); + + it("should normalize minimal to low for 
gpt-5.2-codex", async () => { + const body: RequestBody = { + model: "gpt-5.2-codex", + input: [], + }; + const userConfig: UserConfig = { + global: { reasoningEffort: "minimal" }, + models: {}, + }; + const result = await transformRequestBody( + body, + codexInstructions, + userConfig + ); + expect(result.model).toBe("gpt-5.2-codex"); + expect(result.reasoning?.effort).toBe("low"); + }); + + it("should preserve none for GPT-5.1 general purpose", async () => { + const body: RequestBody = { + model: "gpt-5.1-none", + input: [], + }; + const userConfig: UserConfig = { + global: { reasoningEffort: "none" }, + models: {}, + }; + const result = await transformRequestBody( + body, + codexInstructions, + userConfig + ); + expect(result.model).toBe("gpt-5.1"); + expect(result.reasoning?.effort).toBe("none"); + }); + + it("should upgrade none to low for GPT-5.1-codex (codex does not support none)", async () => { + const body: RequestBody = { + model: "gpt-5.1-codex", + input: [], + }; + const userConfig: UserConfig = { + global: { reasoningEffort: "none" }, + models: {}, + }; + const result = await transformRequestBody( + body, + codexInstructions, + userConfig + ); + expect(result.model).toBe("gpt-5.1-codex"); + expect(result.reasoning?.effort).toBe("low"); + }); + + it("should upgrade none to low for GPT-5.1-codex-max (codex max does not support none)", async () => { + const body: RequestBody = { + model: "gpt-5.1-codex-max", + input: [], + }; + const userConfig: UserConfig = { + global: { reasoningEffort: "none" }, + models: {}, + }; + const result = await transformRequestBody( + body, + codexInstructions, + userConfig + ); + expect(result.model).toBe("gpt-5.1-codex-max"); + expect(result.reasoning?.effort).toBe("low"); + }); + + it("should preserve minimal for non-codex models", async () => { + const body: RequestBody = { + model: "gpt-5", + input: [], + }; + const userConfig: UserConfig = { + global: { reasoningEffort: "minimal" }, + models: {}, + }; + const result = await transformRequestBody( + body, + codexInstructions, + userConfig + ); + expect(result.reasoning?.effort).toBe("low"); + }); + + it("should use minimal effort for lightweight models", async () => { + const body: RequestBody = { + model: "gpt-5-nano", + input: [], + }; + const result = await transformRequestBody(body, codexInstructions); + expect(result.reasoning?.effort).toBe("medium"); + }); + + it("should convert orphaned function_call_output to message to preserve context", async () => { + const body: RequestBody = { + model: "gpt-5-codex", + input: [ + { type: "message", role: "user", content: "hello" }, + { + type: "function_call_output", + role: "assistant", + call_id: "orphan_call", + name: "read", + output: "{}", + } as any, + ], + }; + + const result = await transformRequestBody(body, codexInstructions); + + expect(result.tools).toBeUndefined(); + expect(result.input).toHaveLength(2); + expect(result.input![0].type).toBe("message"); + expect(result.input![1].type).toBe("message"); + expect(result.input![1].role).toBe("assistant"); + expect(result.input![1].content).toContain( + "[Previous read result; call_id=orphan_call]" + ); + }); + + it("should keep matched function_call pairs when no tools present (for compaction)", async () => { + const body: RequestBody = { + model: "gpt-5-codex", + input: [ + { type: "message", role: "user", content: "hello" }, + { + type: "function_call", + call_id: "call_1", + name: "write", + arguments: "{}", + } as any, + { + type: "function_call_output", + call_id: "call_1", + output: 
"success", + } as any, + ], + }; + + const result = await transformRequestBody(body, codexInstructions); + + expect(result.tools).toBeUndefined(); + expect(result.input).toHaveLength(3); + expect(result.input![1].type).toBe("function_call"); + expect(result.input![2].type).toBe("function_call_output"); + }); + + describe("CODEX_MODE parameter", () => { + it("should use bridge message when codexMode=true and tools present (default)", async () => { + const body: RequestBody = { + model: "gpt-5", + input: [{ type: "message", role: "user", content: "hello" }], + tools: [{ name: "test_tool" }], + }; + const result = await transformRequestBody( + body, + codexInstructions, + undefined, + true + ); + + expect(result.input).toHaveLength(2); + expect(result.input![0].role).toBe("developer"); + expect((result.input![0].content as any)[0].text).toContain( + "Codex Running in OpenCode" + ); + }); + + it("should filter OpenCode prompts when codexMode=true", async () => { + const body: RequestBody = { + model: "gpt-5", + input: [ + { + type: "message", + role: "developer", + content: "You are a coding agent running in OpenCode", + }, + { type: "message", role: "user", content: "hello" }, + ], + tools: [{ name: "test_tool" }], + }; + const result = await transformRequestBody( + body, + codexInstructions, + undefined, + true + ); + + // Should have bridge message + user message (OpenCode prompt filtered out) + expect(result.input).toHaveLength(2); + expect(result.input![0].role).toBe("developer"); + expect((result.input![0].content as any)[0].text).toContain( + "Codex Running in OpenCode" + ); + expect(result.input![1].role).toBe("user"); + }); + + it("should not add bridge message when codexMode=true but no tools", async () => { + const body: RequestBody = { + model: "gpt-5", + input: [{ type: "message", role: "user", content: "hello" }], + }; + const result = await transformRequestBody( + body, + codexInstructions, + undefined, + true + ); + + expect(result.input).toHaveLength(1); + expect(result.input![0].role).toBe("user"); + }); + + it("should use tool remap message when codexMode=false", async () => { + const body: RequestBody = { + model: "gpt-5", + input: [{ type: "message", role: "user", content: "hello" }], + tools: [{ name: "test_tool" }], + }; + const result = await transformRequestBody( + body, + codexInstructions, + undefined, + false + ); + + expect(result.input).toHaveLength(2); + expect(result.input![0].role).toBe("developer"); + expect((result.input![0].content as any)[0].text).toContain( + "apply_patch" + ); + }); + + it("should not filter OpenCode prompts when codexMode=false", async () => { + const body: RequestBody = { + model: "gpt-5", + input: [ + { + type: "message", + role: "developer", + content: "You are a coding agent running in OpenCode", + }, + { type: "message", role: "user", content: "hello" }, + ], + tools: [{ name: "test_tool" }], + }; + const result = await transformRequestBody( + body, + codexInstructions, + undefined, + false + ); + + // Should have tool remap + opencode prompt + user message + expect(result.input).toHaveLength(3); + expect(result.input![0].role).toBe("developer"); + expect((result.input![0].content as any)[0].text).toContain( + "apply_patch" + ); + expect(result.input![1].role).toBe("developer"); + expect(result.input![2].role).toBe("user"); + }); + + it("should default to codexMode=true when parameter not provided", async () => { + const body: RequestBody = { + model: "gpt-5", + input: [{ type: "message", role: "user", content: "hello" }], + tools: 
[{ name: "test_tool" }], + }; + // Not passing codexMode parameter - should default to true + const result = await transformRequestBody(body, codexInstructions); + + // Should use bridge message (codexMode=true by default) + expect(result.input![0].role).toBe("developer"); + expect((result.input![0].content as any)[0].text).toContain( + "Codex Running in OpenCode" + ); + }); + }); + + // NEW: Integration tests for all config scenarios + describe("Integration: Complete Config Scenarios", () => { + describe("Scenario 1: Default models (no custom config)", () => { + it("should handle gpt-5-codex with global options only", async () => { + const body: RequestBody = { + model: "gpt-5-codex", + input: [], + }; + const userConfig: UserConfig = { + global: { reasoningEffort: "high" }, + models: {}, + }; + + const result = await transformRequestBody( + body, + codexInstructions, + userConfig + ); + + expect(result.model).toBe("gpt-5.1-codex"); // gpt-5-codex now maps to gpt-5.1-codex + expect(result.reasoning?.effort).toBe("high"); // From global + expect(result.store).toBe(false); + }); + + it("should handle gpt-5-mini normalizing to gpt-5.1", async () => { + const body: RequestBody = { + model: "gpt-5-mini", + input: [], + }; + + const result = await transformRequestBody(body, codexInstructions); + + expect(result.model).toBe("gpt-5.1"); // gpt-5 now maps to gpt-5.1 + expect(result.reasoning?.effort).toBe("medium"); // Default for normalized gpt-5.1 + }); + }); + + describe("Scenario 2: Custom preset names (new style)", () => { + const userConfig: UserConfig = { + global: { + reasoningEffort: "medium", + include: ["reasoning.encrypted_content"], + }, + models: { + "gpt-5-codex-low": { + options: { reasoningEffort: "low" }, + }, + "gpt-5-codex-high": { + options: { + reasoningEffort: "high", + reasoningSummary: "detailed", + }, + }, + }, + }; + + it("should apply per-model options for gpt-5-codex-low", async () => { + const body: RequestBody = { + model: "gpt-5-codex-low", + input: [], + }; + + const result = await transformRequestBody( + body, + codexInstructions, + userConfig + ); + + expect(result.model).toBe("gpt-5.1-codex"); // gpt-5-codex now maps to gpt-5.1-codex + expect(result.reasoning?.effort).toBe("low"); // From per-model + expect(result.include).toEqual(["reasoning.encrypted_content"]); // From global + }); + + it("should apply per-model options for gpt-5-codex-high", async () => { + const body: RequestBody = { + model: "gpt-5-codex-high", + input: [], + }; + + const result = await transformRequestBody( + body, + codexInstructions, + userConfig + ); + + expect(result.model).toBe("gpt-5.1-codex"); // gpt-5-codex now maps to gpt-5.1-codex + expect(result.reasoning?.effort).toBe("high"); // From per-model + expect(result.reasoning?.summary).toBe("detailed"); // From per-model + }); + + it("should use global options for default gpt-5-codex", async () => { + const body: RequestBody = { + model: "gpt-5-codex", + input: [], + }; + + const result = await transformRequestBody( + body, + codexInstructions, + userConfig + ); + + expect(result.model).toBe("gpt-5.1-codex"); // gpt-5-codex now maps to gpt-5.1-codex + expect(result.reasoning?.effort).toBe("medium"); // From global (no per-model) + }); + }); + + describe("Scenario 3: Backwards compatibility (old verbose names)", () => { + const userConfig: UserConfig = { + global: {}, + models: { + "GPT 5 Codex Low (ChatGPT Subscription)": { + options: { reasoningEffort: "low", textVerbosity: "low" }, + }, + }, + }; + + it("should find and apply old 
config format", async () => { + const body: RequestBody = { + model: "GPT 5 Codex Low (ChatGPT Subscription)", + input: [], + }; + + const result = await transformRequestBody( + body, + codexInstructions, + userConfig + ); + + expect(result.model).toBe("gpt-5.1-codex"); // gpt-5-codex now maps to gpt-5.1-codex + expect(result.reasoning?.effort).toBe("low"); // From per-model (old format) + expect(result.text?.verbosity).toBe("low"); + }); + }); + + describe("Scenario 4: Mixed default + custom models", () => { + const userConfig: UserConfig = { + global: { reasoningEffort: "medium" }, + models: { + "gpt-5-codex-low": { + options: { reasoningEffort: "low" }, + }, + }, + }; + + it("should use per-model for custom variant", async () => { + const body: RequestBody = { + model: "gpt-5-codex-low", + input: [], + }; + + const result = await transformRequestBody( + body, + codexInstructions, + userConfig + ); + + expect(result.reasoning?.effort).toBe("low"); // Per-model + }); + + it("should use global for default model", async () => { + const body: RequestBody = { + model: "gpt-5", + input: [], + }; + + const result = await transformRequestBody( + body, + codexInstructions, + userConfig + ); + + expect(result.reasoning?.effort).toBe("medium"); // Global + }); + }); + + describe("Scenario 5: Message ID filtering with multi-turn", () => { + it("should remove ALL IDs in multi-turn conversation", async () => { + const body: RequestBody = { + model: "gpt-5-codex", + input: [ + { + id: "msg_turn1", + type: "message", + role: "user", + content: "first", + }, + { + id: "rs_response1", + type: "message", + role: "assistant", + content: "response", + }, + { + id: "msg_turn2", + type: "message", + role: "user", + content: "second", + }, + { + id: "assistant_123", + type: "message", + role: "assistant", + content: "reply", + }, + ], + }; + + const result = await transformRequestBody(body, codexInstructions); + + // All items kept, ALL IDs removed + expect(result.input).toHaveLength(4); + expect(result.input!.every((item) => !item.id)).toBe(true); + expect(result.store).toBe(false); // Stateless mode + expect(result.include).toEqual(["reasoning.encrypted_content"]); + }); + }); + + describe("Scenario 6: Complete end-to-end transformation", () => { + it("should handle full transformation: custom model + IDs + tools", async () => { + const userConfig: UserConfig = { + global: { include: ["reasoning.encrypted_content"] }, + models: { + "gpt-5-codex-low": { + options: { + reasoningEffort: "low", + textVerbosity: "low", + reasoningSummary: "auto", + }, + }, + }, + }; + + const body: RequestBody = { + model: "gpt-5-codex-low", + input: [ + { id: "msg_1", type: "message", role: "user", content: "test" }, + { + id: "rs_2", + type: "message", + role: "assistant", + content: "reply", + }, + ], + tools: [{ name: "edit" }], + }; + + const result = await transformRequestBody( + body, + codexInstructions, + userConfig + ); + + // Model normalized (gpt-5-codex now maps to gpt-5.1-codex) + expect(result.model).toBe("gpt-5.1-codex"); + + // IDs removed + expect(result.input!.every((item) => !item.id)).toBe(true); + + // Per-model options applied + expect(result.reasoning?.effort).toBe("low"); + expect(result.reasoning?.summary).toBe("auto"); + expect(result.text?.verbosity).toBe("low"); + + // Codex fields set + expect(result.store).toBe(false); + expect(result.stream).toBe(true); + expect(result.instructions).toBe(codexInstructions); + expect(result.include).toEqual(["reasoning.encrypted_content"]); + }); + }); + + 
describe("Variant Configuration (Modern Config Format)", () => { + it("should use reasoning config from providerOptions when present", async () => { + const body: RequestBody = { + model: "gpt-5.2", + providerOptions: { + openai: { + reasoningEffort: "high", + reasoningSummary: "detailed", + }, + }, + }; + + const result = await transformRequestBody(body, codexInstructions); + + // ProviderOptions config should override default + expect(result.reasoning?.effort).toBe("high"); + expect(result.reasoning?.summary).toBe("detailed"); + }); + + it("should use textVerbosity from providerOptions when present", async () => { + const body: RequestBody = { + model: "gpt-5.2", + providerOptions: { + openai: { + textVerbosity: "high", + }, + }, + }; + + const result = await transformRequestBody(body, codexInstructions); + + expect(result.text?.verbosity).toBe("high"); + }); + + it("should fall back to plugin defaults when providerOptions not present", async () => { + const body: RequestBody = { + model: "gpt-5.2", + }; + + const result = await transformRequestBody(body, codexInstructions); + + // Should use default reasoning effort for gpt-5.2 (high, as it supports xhigh) + expect(result.reasoning?.effort).toBe("high"); + // Should use default summary + expect(result.reasoning?.summary).toBe("auto"); + // Should use default text verbosity + expect(result.text?.verbosity).toBe("medium"); + }); + + it("should prioritize existing reasoning config (from AI SDK/variant) over userConfig", async () => { + const userConfig: UserConfig = { + global: { include: ["reasoning.encrypted_content"] }, + models: {}, + }; + + // After opencode fix: AI SDK sets body.reasoning directly from variant selection + const body: RequestBody = { + model: "gpt-5.2", + reasoning: { + effort: "xhigh", + summary: "detailed", + }, + text: { + verbosity: "high", + }, + }; + + const result = await transformRequestBody( + body, + codexInstructions, + userConfig + ); + + // Existing reasoning config from AI SDK/variant should be respected + expect(result.reasoning?.effort).toBe("xhigh"); + expect(result.reasoning?.summary).toBe("detailed"); + // text verbosity should still be updated from body + expect(result.text?.verbosity).toBe("high"); + }); + + it("should prioritize existing reasoning config (from AI SDK/variant) over per-model config", async () => { + const userConfig: UserConfig = { + global: {}, + models: { + "gpt-5.2": { + options: { + reasoningEffort: "low", + reasoningSummary: "auto", + textVerbosity: "low", + }, + }, + }, + }; + + // After opencode fix: AI SDK sets body.reasoning directly from variant selection + const body: RequestBody = { + model: "gpt-5.2", + reasoning: { + effort: "high", + }, + }; + + const result = await transformRequestBody( + body, + codexInstructions, + userConfig + ); + + // Existing reasoning config from AI SDK should override per-model config + expect(result.reasoning?.effort).toBe("high"); + // text verbosity should still respect per-model config + expect(result.text?.verbosity).toBe("low"); + }); + + it("should normalize existing reasoning effort for Codex Mini", async () => { + // After opencode fix: AI SDK sets body.reasoning directly + const body: RequestBody = { + model: "gpt-5.1-codex-mini", + reasoning: { + effort: "none", // Not supported by Codex Mini, should upgrade to medium + }, + }; + + const result = await transformRequestBody(body, codexInstructions); + + // Existing "none" from variant should be normalized to "medium" for Codex Mini + 
expect(result.reasoning?.effort).toBe("medium");
+    });
+
+    it("should respect existing xhigh reasoning for Codex Max", async () => {
+      // After opencode fix: AI SDK sets body.reasoning directly
+      const body: RequestBody = {
+        model: "gpt-5.1-codex-max",
+        reasoning: {
+          effort: "xhigh",
+          summary: "detailed",
+        },
+      };
+
+      const result = await transformRequestBody(body, codexInstructions);
+
+      // Codex Max supports xhigh
+      expect(result.reasoning?.effort).toBe("xhigh");
+      expect(result.reasoning?.summary).toBe("detailed");
+    });
+
+    it("should normalize xhigh to high for models that do not support it (direct reasoning config)", async () => {
+      // After opencode fix: AI SDK sets body.reasoning directly
+      const body: RequestBody = {
+        model: "gpt-5.1-codex",
+        reasoning: {
+          effort: "xhigh", // Not supported by gpt-5.1-codex, should downgrade to high
+        },
+      };
+
+      const result = await transformRequestBody(body, codexInstructions);
+
+      // gpt-5.1-codex doesn't support xhigh, should downgrade to high
+      expect(result.reasoning?.effort).toBe("high");
+    });
+
+    it("should normalize none to low for Codex models (direct reasoning config)", async () => {
+      // After opencode fix: AI SDK sets body.reasoning directly
+      const body: RequestBody = {
+        model: "gpt-5.1-codex",
+        reasoning: {
+          effort: "none", // Not supported by Codex models, should upgrade to low
+        },
+      };
+
+      const result = await transformRequestBody(body, codexInstructions);
+
+      // Codex models don't support "none", should upgrade to "low"
+      expect(result.reasoning?.effort).toBe("low");
+    });
+
+    it("should work with Ctrl+T variant cycling scenario (direct reasoning config)", async () => {
+      const userConfig: UserConfig = {
+        global: {
+          reasoningEffort: "medium",
+          reasoningSummary: "auto",
+          textVerbosity: "medium",
+        },
+        models: {
+          "gpt-5.1-codex": {
+            options: {
+              reasoningEffort: "low",
+              reasoningSummary: "detailed",
+            },
+          },
+        },
+      };
+
+      // After opencode fix: AI SDK sets body.reasoning directly from Ctrl+T variant selection
+      const body: RequestBody = {
+        model: "gpt-5.1-codex",
+        reasoning: {
+          effort: "high",
+          summary: "detailed",
+        },
+      };
+
+      const result = await transformRequestBody(
+        body,
+        codexInstructions,
+        userConfig
+      );
+
+      // Variant config should override global config
+      expect(result.reasoning?.effort).toBe("high");
+      expect(result.reasoning?.summary).toBe("detailed");
+      // text verbosity is not set per-model here, so it falls back to global config
+      expect(result.text?.verbosity).toBe("medium"); // From global config
+    });
+
+    it("should normalize xhigh to high via providerOptions for models that do not support it", async () => {
+      const body: RequestBody = {
+        model: "gpt-5.1-codex",
+        providerOptions: {
+          openai: {
+            reasoningEffort: "xhigh", // Not supported by gpt-5.1-codex
+          },
+        },
+      };
+
+      const result = await transformRequestBody(body, codexInstructions);
+
+      // gpt-5.1-codex doesn't support xhigh, should downgrade to high
+      expect(result.reasoning?.effort).toBe("high");
+    });
+
+    it("should normalize none to low via providerOptions for Codex models", async () => {
+      const body: RequestBody = {
+        model: "gpt-5.2-codex",
+        providerOptions: {
+          openai: {
+            reasoningEffort: "none", // Not supported by Codex models
+          },
+        },
+      };
+
+      const result = await transformRequestBody(body, codexInstructions);
+
+      // Codex models don't support "none", should upgrade to "low"
+      expect(result.reasoning?.effort).toBe("low");
+    });
+
+    it("should handle partial providerOptions config", async () => {
+      const body: RequestBody = {
+        model: "gpt-5.2",
+        providerOptions: {
+          openai: {
+            reasoningEffort: "high",
+            // reasoningSummary and textVerbosity not 
provided + }, + }, + }; + + const result = await transformRequestBody(body, codexInstructions); + + // Should use provided effort, but fall back for summary and verbosity + expect(result.reasoning?.effort).toBe("high"); + expect(result.reasoning?.summary).toBe("auto"); // Default + expect(result.text?.verbosity).toBe("medium"); // Default + }); + + it("should work with Ctrl+T variant cycling scenario", async () => { + // Simulates what happens when user presses Ctrl+T to cycle from "medium" to "high" + const userConfig: UserConfig = { + global: { + reasoningEffort: "medium", + textVerbosity: "medium", + }, + models: {}, + }; + + // User cycles to "high" variant + const body: RequestBody = { + model: "gpt-5.2-codex", + providerOptions: { + openai: { + reasoningEffort: "high", + reasoningSummary: "detailed", + }, + }, + }; + + const result = await transformRequestBody( + body, + codexInstructions, + userConfig + ); + + // Variant config should override global config + expect(result.reasoning?.effort).toBe("high"); + expect(result.reasoning?.summary).toBe("detailed"); + expect(result.text?.verbosity).toBe("medium"); // From global config + }); + }); + }); + }); }); From 3aba6b32ad86264f5736a092e43167c2ab513acd Mon Sep 17 00:00:00 2001 From: Saatvik Arya Date: Sun, 4 Jan 2026 13:45:44 +0530 Subject: [PATCH 2/2] docs: update configuration documentation for variants system support - Update README.md with version-specific config selection instructions - Add comparison table showing modern vs legacy config differences - Update installation steps to check OpenCode version and select appropriate config - Rewrite config/README.md with comprehensive guide for both config formats - Document variant cycling feature (Ctrl+T) for modern config users - Update model variant documentation to reflect both config formats Clarifies which config file to use based on OpenCode version and explains the benefits of the modern variants system. --- README.md | 190 +++++++++++++++++++++++++++-------------------- config/README.md | 110 ++++++++++++++++++--------- 2 files changed, 186 insertions(+), 114 deletions(-) diff --git a/README.md b/README.md index b6ff7fb..2aedd13 100644 --- a/README.md +++ b/README.md @@ -97,11 +97,13 @@ Check [releases](https://github.com/numman-ali/opencode-openai-codex-auth/releas ### Agent Instructions (Install / Update) -If you’re using an AI agent (Codex/Claude/etc.) to install or update this plugin on a machine, use these steps: +If you're using an AI agent (Codex/Claude/etc.) to install or update this plugin on a machine, use these steps: 1. **Use the official repo**: https://github.com/numman-ali/opencode-openai-codex-auth -2. **Pin the plugin version** in the user’s OpenCode config (`~/.config/opencode/opencode.json` or project `.opencode.json`). -3. **Replace the config** with the latest `config/full-opencode.json` from the repo. +2. **Pin the plugin version** in the user's OpenCode config (`~/.config/opencode/opencode.json` or project `.opencode.json`). +3. **Determine OpenCode version** and copy the appropriate config: + - **v1.0.210+**: Use `config/opencode-modern.json` (compact, uses variants) + - **v1.0.209 or older**: Use `config/opencode-legacy.json` (full model list) 4. **Refresh the plugin cache** so OpenCode reinstalls the updated version. 5. **Restart OpenCode**. @@ -109,15 +111,21 @@ If you’re using an AI agent (Codex/Claude/etc.) 
to install or update this plug # 1) Update plugin version (replace with newest release tag) # Example: opencode-openai-codex-auth@4.2.0 -# 2) Copy full config -cp /config/full-opencode.json ~/.config/opencode/opencode.json +# 2) Check OpenCode version +opencode --version -# 3) Refresh OpenCode plugin cache +# 3) Copy appropriate config based on version +# For v1.0.210+ (recommended): +cp /config/opencode-modern.json ~/.config/opencode/opencode.json + +# For older versions: +cp /config/opencode-legacy.json ~/.config/opencode/opencode.json + +# 4) Refresh OpenCode plugin cache rm -rf ~/.cache/opencode/node_modules ~/.cache/opencode/bun.lock -# 4) Optional sanity check for GPT-5.2-Codex presets -jq '.provider.openai.models | keys | map(select(startswith("gpt-5.2-codex")))' \ - ~/.config/opencode/opencode.json +# 5) Optional sanity check for available models +jq '.provider.openai.models | keys | length' ~/.config/opencode/opencode.json ``` > **Note**: If using a project-local config, replace the target path with `/.opencode.json`. @@ -126,77 +134,56 @@ jq '.provider.openai.models | keys | map(select(startswith("gpt-5.2-codex")))' \ #### ⚠️ REQUIRED: Full Configuration (Only Supported Setup) -**IMPORTANT**: You MUST use the full configuration from [`config/full-opencode.json`](./config/full-opencode.json). Other configurations are not officially supported and may not work reliably. +**IMPORTANT**: You MUST use one of the pre-configured files from the `config/` directory. Other configurations are not officially supported and may not work reliably. -**Why the full config is required:** -- GPT 5 models can be temperamental - some work, some don't, some may error -- The full config has been tested and verified to work -- Minimal configs lack proper model metadata for OpenCode features -- Older GPT 5.0 models are deprecated and being phased out by OpenAI +**Two configuration files available based on your OpenCode version:** -1. **Copy the full configuration** from [`config/full-opencode.json`](./config/full-opencode.json) to your opencode config file. +| File | OpenCode Version | Description | +|------|------------------|-------------| +| [`config/opencode-modern.json`](./config/opencode-modern.json) | **v1.0.210+ (Jan 2026+)** | Compact config using variants system - 6 models with built-in reasoning level variants | +| [`config/opencode-legacy.json`](./config/opencode-legacy.json) | **v1.0.209 and below** | Extended config with separate model entries for each reasoning level - 20+ individual model definitions | - The config includes 22 models with image input support. 
Here's a condensed example showing the structure:
+**Why two configs?**
+- OpenCode v1.0.210+ introduced a **variants system** that allows defining reasoning effort levels as variants under a single model
+- This reduces config size from 572 lines to ~240 lines while maintaining the same functionality
+- Use the legacy config if you're on an older OpenCode version
+
-```json
-{
-  "$schema": "https://opencode.ai/config.json",
-  "plugin": ["opencode-openai-codex-auth@4.2.0"],
-  "provider": {
-    "openai": {
-      "options": {
-        "reasoningEffort": "medium",
-        "reasoningSummary": "auto",
-        "textVerbosity": "medium",
-        "include": ["reasoning.encrypted_content"],
-        "store": false
-      },
-      "models": {
-        "gpt-5.2-high": {
-          "name": "GPT 5.2 High (OAuth)",
-          "limit": { "context": 272000, "output": 128000 },
-          "modalities": { "input": ["text", "image"], "output": ["text"] },
-          "options": {
-            "reasoningEffort": "high",
-            "reasoningSummary": "detailed",
-            "textVerbosity": "medium",
-            "include": ["reasoning.encrypted_content"],
-            "store": false
-          }
-        },
-        "gpt-5.1-codex-max-high": {
-          "name": "GPT 5.1 Codex Max High (OAuth)",
-          "limit": { "context": 272000, "output": 128000 },
-          "modalities": { "input": ["text", "image"], "output": ["text"] },
-          "options": {
-            "reasoningEffort": "high",
-            "reasoningSummary": "detailed",
-            "textVerbosity": "medium",
-            "include": ["reasoning.encrypted_content"],
-            "store": false
-          }
-        }
-        // ... 20 more models - see config/full-opencode.json for complete list
-      }
-    }
-  }
-}
-```
+**How to choose:**
+
+1. **If you have OpenCode v1.0.210 or newer** (check with `opencode --version`):
+   - ✅ Use [`config/opencode-modern.json`](./config/opencode-modern.json)
+   - Benefits: Cleaner config, built-in variant cycling with `Ctrl+T`, easier to maintain
+
+2. **If you have OpenCode v1.0.209 or older**:
+   - ✅ Use [`config/opencode-legacy.json`](./config/opencode-legacy.json)
+   - This provides the same 22 model variants as separate entries
+
+**Quick install:**
+
+```bash
+# For OpenCode v1.0.210+ (recommended)
+cp /config/opencode-modern.json ~/.config/opencode/opencode.json
+
+# For older OpenCode versions
+cp /config/opencode-legacy.json ~/.config/opencode/opencode.json
+```
 
-  **⚠️ Copy the complete file** from [`config/full-opencode.json`](./config/full-opencode.json) - don't use this truncated example.
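+For illustration, a variants-based model entry has this general shape (a condensed sketch of `config/opencode-modern.json` - see that file for the complete, authoritative definitions):
+
+```json
+{
+  "provider": {
+    "openai": {
+      "models": {
+        "gpt-5.2": {
+          "name": "GPT 5.2 (OAuth)",
+          "variants": {
+            "low": { "reasoningEffort": "low", "reasoningSummary": "auto", "textVerbosity": "medium" },
+            "high": { "reasoningEffort": "high", "reasoningSummary": "detailed", "textVerbosity": "medium" }
+          }
+        }
+      }
+    }
+  }
+}
+```
+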
+**What you get:**
 
 - **Global config**: `~/.config/opencode/opencode.json`
 - **Project config**: `/.opencode.json`
+
+| Config File | Model Families | Reasoning Variants | Total Models |
+|------------|----------------|-------------------|--------------|
+| `opencode-modern.json` | 6 | Built-in variants (low/medium/high/xhigh) | 6 base models with 22 total variants |
+| `opencode-legacy.json` | 6 | Separate model entries | 22 individual model definitions |
 
-  This gives you 22 model variants with different reasoning levels:
-  - **gpt-5.2** (none/low/medium/high/xhigh) - Latest GPT 5.2 model with full reasoning support
-  - **gpt-5.2-codex** (low/medium/high/xhigh) - GPT 5.2 Codex presets
-  - **gpt-5.1-codex-max** (low/medium/high/xhigh) - Codex Max presets
-  - **gpt-5.1-codex** (low/medium/high) - Codex model presets
-  - **gpt-5.1-codex-mini** (medium/high) - Codex mini tier presets
-  - **gpt-5.1** (none/low/medium/high) - General-purpose reasoning presets
+Both configs provide access to the same model families:
+- **gpt-5.2** (none/low/medium/high/xhigh) - Latest GPT 5.2 model with full reasoning support
+- **gpt-5.2-codex** (low/medium/high/xhigh) - GPT 5.2 Codex presets
+- **gpt-5.1-codex-max** (low/medium/high/xhigh) - Codex Max presets
+- **gpt-5.1-codex** (low/medium/high) - Codex model presets
+- **gpt-5.1-codex-mini** (medium/high) - Codex mini tier presets
+- **gpt-5.1** (none/low/medium/high) - General-purpose reasoning presets
 
-  All appear in the opencode model selector as "GPT 5.1 Codex Low (OAuth)", "GPT 5.1 High (OAuth)", etc.
+All appear in the opencode model selector as "GPT 5.1 Codex Low (OAuth)", "GPT 5.1 High (OAuth)", etc.
 
 ### Prompt caching & usage limits
 
@@ -262,9 +249,39 @@ opencode run "balanced task" --model=openai/gpt-5.1-codex-mini-medium
 opencode run "complex code" --model=openai/gpt-5.1-codex-mini-high
 ```
 
-### Available Model Variants (Full Config)
+### Available Model Variants
+
+When using the recommended config file, you get pre-configured variants. 
The model ID format differs based on your OpenCode version: + +**For OpenCode v1.0.210+ (modern config with variants):** -When using [`config/full-opencode.json`](./config/full-opencode.json), you get these pre-configured variants: +Use the base model with variant suffix: + +```bash +# Variant cycling available with Ctrl+T +opencode run "task" --model=openai/gpt-5.2:low +opencode run "task" --model=openai/gpt-5.2:medium +opencode run "task" --model=openai/gpt-5.2:high +``` + +| Base Model | Available Variants | TUI Display Name | +|------------|-------------------|------------------| +| `gpt-5.2` | none, low, medium, high, xhigh | GPT 5.2 {variant} (OAuth) | +| `gpt-5.2-codex` | low, medium, high, xhigh | GPT 5.2 Codex {variant} (OAuth) | +| `gpt-5.1-codex-max` | low, medium, high, xhigh | GPT 5.1 Codex Max {variant} (OAuth) | +| `gpt-5.1-codex` | low, medium, high | GPT 5.1 Codex {variant} (OAuth) | +| `gpt-5.1-codex-mini` | medium, high | GPT 5.1 Codex Mini {variant} (OAuth) | +| `gpt-5.1` | none, low, medium, high | GPT 5.1 {variant} (OAuth) | + +**For OpenCode v1.0.209 and below (legacy config with separate entries):** + +Use explicit model IDs: + +```bash +opencode run "task" --model=openai/gpt-5.2-low +opencode run "task" --model=openai/gpt-5.2-medium +opencode run "task" --model=openai/gpt-5.2-high +``` | CLI Model ID | TUI Display Name | Reasoning Effort | Best For | |--------------|------------------|-----------------|----------| @@ -298,7 +315,7 @@ When using [`config/full-opencode.json`](./config/full-opencode.json), you get t > > **Note**: GPT 5.2, GPT 5.2 Codex, and Codex Max all support `xhigh` reasoning. Use explicit reasoning levels (e.g., `gpt-5.2-high`, `gpt-5.2-codex-xhigh`, `gpt-5.1-codex-max-xhigh`) for precise control. -> **⚠️ Important**: GPT 5 models can be temperamental - some variants may work better than others, some may give errors, and behavior may vary. Stick to the presets above configured in `full-opencode.json` for best results. +> **⚠️ Important**: GPT 5 models can be temperamental - some variants may work better than others, some may give errors, and behavior may vary. Stick to the presets above configured in your config file for best results. All accessed via your ChatGPT Plus/Pro subscription. 
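+
+If you're unsure which format your installed config uses, an illustrative `jq` check (assumes `jq` is available) prints `true` for a variants-based (modern) config:
+
+```bash
+# true => modern variants config; false => legacy per-model entries
+jq '.provider.openai.models | to_entries | any(.value | has("variants"))' \
+  ~/.config/opencode/opencode.json
+```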
@@ -340,12 +357,25 @@ These defaults are tuned for Codex CLI-style usage and can be customized (see Co
 
 ### ⚠️ REQUIRED: Use Pre-Configured File
 
-**YOU MUST use [`config/full-opencode.json`](./config/full-opencode.json)** - this is the only officially supported configuration:
-- 22 pre-configured model variants (GPT 5.2, GPT 5.2 Codex, GPT 5.1, Codex, Codex Max, Codex Mini)
-- Image input support enabled for all models
-- Optimal configuration for each reasoning level
-- All variants visible in the opencode model selector
-- Required metadata for OpenCode features to work properly
+**YOU MUST use one of the pre-configured files from the `config/` directory** - this is the only officially supported configuration:
+
+**For OpenCode v1.0.210+ (Jan 2026+):**
+- ✅ Use [`config/opencode-modern.json`](./config/opencode-modern.json)
+- 6 base models with built-in variants
+- ~240 lines, easier to maintain
+- Built-in variant cycling (`Ctrl+T`)
+
+**For OpenCode v1.0.209 and below:**
+- ✅ Use [`config/opencode-legacy.json`](./config/opencode-legacy.json)
+- 22 individual model definitions
+- 572 lines, compatible with older versions
+
+Both configs provide:
+- ✅ Pre-configured model variants for all reasoning levels
+- ✅ Image input support enabled for all models
+- ✅ Optimal configuration for each reasoning level
+- ✅ All variants visible in the opencode model selector
+- ✅ Required metadata for OpenCode features to work properly
 
 **Do NOT use other configurations** - they are not supported and may fail unpredictably with GPT 5 models.
 
diff --git a/config/README.md b/config/README.md
index 2722711..3ddd69c 100644
--- a/config/README.md
+++ b/config/README.md
@@ -1,59 +1,101 @@
 # Configuration
 
-This directory contains the official opencode configuration for the OpenAI Codex OAuth plugin.
+This directory contains the official opencode configuration files for the OpenAI Codex OAuth plugin.
 
-## ⚠️ REQUIRED Configuration File
+## ⚠️ REQUIRED: Choose the Right Configuration
 
-### full-opencode.json (REQUIRED - USE THIS ONLY)
+**Two configuration files are available based on your OpenCode version:**
 
-**YOU MUST use this configuration file** - it is the ONLY officially supported setup:
+| File | OpenCode Version | Description |
+|------|------------------|-------------|
+| [`opencode-modern.json`](./opencode-modern.json) | **v1.0.210+ (Jan 2026+)** | Compact config using variants system - 6 models with built-in reasoning level variants |
+| [`opencode-legacy.json`](./opencode-legacy.json) | **v1.0.209 and below** | Extended config with separate model entries for each reasoning level - 22 individual model definitions |
+
+### Which one should I use?
+
+**If you have OpenCode v1.0.210 or newer** (check with `opencode --version`):
+```bash
+cp config/opencode-modern.json ~/.config/opencode/opencode.json
+```
+
+**If you have OpenCode v1.0.209 or older**:
 ```bash
-cp config/full-opencode.json ~/.config/opencode/opencode.json
+cp config/opencode-legacy.json ~/.config/opencode/opencode.json
 ```
 
-**Why this is required:**
-- GPT 5 models can be temperamental and need proper configuration
-- Contains 22 verified GPT 5.2/5.1 model variants (GPT 5.2, GPT 5.2 Codex, Codex, Codex Max, Codex Mini, and general GPT 5.1 including `gpt-5.1-codex-max-low/medium/high/xhigh`)
-- Includes all required metadata for OpenCode features
-- Guaranteed to work reliably
-- Global options for all models + per-model configuration overrides
+### Why two configs?
+
+OpenCode v1.0.210+ introduced a **variants system** that allows defining reasoning effort levels as variants under a single model. This reduces config size from 572 lines to ~240 lines while maintaining the same functionality.
+
+**What you get:**
 
-**What's included:**
-- All supported GPT 5.2/5.1 variants: gpt-5.2, gpt-5.2-codex, gpt-5.1, gpt-5.1-codex, gpt-5.1-codex-max, gpt-5.1-codex-mini
-- Proper reasoning effort settings for each variant (including new `xhigh` for Codex Max)
-- Context limits (272k context / 128k output for all Codex families, including Codex Max)
-- Required options: `store: false`, `include: ["reasoning.encrypted_content"]`
+| Config File | Model Families | Reasoning Variants | Total Models |
+|------------|----------------|-------------------|--------------|
+| `opencode-modern.json` | 6 | Built-in variants (low/medium/high/xhigh) | 6 base models with 22 total variants |
+| `opencode-legacy.json` | 6 | Separate model entries | 22 individual model definitions |
 
-### ❌ Other Configurations (NOT SUPPORTED)
+Both configs provide:
+- ✅ All supported GPT 5.2/5.1 variants: gpt-5.2, gpt-5.2-codex, gpt-5.1, gpt-5.1-codex, gpt-5.1-codex-max, gpt-5.1-codex-mini
+- ✅ Proper reasoning effort settings for each variant (including `xhigh` for Codex Max/5.2)
+- ✅ Context limits (272k context / 128k output for all Codex families)
+- ✅ Required options: `store: false`, `include: ["reasoning.encrypted_content"]`
+- ✅ Image input support for all models
+- ✅ All required metadata for OpenCode features
 
-**DO NOT use:**
-- `minimal-opencode.json` - NOT supported, will fail unpredictably
-- `full-opencode-gpt5.json` - DEPRECATED, GPT 5.0 models are being phased out by OpenAI
-- Custom configurations - NOT recommended, may not work reliably
+### Modern Config Benefits (v1.0.210+)
 
-**Why other configs don't work:**
-- GPT 5 models need specific configurations
-- Missing metadata breaks OpenCode features
-- No support for usage limits or context compaction
-- Cannot guarantee stable operation
+- **~58% smaller**: 239 lines vs 572 lines
+- **DRY**: Common options defined once at provider level
+- **Variant cycling**: Built-in support for `Ctrl+T` to switch reasoning levels
+- **Easier maintenance**: Add new variants without copying model definitions
 
 ## Usage
 
-**ONLY ONE OPTION** - use the full configuration:
+1. **Check your OpenCode version**:
+   ```bash
+   opencode --version
+   ```
 
-1. Copy `full-opencode.json` to your opencode config directory:
-   - Global: `~/.config/opencode/opencode.json`
-   - Project: `/.opencode.json`
+2. **Copy the appropriate config** based on your version:
+   ```bash
+   # For v1.0.210+ (recommended):
+   cp config/opencode-modern.json ~/.config/opencode/opencode.json
 
-2. **DO NOT modify** the configuration unless you know exactly what you're doing. The provided settings have been tested and verified to work.
+   # For older versions:
+   cp config/opencode-legacy.json ~/.config/opencode/opencode.json
+   ```
 
-3. Run opencode: `opencode run "your prompt" --model=openai/gpt-5.1-codex-medium`
+3. **Run opencode**:
+   ```bash
+   # Modern config (v1.0.210+):
+   opencode run "task" --model=openai/gpt-5.2:medium
+   opencode run "task" --model=openai/gpt-5.2:high
 
-> **⚠️ Critical**: GPT 5 models require this exact configuration. Do not use minimal configs or create custom variants - they are not supported and will fail unpredictably.
+ # Legacy config: + opencode run "task" --model=openai/gpt-5.2-medium + opencode run "task" --model=openai/gpt-5.2-high + ``` + +> **⚠️ Important**: Use the config file appropriate for your OpenCode version. Using the modern config with an older OpenCode version (v1.0.209 or below) will not work correctly. + +## Available Models + +Both configs provide access to the same model families: + +- **gpt-5.2** (none/low/medium/high/xhigh) - Latest GPT 5.2 model with full reasoning support +- **gpt-5.2-codex** (low/medium/high/xhigh) - GPT 5.2 Codex presets +- **gpt-5.1-codex-max** (low/medium/high/xhigh) - Codex Max presets +- **gpt-5.1-codex** (low/medium/high) - Codex model presets +- **gpt-5.1-codex-mini** (medium/high) - Codex mini tier presets +- **gpt-5.1** (none/low/medium/high) - General-purpose reasoning presets + +All appear in the opencode model selector as "GPT 5.1 Codex Low (OAuth)", "GPT 5.1 High (OAuth)", etc. ## Configuration Options See the main [README.md](../README.md#configuration) for detailed documentation of all configuration options. -**Remember**: Use `full-opencode.json` as-is for guaranteed compatibility. Custom configurations are not officially supported. +## Version History + +- **January 2026 (v1.0.210+)**: Introduced variant system support. Use `opencode-modern.json` +- **December 2025 and earlier**: Use `opencode-legacy.json`