diff --git a/README.md b/README.md index b6ff7fb..3aa43e1 100644 --- a/README.md +++ b/README.md @@ -7,7 +7,7 @@ This plugin enables opencode to use OpenAI's Codex backend via ChatGPT Plus/Pro OAuth authentication, allowing you to use your ChatGPT subscription instead of OpenAI Platform API credits. > **Found this useful?** -Follow me on [X @nummanali](https://x.com/nummanali) for future updates and more projects! +> Follow me on [X @nummanali](https://x.com/nummanali) for future updates and more projects! ## ⚠️ Terms of Service & Usage Notice @@ -21,6 +21,7 @@ Follow me on [X @nummanali](https://x.com/nummanali) for future updates and more **This tool uses OpenAI's official OAuth authentication** (the same method as OpenAI's official Codex CLI). However, users are responsible for ensuring their usage complies with OpenAI's terms. ### ⚠️ Not Suitable For: + - Commercial API resale or white-labeling - High-volume automated extraction beyond personal use - Applications serving multiple users with one subscription @@ -86,7 +87,7 @@ OpenCode will detect the version mismatch and install the new version automatica If you previously used an unpinned version, clear the cache: ```bash -rm -rf ~/.cache/opencode/node_modules ~/.cache/opencode/bun.lock +rm -rf ~/.cache/opencode ``` Then restart OpenCode with a pinned version in your config. @@ -97,11 +98,13 @@ Check [releases](https://github.com/numman-ali/opencode-openai-codex-auth/releas ### Agent Instructions (Install / Update) -If you’re using an AI agent (Codex/Claude/etc.) to install or update this plugin on a machine, use these steps: +If you're using an AI agent (Codex/Claude/etc.) to install or update this plugin on a machine, use these steps: 1. **Use the official repo**: https://github.com/numman-ali/opencode-openai-codex-auth -2. **Pin the plugin version** in the user’s OpenCode config (`~/.config/opencode/opencode.json` or project `.opencode.json`). -3. **Replace the config** with the latest `config/full-opencode.json` from the repo. +2. **Pin the plugin version** in the user's OpenCode config (`~/.config/opencode/opencode.json` or project `.opencode.json`). +3. **Determine OpenCode version** and copy the appropriate config: + - **v1.0.210+**: Use `config/opencode-modern.json` (compact, uses variants) + - **v1.0.209 or older**: Use `config/opencode-legacy.json` (full model list) 4. **Refresh the plugin cache** so OpenCode reinstalls the updated version. 5. **Restart OpenCode**. @@ -109,15 +112,21 @@ If you’re using an AI agent (Codex/Claude/etc.) 
to install or update this plug
# 1) Update plugin version (replace with newest release tag)
# Example: opencode-openai-codex-auth@4.2.0

-# 2) Copy full config
-cp <repo>/config/full-opencode.json ~/.config/opencode/opencode.json
+# 2) Check OpenCode version
+opencode --version
+
+# 3) Copy appropriate config based on version
+# For v1.0.210+ (recommended):
+cp <repo>/config/opencode-modern.json ~/.config/opencode/opencode.json

-# 3) Refresh OpenCode plugin cache
+# For older versions:
+cp <repo>/config/opencode-legacy.json ~/.config/opencode/opencode.json
+
+# 4) Refresh OpenCode plugin cache
 rm -rf ~/.cache/opencode/node_modules ~/.cache/opencode/bun.lock

-# 4) Optional sanity check for GPT-5.2-Codex presets
-jq '.provider.openai.models | keys | map(select(startswith("gpt-5.2-codex")))' \
-  ~/.config/opencode/opencode.json
+# 5) Optional sanity check for available models
+jq '.provider.openai.models | keys | length' ~/.config/opencode/opencode.json
```

> **Note**: If using a project-local config, replace the target path with `<project-root>/.opencode.json`.

@@ -126,107 +135,64 @@ jq '.provider.openai.models | keys | map(select(startswith("gpt-5.2-codex")))' \

#### ⚠️ REQUIRED: Full Configuration (Only Supported Setup)

-**IMPORTANT**: You MUST use the full configuration from [`config/full-opencode.json`](./config/full-opencode.json). Other configurations are not officially supported and may not work reliably.
+**IMPORTANT**: You MUST use one of the pre-configured files from the `config/` directory. Other configurations are not officially supported and may not work reliably.

-**Why the full config is required:**
-- GPT 5 models can be temperamental - some work, some don't, some may error
-- The full config has been tested and verified to work
-- Minimal configs lack proper model metadata for OpenCode features
-- Older GPT 5.0 models are deprecated and being phased out by OpenAI
+**Two configuration files available based on your OpenCode version:**

-1. **Copy the full configuration** from [`config/full-opencode.json`](./config/full-opencode.json) to your opencode config file.
+| File                                                            | OpenCode Version          | Description                                                                                               |
+| --------------------------------------------------------------- | ------------------------- | --------------------------------------------------------------------------------------------------------- |
+| [`config/opencode-modern.json`](./config/opencode-modern.json)  | **v1.0.210+ (Jan 2026+)** | Compact config using variants system - 6 models with built-in reasoning level variants                     |
+| [`config/opencode-legacy.json`](./config/opencode-legacy.json)  | **v1.0.209 and below**    | Extended config with separate model entries for each reasoning level - 20+ individual model definitions    |
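+
+For reference, the modern config's variants structure looks like this (an abridged sketch of [`config/opencode-modern.json`](./config/opencode-modern.json) — `limit`, `modalities`, and the other models are omitted here; see that file for the complete version):
+
+```json
+{
+  "provider": {
+    "openai": {
+      "models": {
+        "gpt-5.2": {
+          "name": "GPT 5.2 (OAuth)",
+          "variants": {
+            "low": { "reasoningEffort": "low", "reasoningSummary": "auto", "textVerbosity": "medium" },
+            "high": { "reasoningEffort": "high", "reasoningSummary": "detailed", "textVerbosity": "medium" }
+          }
+        }
+      }
+    }
+  }
+}
+```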
- The config includes 22 models with image input support. Here's a condensed example showing the structure:
+**Why two configs?**

-```json
-{
-  "$schema": "https://opencode.ai/config.json",
-  "plugin": ["opencode-openai-codex-auth@4.2.0"],
-  "provider": {
-    "openai": {
-      "options": {
-        "reasoningEffort": "medium",
-        "reasoningSummary": "auto",
-        "textVerbosity": "medium",
-        "include": ["reasoning.encrypted_content"],
-        "store": false
-      },
-      "models": {
-        "gpt-5.2-high": {
-          "name": "GPT 5.2 High (OAuth)",
-          "limit": { "context": 272000, "output": 128000 },
-          "modalities": { "input": ["text", "image"], "output": ["text"] },
-          "options": {
-            "reasoningEffort": "high",
-            "reasoningSummary": "detailed",
-            "textVerbosity": "medium",
-            "include": ["reasoning.encrypted_content"],
-            "store": false
-          }
-        },
-        "gpt-5.1-codex-max-high": {
-          "name": "GPT 5.1 Codex Max High (OAuth)",
-          "limit": { "context": 272000, "output": 128000 },
-          "modalities": { "input": ["text", "image"], "output": ["text"] },
-          "options": {
-            "reasoningEffort": "high",
-            "reasoningSummary": "detailed",
-            "textVerbosity": "medium",
-            "include": ["reasoning.encrypted_content"],
-            "store": false
-          }
-        }
-        // ... 20 more models - see config/full-opencode.json for complete list
-      }
-    }
-  }
-}
-```
+- OpenCode v1.0.210+ introduced a **variants system** that allows defining reasoning effort levels as variants under a single model
+- This reduces config size from 572 lines to ~150 lines while maintaining the same functionality
+- Use the legacy config if you're on an older OpenCode version

- **⚠️ Copy the complete file** from [`config/full-opencode.json`](./config/full-opencode.json) - don't use this truncated example.
+**How to choose:**

- **Global config**: `~/.config/opencode/opencode.json`
- **Project config**: `<project-root>/.opencode.json`
+1. **If you have OpenCode v1.0.210 or newer** (check with `opencode --version`):
+   - ✅ Use [`config/opencode-modern.json`](./config/opencode-modern.json)
+   - Benefits: Cleaner config, built-in variant cycling with `Ctrl+T`, easier to maintain

- This gives you 22 model variants with different reasoning levels:
- - **gpt-5.2** (none/low/medium/high/xhigh) - Latest GPT 5.2 model with full reasoning support
- - **gpt-5.2-codex** (low/medium/high/xhigh) - GPT 5.2 Codex presets
- - **gpt-5.1-codex-max** (low/medium/high/xhigh) - Codex Max presets
- - **gpt-5.1-codex** (low/medium/high) - Codex model presets
- - **gpt-5.1-codex-mini** (medium/high) - Codex mini tier presets
- - **gpt-5.1** (none/low/medium/high) - General-purpose reasoning presets
+2. **If you have OpenCode v1.0.209 or older**:
+   - ✅ Use [`config/opencode-legacy.json`](./config/opencode-legacy.json)
+   - This provides the same 20+ model variants as separate entries

- All appear in the opencode model selector as "GPT 5.1 Codex Low (OAuth)", "GPT 5.1 High (OAuth)", etc.
+**Quick install:**

-### Prompt caching & usage limits

+```bash
+# For OpenCode v1.0.210+ (recommended)
+cp <repo>/config/opencode-modern.json ~/.config/opencode/opencode.json

-Codex backend caching is enabled automatically. When OpenCode supplies a `prompt_cache_key` (its session identifier), the plugin forwards it unchanged so Codex can reuse work between turns. The plugin no longer synthesizes its own cache IDs—if the host omits `prompt_cache_key`, Codex will treat the turn as uncached. The bundled CODEX_MODE bridge prompt is synchronized with the latest Codex CLI release, so opencode and Codex stay in lock-step on tool availability. When your ChatGPT subscription nears a limit, opencode surfaces the plugin's friendly error message with the 5-hour and weekly windows, mirroring the Codex CLI summary.
+# For older OpenCode versions
+cp <repo>/config/opencode-legacy.json ~/.config/opencode/opencode.json
+```

-> **⚠️ IMPORTANT:** You MUST use the full configuration above. OpenCode's context auto-compaction and usage sidebar only work with the full config. Additionally, GPT 5 models require proper configuration - minimal configs are NOT supported and may fail unpredictably.

-#### ❌ Minimal Configuration (NOT RECOMMENDED - DO NOT USE)

+**What you get:**

-**DO NOT use minimal configurations** - they are not supported for GPT 5.1 and will not work reliably:

+| Config File            | Model Families | Reasoning Variants                        | Total Models                         |
+| ---------------------- | -------------- | ----------------------------------------- | ------------------------------------ |
+| `opencode-modern.json` | 6              | Built-in variants (low/medium/high/xhigh) | 6 base models with 19 total variants |
+| `opencode-legacy.json` | 6              | Separate model entries                    | 20 individual model definitions      |

-```json
-// ❌ DO NOT USE THIS - WILL NOT WORK RELIABLY
-{
-  "$schema": "https://opencode.ai/config.json",
-  "plugin": [
-    "opencode-openai-codex-auth"
-  ],
-  "model": "openai/gpt-5-codex"
-}
-```

+Both configs provide access to the same model families:
+
+- **gpt-5.2** (none/low/medium/high/xhigh) - Latest GPT 5.2 model with full reasoning support
+- **gpt-5.2-codex** (low/medium/high/xhigh) - GPT 5.2 Codex presets
+- **gpt-5.1-codex-max** (low/medium/high/xhigh) - Codex Max presets
+- **gpt-5.1-codex** (low/medium/high) - Codex model presets
+- **gpt-5.1-codex-mini** (medium/high) - Codex mini tier presets
+- **gpt-5.1** (none/low/medium/high) - General-purpose reasoning presets

-**Why this doesn't work:**
-- Unpinned plugin version won't receive updates (see [Plugin Versioning](#plugin-versioning--updates))
-- GPT 5 models are temperamental and need proper configuration
-- Missing model metadata breaks OpenCode features
-- No support for usage limits or context compaction
-- Cannot guarantee stable operation

+All appear in the opencode model selector as "GPT 5.1 Codex Low (OAuth)", "GPT 5.1 High (OAuth)", etc.

-2. **That's it!** opencode will auto-install the plugin on first run.

+### Prompt caching & usage limits
+
+Codex backend caching is enabled automatically. When OpenCode supplies a `prompt_cache_key` (its session identifier), the plugin forwards it unchanged so Codex can reuse work between turns. The plugin no longer synthesizes its own cache IDs—if the host omits `prompt_cache_key`, Codex will treat the turn as uncached. The bundled CODEX_MODE bridge prompt is synchronized with the latest Codex CLI release, so opencode and Codex stay in lock-step on tool availability. When your ChatGPT subscription nears a limit, opencode surfaces the plugin's friendly error message with the 5-hour and weekly windows, mirroring the Codex CLI summary.
+
+> **⚠️ IMPORTANT:** You MUST use one of the supported configurations above. OpenCode's context auto-compaction and usage sidebar only work with these configs. Additionally, GPT 5 models require proper configuration - minimal configs are NOT supported and may fail unpredictably.
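+
+If you are unsure which config is active, a quick check like the following can help (a sketch assuming `jq` is installed and the global config path is used; adjust the path for a project-local `.opencode.json`):
+
+```bash
+# The pinned plugin version should match the release you installed
+jq '.plugin' ~/.config/opencode/opencode.json
+
+# Model count: the modern config defines 6 base models,
+# the legacy config defines 20+ individual entries
+jq '.provider.openai.models | keys | length' ~/.config/opencode/opencode.json
+```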
> **New to opencode?** Learn more at [opencode.ai](https://opencode.ai) @@ -262,34 +228,65 @@ opencode run "balanced task" --model=openai/gpt-5.1-codex-mini-medium opencode run "complex code" --model=openai/gpt-5.1-codex-mini-high ``` -### Available Model Variants (Full Config) - -When using [`config/full-opencode.json`](./config/full-opencode.json), you get these pre-configured variants: - -| CLI Model ID | TUI Display Name | Reasoning Effort | Best For | -|--------------|------------------|-----------------|----------| -| `gpt-5.2-none` | GPT 5.2 None (OAuth) | None | Fastest GPT 5.2 responses (no reasoning) | -| `gpt-5.2-low` | GPT 5.2 Low (OAuth) | Low | Fast GPT 5.2 responses | -| `gpt-5.2-medium` | GPT 5.2 Medium (OAuth) | Medium | Balanced GPT 5.2 tasks | -| `gpt-5.2-high` | GPT 5.2 High (OAuth) | High | Complex GPT 5.2 reasoning | -| `gpt-5.2-xhigh` | GPT 5.2 Extra High (OAuth) | xHigh | Deep GPT 5.2 analysis | -| `gpt-5.2-codex-low` | GPT 5.2 Codex Low (OAuth) | Low | Fast GPT 5.2 Codex responses | -| `gpt-5.2-codex-medium` | GPT 5.2 Codex Medium (OAuth) | Medium | Balanced GPT 5.2 Codex coding tasks | -| `gpt-5.2-codex-high` | GPT 5.2 Codex High (OAuth) | High | Complex GPT 5.2 Codex reasoning & tools | -| `gpt-5.2-codex-xhigh` | GPT 5.2 Codex Extra High (OAuth) | xHigh | Deep GPT 5.2 Codex long-horizon work | -| `gpt-5.1-codex-max-low` | GPT 5.1 Codex Max Low (OAuth) | Low | Fast exploratory large-context work | -| `gpt-5.1-codex-max-medium` | GPT 5.1 Codex Max Medium (OAuth) | Medium | Balanced large-context builds | -| `gpt-5.1-codex-max-high` | GPT 5.1 Codex Max High (OAuth) | High | Long-horizon builds, large refactors | -| `gpt-5.1-codex-max-xhigh` | GPT 5.1 Codex Max Extra High (OAuth) | xHigh | Deep multi-hour agent loops, research/debug marathons | -| `gpt-5.1-codex-low` | GPT 5.1 Codex Low (OAuth) | Low | Fast code generation | -| `gpt-5.1-codex-medium` | GPT 5.1 Codex Medium (OAuth) | Medium | Balanced code tasks | -| `gpt-5.1-codex-high` | GPT 5.1 Codex High (OAuth) | High | Complex code & tools | -| `gpt-5.1-codex-mini-medium` | GPT 5.1 Codex Mini Medium (OAuth) | Medium | Lightweight Codex mini tier | -| `gpt-5.1-codex-mini-high` | GPT 5.1 Codex Mini High (OAuth) | High | Codex Mini with maximum reasoning | -| `gpt-5.1-none` | GPT 5.1 None (OAuth) | None | Fastest GPT 5.1 responses (no reasoning) | -| `gpt-5.1-low` | GPT 5.1 Low (OAuth) | Low | Faster responses with light reasoning | -| `gpt-5.1-medium` | GPT 5.1 Medium (OAuth) | Medium | Balanced general-purpose tasks | -| `gpt-5.1-high` | GPT 5.1 High (OAuth) | High | Deep reasoning, complex problems | +### Available Model Variants + +When using the recommended config file, you get pre-configured variants. 
The model ID format differs based on your OpenCode version:
+
+**For OpenCode v1.0.210+ (modern config with variants):**
+
+Use the base model with a variant suffix:
+
+```bash
+# Variant cycling available with Ctrl+T in TUI
+opencode run "task" --model=openai/gpt-5.2:low
+opencode run "task" --model=openai/gpt-5.2:medium
+opencode run "task" --model=openai/gpt-5.2:high
+opencode run "task" --model=openai/gpt-5.2:xhigh
+```
+
+| Base Model           | Available Variants             | TUI Display Name                     |
+| -------------------- | ------------------------------ | ------------------------------------ |
+| `gpt-5.2`            | none, low, medium, high, xhigh | GPT 5.2 {variant} (OAuth)            |
+| `gpt-5.2-codex`      | low, medium, high, xhigh       | GPT 5.2 Codex {variant} (OAuth)      |
+| `gpt-5.1-codex-max`  | low, medium, high, xhigh       | GPT 5.1 Codex Max {variant} (OAuth)  |
+| `gpt-5.1-codex`      | low, medium, high              | GPT 5.1 Codex {variant} (OAuth)      |
+| `gpt-5.1-codex-mini` | medium, high                   | GPT 5.1 Codex Mini {variant} (OAuth) |
+| `gpt-5.1`            | none, low, medium, high        | GPT 5.1 {variant} (OAuth)            |
+
+**For OpenCode v1.0.209 and below (legacy config with separate entries):**
+
+Use explicit model IDs:
+
+```bash
+opencode run "task" --model=openai/gpt-5.2-low
+opencode run "task" --model=openai/gpt-5.2-medium
+opencode run "task" --model=openai/gpt-5.2-high
+```
+
+| CLI Model ID                | TUI Display Name                     | Reasoning Effort | Best For                                              |
+| --------------------------- | ------------------------------------ | ---------------- | ----------------------------------------------------- |
+| `gpt-5.2-none`              | GPT 5.2 None (OAuth)                 | None             | Fastest GPT 5.2 responses (no reasoning)              |
+| `gpt-5.2-low`               | GPT 5.2 Low (OAuth)                  | Low              | Fast GPT 5.2 responses                                |
+| `gpt-5.2-medium`            | GPT 5.2 Medium (OAuth)               | Medium           | Balanced GPT 5.2 tasks                                |
+| `gpt-5.2-high`              | GPT 5.2 High (OAuth)                 | High             | Complex GPT 5.2 reasoning                             |
+| `gpt-5.2-xhigh`             | GPT 5.2 Extra High (OAuth)           | xHigh            | Deep GPT 5.2 analysis                                 |
+| `gpt-5.2-codex-low`         | GPT 5.2 Codex Low (OAuth)            | Low              | Fast GPT 5.2 Codex responses                          |
+| `gpt-5.2-codex-medium`      | GPT 5.2 Codex Medium (OAuth)         | Medium           | Balanced GPT 5.2 Codex coding tasks                   |
+| `gpt-5.2-codex-high`        | GPT 5.2 Codex High (OAuth)           | High             | Complex GPT 5.2 Codex reasoning & tools               |
+| `gpt-5.2-codex-xhigh`       | GPT 5.2 Codex Extra High (OAuth)     | xHigh            | Deep GPT 5.2 Codex long-horizon work                  |
+| `gpt-5.1-codex-max-low`     | GPT 5.1 Codex Max Low (OAuth)        | Low              | Fast exploratory large-context work                   |
+| `gpt-5.1-codex-max-medium`  | GPT 5.1 Codex Max Medium (OAuth)     | Medium           | Balanced large-context builds                         |
+| `gpt-5.1-codex-max-high`    | GPT 5.1 Codex Max High (OAuth)       | High             | Long-horizon builds, large refactors                  |
+| `gpt-5.1-codex-max-xhigh`   | GPT 5.1 Codex Max Extra High (OAuth) | xHigh            | Deep multi-hour agent loops, research/debug marathons |
+| `gpt-5.1-codex-low`         | GPT 5.1 Codex Low (OAuth)            | Low              | Fast code generation                                  |
+| `gpt-5.1-codex-medium`      | GPT 5.1 Codex Medium (OAuth)         | Medium           | Balanced code tasks                                   |
+| `gpt-5.1-codex-high`        | GPT 5.1 Codex High (OAuth)           | High             | Complex code & tools                                  |
+| `gpt-5.1-codex-mini-medium` | GPT 5.1 Codex Mini Medium (OAuth)    | Medium           | Lightweight Codex mini tier                           |
+| `gpt-5.1-codex-mini-high`   | GPT 5.1 Codex Mini High (OAuth)      | High             | Codex Mini with maximum reasoning                     |
+| `gpt-5.1-none`              | GPT 5.1 None (OAuth)                 | None             | Fastest GPT 5.1 responses (no reasoning)              |
+| `gpt-5.1-low`               | GPT 5.1 Low (OAuth)                  | Low              | Faster responses with light reasoning                 |
+| `gpt-5.1-medium`            | GPT 5.1 Medium (OAuth)               | Medium           | Balanced general-purpose tasks                        |
+| `gpt-5.1-high`              | GPT 5.1 High (OAuth)                 | High             | Deep reasoning, complex problems                      |
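+
+To see the exact model IDs your installed config exposes (base models for the modern config, full variant IDs for the legacy one), a quick listing like this can help — a sketch assuming `jq` and the global config path:
+
+```bash
+jq -r '.provider.openai.models | keys[]' ~/.config/opencode/opencode.json
+```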
**Usage**: `--model=openai/<model-id>` (e.g., `--model=openai/gpt-5.1-codex-low`)
**Display**: TUI shows the friendly name (e.g., "GPT 5.1 Codex Low (OAuth)")

@@ -298,7 +295,7 @@ When using [`config/full-opencode.json`](./config/full-opencode.json), you get t
 >
 > **Note**: GPT 5.2, GPT 5.2 Codex, and Codex Max all support `xhigh` reasoning. Use explicit reasoning levels (e.g., `gpt-5.2-high`, `gpt-5.2-codex-xhigh`, `gpt-5.1-codex-max-xhigh`) for precise control.

-> **⚠️ Important**: GPT 5 models can be temperamental - some variants may work better than others, some may give errors, and behavior may vary. Stick to the presets above configured in `full-opencode.json` for best results.
+> **⚠️ Important**: GPT 5 models can be temperamental - some variants may work better than others, some may give errors, and behavior may vary. Stick to the presets above configured in your config file for best results.

 All accessed via your ChatGPT Plus/Pro subscription.

@@ -340,12 +337,28 @@ These defaults are tuned for Codex CLI-style usage and can be customized (see Co

 ### ⚠️ REQUIRED: Use Pre-Configured File

-**YOU MUST use [`config/full-opencode.json`](./config/full-opencode.json)** - this is the only officially supported configuration:
-- 22 pre-configured model variants (GPT 5.2, GPT 5.2 Codex, GPT 5.1, Codex, Codex Max, Codex Mini)
-- Image input support enabled for all models
-- Optimal configuration for each reasoning level
-- All variants visible in the opencode model selector
-- Required metadata for OpenCode features to work properly
+**YOU MUST use one of the pre-configured files from the `config/` directory** - this is the only officially supported configuration:
+
+**For OpenCode v1.0.210+ (Jan 2026+):**
+
+- ✅ Use [`config/opencode-modern.json`](./config/opencode-modern.json)
+- 6 base models with built-in variants
+- ~150 lines, easier to maintain
+- Built-in variant cycling (`Ctrl+T`)
+
+**For OpenCode v1.0.209 and below:**
+
+- ✅ Use [`config/opencode-legacy.json`](./config/opencode-legacy.json)
+- 20+ individual model definitions
+- 572 lines, compatible with older versions
+
+Both configs provide:
+
+- ✅ Pre-configured model variants for all reasoning levels
+- ✅ Image input support enabled for all models
+- ✅ Optimal configuration for each reasoning level
+- ✅ All variants visible in the opencode model selector
+- ✅ Required metadata for OpenCode features to work properly

 **Do NOT use other configurations** - they are not supported and may fail unpredictably with GPT 5 models.

@@ -359,14 +372,15 @@ If you want to customize settings yourself, you can configure options at provide

⚠️ **Important**: Families have different supported values.
-| Setting | GPT-5.2 Values | GPT-5.2-Codex Values | GPT-5.1 Values | GPT-5.1-Codex Values | GPT-5.1-Codex-Max Values | Plugin Default | -|---------|---------------|----------------------|----------------|----------------------|---------------------------|----------------| -| `reasoningEffort` | `none`, `low`, `medium`, `high`, `xhigh` | `low`, `medium`, `high`, `xhigh` | `none`, `low`, `medium`, `high` | `low`, `medium`, `high` | `low`, `medium`, `high`, `xhigh` | `medium` (global), `high` for Codex Max/5.2/5.2 Codex | -| `reasoningSummary` | `auto`, `concise`, `detailed` | `auto`, `concise`, `detailed` | `auto`, `concise`, `detailed` | `auto`, `concise`, `detailed` | `auto`, `concise`, `detailed`, `off`, `on` | `auto` | -| `textVerbosity` | `low`, `medium`, `high` | `medium` or `high` | `low`, `medium`, `high` | `medium` or `high` | `medium` or `high` | `medium` | -| `include` | Array of strings | Array of strings | Array of strings | Array of strings | Array of strings | `["reasoning.encrypted_content"]` | +| Setting | GPT-5.2 Values | GPT-5.2-Codex Values | GPT-5.1 Values | GPT-5.1-Codex Values | GPT-5.1-Codex-Max Values | Plugin Default | +| ------------------ | ---------------------------------------- | -------------------------------- | ------------------------------- | ----------------------------- | ------------------------------------------ | ----------------------------------------------------- | +| `reasoningEffort` | `none`, `low`, `medium`, `high`, `xhigh` | `low`, `medium`, `high`, `xhigh` | `none`, `low`, `medium`, `high` | `low`, `medium`, `high` | `low`, `medium`, `high`, `xhigh` | `medium` (global), `high` for Codex Max/5.2/5.2 Codex | +| `reasoningSummary` | `auto`, `concise`, `detailed` | `auto`, `concise`, `detailed` | `auto`, `concise`, `detailed` | `auto`, `concise`, `detailed` | `auto`, `concise`, `detailed`, `off`, `on` | `auto` | +| `textVerbosity` | `low`, `medium`, `high` | `medium` or `high` | `low`, `medium`, `high` | `medium` or `high` | `medium` or `high` | `medium` | +| `include` | Array of strings | Array of strings | Array of strings | Array of strings | Array of strings | `["reasoning.encrypted_content"]` | > **Notes**: +> > - GPT 5.2 and GPT 5.1 (general purpose) support `none` reasoning per OpenAI API docs. > - `none` is NOT supported for Codex variants (including GPT 5.2 Codex) - auto-converts to `low` for Codex/Codex Max, or `medium` for Codex Mini. > - GPT 5.2, GPT 5.2 Codex, and Codex Max support `xhigh` reasoning. @@ -443,6 +457,7 @@ This plugin respects the same rate limits enforced by OpenAI's official Codex CL - **The plugin does NOT and CANNOT bypass** OpenAI's rate limits ### Best Practices: + - ✅ Use for individual coding tasks, not bulk processing - ✅ Avoid rapid-fire automated requests - ✅ Monitor your usage to stay within subscription limits @@ -494,6 +509,7 @@ See [Troubleshooting Guide](https://numman-ali.github.io/opencode-openai-codex-a This plugin uses **OpenAI's official OAuth authentication** (the same method as their official Codex CLI). It's designed for personal coding assistance with your own ChatGPT subscription. However, **users are responsible for ensuring their usage complies with OpenAI's Terms of Use**. 
This means: + - Personal use for your own development - Respecting rate limits - Not reselling access or powering commercial services @@ -510,12 +526,14 @@ For commercial applications, production systems, or services serving multiple us Using OAuth authentication for personal coding assistance aligns with OpenAI's official Codex CLI use case. However, violating OpenAI's terms could result in account action: **Safe use:** + - Personal coding assistance - Individual productivity - Legitimate development work - Respecting rate limits **Risky use:** + - Commercial resale of access - Powering multi-user services - High-volume automated extraction @@ -524,6 +542,7 @@ Using OAuth authentication for personal coding assistance aligns with OpenAI's o ### What's the difference between this and scraping session tokens? **Critical distinction:** + - ✅ **This plugin:** Uses official OAuth authentication through OpenAI's authorization server - ❌ **Session scraping:** Extracts cookies/tokens from browsers (clearly violates TOS) @@ -548,12 +567,14 @@ ChatGPT, GPT-5, and Codex are trademarks of OpenAI. ## Credits & Attribution This plugin implements OAuth authentication for OpenAI's Codex backend, using the same authentication flow as: + - [OpenAI's official Codex CLI](https://github.com/openai/codex) - OpenAI's OAuth authorization server (https://chatgpt.com/oauth) ### Acknowledgments Based on research and working implementations from: + - [ben-vargas/ai-sdk-provider-chatgpt-oauth](https://github.com/ben-vargas/ai-sdk-provider-chatgpt-oauth) - [ben-vargas/ai-opencode-chatgpt-auth](https://github.com/ben-vargas/ai-opencode-chatgpt-auth) - [openai/codex](https://github.com/openai/codex) OAuth flow @@ -568,6 +589,7 @@ Based on research and working implementations from: ## Documentation **📖 Documentation:** + - [Installation](#installation) - Get started in 2 minutes - [Configuration](#configuration) - Customize your setup - [Troubleshooting](#troubleshooting) - Common issues diff --git a/config/README.md b/config/README.md index 2722711..3ddd69c 100644 --- a/config/README.md +++ b/config/README.md @@ -1,59 +1,101 @@ # Configuration -This directory contains the official opencode configuration for the OpenAI Codex OAuth plugin. +This directory contains the official opencode configuration files for the OpenAI Codex OAuth plugin. -## ⚠️ REQUIRED Configuration File +## ⚠️ REQUIRED: Choose the Right Configuration -### full-opencode.json (REQUIRED - USE THIS ONLY) +**Two configuration files are available based on your OpenCode version:** -**YOU MUST use this configuration file** - it is the ONLY officially supported setup: +| File | OpenCode Version | Description | +|------|------------------|-------------| +| [`opencode-modern.json`](./opencode-modern.json) | **v1.0.210+ (Jan 2026+)** | Compact config using variants system - 6 models with built-in reasoning level variants | +| [`opencode-legacy.json`](./opencode-legacy.json) | **v1.0.209 and below** | Extended config with separate model entries for each reasoning level - 20+ individual model definitions | +### Which one should I use? 
+ +**If you have OpenCode v1.0.210 or newer** (check with `opencode --version`): +```bash +cp config/opencode-modern.json ~/.config/opencode/opencode.json +``` + +**If you have OpenCode v1.0.209 or older**: ```bash -cp config/full-opencode.json ~/.config/opencode/opencode.json +cp config/opencode-legacy.json ~/.config/opencode/opencode.json ``` -**Why this is required:** -- GPT 5 models can be temperamental and need proper configuration -- Contains 22 verified GPT 5.2/5.1 model variants (GPT 5.2, GPT 5.2 Codex, Codex, Codex Max, Codex Mini, and general GPT 5.1 including `gpt-5.1-codex-max-low/medium/high/xhigh`) -- Includes all required metadata for OpenCode features -- Guaranteed to work reliably -- Global options for all models + per-model configuration overrides +### Why two configs? + +OpenCode v1.0.210+ introduced a **variants system** that allows defining reasoning effort levels as variants under a single model. This reduces config size from 572 lines to ~150 lines while maintaining the same functionality. + +**What you get:** -**What's included:** -- All supported GPT 5.2/5.1 variants: gpt-5.2, gpt-5.2-codex, gpt-5.1, gpt-5.1-codex, gpt-5.1-codex-max, gpt-5.1-codex-mini -- Proper reasoning effort settings for each variant (including new `xhigh` for Codex Max) -- Context limits (272k context / 128k output for all Codex families, including Codex Max) -- Required options: `store: false`, `include: ["reasoning.encrypted_content"]` +| Config File | Model Families | Reasoning Variants | Total Models | +|------------|----------------|-------------------|--------------| +| `opencode-modern.json` | 6 | Built-in variants (low/medium/high/xhigh) | 6 base models with 19 total variants | +| `opencode-legacy.json` | 6 | Separate model entries | 20 individual model definitions | -### ❌ Other Configurations (NOT SUPPORTED) +Both configs provide: +- ✅ All supported GPT 5.2/5.1 variants: gpt-5.2, gpt-5.2-codex, gpt-5.1, gpt-5.1-codex, gpt-5.1-codex-max, gpt-5.1-codex-mini +- ✅ Proper reasoning effort settings for each variant (including `xhigh` for Codex Max/5.2) +- ✅ Context limits (272k context / 128k output for all Codex families) +- ✅ Required options: `store: false`, `include: ["reasoning.encrypted_content"]` +- ✅ Image input support for all models +- ✅ All required metadata for OpenCode features -**DO NOT use:** -- `minimal-opencode.json` - NOT supported, will fail unpredictably -- `full-opencode-gpt5.json` - DEPRECATED, GPT 5.0 models are being phased out by OpenAI -- Custom configurations - NOT recommended, may not work reliably +### Modern Config Benefits (v1.0.210+) -**Why other configs don't work:** -- GPT 5 models need specific configurations -- Missing metadata breaks OpenCode features -- No support for usage limits or context compaction -- Cannot guarantee stable operation +- **74% smaller**: 150 lines vs 572 lines +- **DRY**: Common options defined once at provider level +- **Variant cycling**: Built-in support for `Ctrl+T` to switch reasoning levels +- **Easier maintenance**: Add new variants without copying model definitions ## Usage -**ONLY ONE OPTION** - use the full configuration: +1. **Check your OpenCode version**: + ```bash + opencode --version + ``` -1. Copy `full-opencode.json` to your opencode config directory: - - Global: `~/.config/opencode/opencode.json` - - Project: `/.opencode.json` +2. **Copy the appropriate config** based on your version: + ```bash + # For v1.0.210+ (recommended): + cp config/opencode-modern.json ~/.config/opencode/opencode.json -2. 
**DO NOT modify** the configuration unless you know exactly what you're doing. The provided settings have been tested and verified to work. + # For older versions: + cp config/opencode-legacy.json ~/.config/opencode/opencode.json + ``` -3. Run opencode: `opencode run "your prompt" --model=openai/gpt-5.1-codex-medium` +3. **Run opencode**: + ```bash + # Modern config (v1.0.210+): + opencode run "task" --model=openai/gpt-5.2:medium + opencode run "task" --model=openai/gpt-5.2:high -> **⚠️ Critical**: GPT 5 models require this exact configuration. Do not use minimal configs or create custom variants - they are not supported and will fail unpredictably. + # Legacy config: + opencode run "task" --model=openai/gpt-5.2-medium + opencode run "task" --model=openai/gpt-5.2-high + ``` + +> **⚠️ Important**: Use the config file appropriate for your OpenCode version. Using the modern config with an older OpenCode version (v1.0.209 or below) will not work correctly. + +## Available Models + +Both configs provide access to the same model families: + +- **gpt-5.2** (none/low/medium/high/xhigh) - Latest GPT 5.2 model with full reasoning support +- **gpt-5.2-codex** (low/medium/high/xhigh) - GPT 5.2 Codex presets +- **gpt-5.1-codex-max** (low/medium/high/xhigh) - Codex Max presets +- **gpt-5.1-codex** (low/medium/high) - Codex model presets +- **gpt-5.1-codex-mini** (medium/high) - Codex mini tier presets +- **gpt-5.1** (none/low/medium/high) - General-purpose reasoning presets + +All appear in the opencode model selector as "GPT 5.1 Codex Low (OAuth)", "GPT 5.1 High (OAuth)", etc. ## Configuration Options See the main [README.md](../README.md#configuration) for detailed documentation of all configuration options. -**Remember**: Use `full-opencode.json` as-is for guaranteed compatibility. Custom configurations are not officially supported. +## Version History + +- **January 2026 (v1.0.210+)**: Introduced variant system support. 
Use `opencode-modern.json` +- **December 2025 and earlier**: Use `opencode-legacy.json` diff --git a/config/full-opencode.json b/config/opencode-legacy.json similarity index 100% rename from config/full-opencode.json rename to config/opencode-legacy.json diff --git a/config/opencode-modern.json b/config/opencode-modern.json new file mode 100644 index 0000000..c274f47 --- /dev/null +++ b/config/opencode-modern.json @@ -0,0 +1,239 @@ +{ + "$schema": "https://opencode.ai/config.json", + "plugin": [ + "opencode-openai-codex-auth@4.2.0" + ], + "provider": { + "openai": { + "options": { + "reasoningEffort": "medium", + "reasoningSummary": "auto", + "textVerbosity": "medium", + "include": [ + "reasoning.encrypted_content" + ], + "store": false + }, + "models": { + "gpt-5.2": { + "name": "GPT 5.2 (OAuth)", + "limit": { + "context": 272000, + "output": 128000 + }, + "modalities": { + "input": [ + "text", + "image" + ], + "output": [ + "text" + ] + }, + "variants": { + "none": { + "reasoningEffort": "none", + "reasoningSummary": "auto", + "textVerbosity": "medium" + }, + "low": { + "reasoningEffort": "low", + "reasoningSummary": "auto", + "textVerbosity": "medium" + }, + "medium": { + "reasoningEffort": "medium", + "reasoningSummary": "auto", + "textVerbosity": "medium" + }, + "high": { + "reasoningEffort": "high", + "reasoningSummary": "detailed", + "textVerbosity": "medium" + }, + "xhigh": { + "reasoningEffort": "xhigh", + "reasoningSummary": "detailed", + "textVerbosity": "medium" + } + } + }, + "gpt-5.2-codex": { + "name": "GPT 5.2 Codex (OAuth)", + "limit": { + "context": 272000, + "output": 128000 + }, + "modalities": { + "input": [ + "text", + "image" + ], + "output": [ + "text" + ] + }, + "variants": { + "low": { + "reasoningEffort": "low", + "reasoningSummary": "auto", + "textVerbosity": "medium" + }, + "medium": { + "reasoningEffort": "medium", + "reasoningSummary": "auto", + "textVerbosity": "medium" + }, + "high": { + "reasoningEffort": "high", + "reasoningSummary": "detailed", + "textVerbosity": "medium" + }, + "xhigh": { + "reasoningEffort": "xhigh", + "reasoningSummary": "detailed", + "textVerbosity": "medium" + } + } + }, + "gpt-5.1-codex-max": { + "name": "GPT 5.1 Codex Max (OAuth)", + "limit": { + "context": 272000, + "output": 128000 + }, + "modalities": { + "input": [ + "text", + "image" + ], + "output": [ + "text" + ] + }, + "variants": { + "low": { + "reasoningEffort": "low", + "reasoningSummary": "detailed", + "textVerbosity": "medium" + }, + "medium": { + "reasoningEffort": "medium", + "reasoningSummary": "detailed", + "textVerbosity": "medium" + }, + "high": { + "reasoningEffort": "high", + "reasoningSummary": "detailed", + "textVerbosity": "medium" + }, + "xhigh": { + "reasoningEffort": "xhigh", + "reasoningSummary": "detailed", + "textVerbosity": "medium" + } + } + }, + "gpt-5.1-codex": { + "name": "GPT 5.1 Codex (OAuth)", + "limit": { + "context": 272000, + "output": 128000 + }, + "modalities": { + "input": [ + "text", + "image" + ], + "output": [ + "text" + ] + }, + "variants": { + "low": { + "reasoningEffort": "low", + "reasoningSummary": "auto", + "textVerbosity": "medium" + }, + "medium": { + "reasoningEffort": "medium", + "reasoningSummary": "auto", + "textVerbosity": "medium" + }, + "high": { + "reasoningEffort": "high", + "reasoningSummary": "detailed", + "textVerbosity": "medium" + } + } + }, + "gpt-5.1-codex-mini": { + "name": "GPT 5.1 Codex Mini (OAuth)", + "limit": { + "context": 272000, + "output": 128000 + }, + "modalities": { + "input": [ + "text", + "image" 
+ ], + "output": [ + "text" + ] + }, + "variants": { + "medium": { + "reasoningEffort": "medium", + "reasoningSummary": "auto", + "textVerbosity": "medium" + }, + "high": { + "reasoningEffort": "high", + "reasoningSummary": "detailed", + "textVerbosity": "medium" + } + } + }, + "gpt-5.1": { + "name": "GPT 5.1 (OAuth)", + "limit": { + "context": 272000, + "output": 128000 + }, + "modalities": { + "input": [ + "text", + "image" + ], + "output": [ + "text" + ] + }, + "variants": { + "none": { + "reasoningEffort": "none", + "reasoningSummary": "auto", + "textVerbosity": "medium" + }, + "low": { + "reasoningEffort": "low", + "reasoningSummary": "auto", + "textVerbosity": "low" + }, + "medium": { + "reasoningEffort": "medium", + "reasoningSummary": "auto", + "textVerbosity": "medium" + }, + "high": { + "reasoningEffort": "high", + "reasoningSummary": "detailed", + "textVerbosity": "high" + } + } + } + } + } + } +} diff --git a/lib/prompts/opencode-codex.ts b/lib/prompts/opencode-codex.ts index 036b14b..d93a276 100644 --- a/lib/prompts/opencode-codex.ts +++ b/lib/prompts/opencode-codex.ts @@ -10,7 +10,7 @@ import { homedir } from "node:os"; import { mkdir, readFile, writeFile } from "node:fs/promises"; const OPENCODE_CODEX_URL = - "https://raw.githubusercontent.com/sst/opencode/dev/packages/opencode/src/session/prompt/codex.txt"; + "https://raw.githubusercontent.com/anomalyco/opencode/dev/packages/opencode/src/session/prompt/codex.txt"; const CACHE_DIR = join(homedir(), ".opencode", "cache"); const CACHE_FILE = join(CACHE_DIR, "opencode-codex.txt"); const CACHE_META_FILE = join(CACHE_DIR, "opencode-codex-meta.json"); diff --git a/lib/request/request-transformer.ts b/lib/request/request-transformer.ts index 05705ed..90d91b0 100644 --- a/lib/request/request-transformer.ts +++ b/lib/request/request-transformer.ts @@ -1,14 +1,15 @@ +import * as fs from "node:fs"; import { logDebug, logWarn } from "../logger.js"; import { TOOL_REMAP_MESSAGE } from "../prompts/codex.js"; import { CODEX_OPENCODE_BRIDGE } from "../prompts/codex-opencode-bridge.js"; import { getOpenCodeCodexPrompt } from "../prompts/opencode-codex.js"; import { getNormalizedModel } from "./helpers/model-map.js"; import type { - ConfigOptions, - InputItem, - ReasoningConfig, - RequestBody, - UserConfig, + ConfigOptions, + InputItem, + ReasoningConfig, + RequestBody, + UserConfig, } from "../types.js"; /** @@ -21,86 +22,86 @@ import type { * @returns Normalized model name (e.g., "gpt-5.1-codex", "gpt-5-codex") */ export function normalizeModel(model: string | undefined): string { - if (!model) return "gpt-5.1"; - - // Strip provider prefix if present (e.g., "openai/gpt-5-codex" → "gpt-5-codex") - const modelId = model.includes("/") ? model.split("/").pop()! : model; - - // Try explicit model map first (handles all known model variants) - const mappedModel = getNormalizedModel(modelId); - if (mappedModel) { - return mappedModel; - } - - // Fallback: Pattern-based matching for unknown/custom model names - // This preserves backwards compatibility with old verbose names - // like "GPT 5 Codex Low (ChatGPT Subscription)" - const normalized = modelId.toLowerCase(); - - // Priority order for pattern matching (most specific first): - // 1. GPT-5.2 Codex (newest codex model) - if ( - normalized.includes("gpt-5.2-codex") || - normalized.includes("gpt 5.2 codex") - ) { - return "gpt-5.2-codex"; - } - - // 2. GPT-5.2 (general purpose) - if (normalized.includes("gpt-5.2") || normalized.includes("gpt 5.2")) { - return "gpt-5.2"; - } - - // 3. 
GPT-5.1 Codex Max - if ( - normalized.includes("gpt-5.1-codex-max") || - normalized.includes("gpt 5.1 codex max") - ) { - return "gpt-5.1-codex-max"; - } - - // 4. GPT-5.1 Codex Mini - if ( - normalized.includes("gpt-5.1-codex-mini") || - normalized.includes("gpt 5.1 codex mini") - ) { - return "gpt-5.1-codex-mini"; - } - - // 5. Legacy Codex Mini - if ( - normalized.includes("codex-mini-latest") || - normalized.includes("gpt-5-codex-mini") || - normalized.includes("gpt 5 codex mini") - ) { - return "codex-mini-latest"; - } - - // 6. GPT-5.1 Codex - if ( - normalized.includes("gpt-5.1-codex") || - normalized.includes("gpt 5.1 codex") - ) { - return "gpt-5.1-codex"; - } - - // 7. GPT-5.1 (general-purpose) - if (normalized.includes("gpt-5.1") || normalized.includes("gpt 5.1")) { - return "gpt-5.1"; - } - - // 8. GPT-5 Codex family (any variant with "codex") - if (normalized.includes("codex")) { - return "gpt-5.1-codex"; - } - - // 9. GPT-5 family (any variant) - default to 5.1 as 5 is being phased out - if (normalized.includes("gpt-5") || normalized.includes("gpt 5")) { - return "gpt-5.1"; - } - - // Default fallback - use gpt-5.1 as gpt-5 is being phased out - return "gpt-5.1"; + if (!model) return "gpt-5.1"; + + // Strip provider prefix if present (e.g., "openai/gpt-5-codex" → "gpt-5-codex") + const modelId = model.includes("/") ? model.split("/").pop()! : model; + + // Try explicit model map first (handles all known model variants) + const mappedModel = getNormalizedModel(modelId); + if (mappedModel) { + return mappedModel; + } + + // Fallback: Pattern-based matching for unknown/custom model names + // This preserves backwards compatibility with old verbose names + // like "GPT 5 Codex Low (ChatGPT Subscription)" + const normalized = modelId.toLowerCase(); + + // Priority order for pattern matching (most specific first): + // 1. GPT-5.2 Codex (newest codex model) + if ( + normalized.includes("gpt-5.2-codex") || + normalized.includes("gpt 5.2 codex") + ) { + return "gpt-5.2-codex"; + } + + // 2. GPT-5.2 (general purpose) + if (normalized.includes("gpt-5.2") || normalized.includes("gpt 5.2")) { + return "gpt-5.2"; + } + + // 3. GPT-5.1 Codex Max + if ( + normalized.includes("gpt-5.1-codex-max") || + normalized.includes("gpt 5.1 codex max") + ) { + return "gpt-5.1-codex-max"; + } + + // 4. GPT-5.1 Codex Mini + if ( + normalized.includes("gpt-5.1-codex-mini") || + normalized.includes("gpt 5.1 codex mini") + ) { + return "gpt-5.1-codex-mini"; + } + + // 5. Legacy Codex Mini + if ( + normalized.includes("codex-mini-latest") || + normalized.includes("gpt-5-codex-mini") || + normalized.includes("gpt 5 codex mini") + ) { + return "codex-mini-latest"; + } + + // 6. GPT-5.1 Codex + if ( + normalized.includes("gpt-5.1-codex") || + normalized.includes("gpt 5.1 codex") + ) { + return "gpt-5.1-codex"; + } + + // 7. GPT-5.1 (general-purpose) + if (normalized.includes("gpt-5.1") || normalized.includes("gpt 5.1")) { + return "gpt-5.1"; + } + + // 8. GPT-5 Codex family (any variant with "codex") + if (normalized.includes("codex")) { + return "gpt-5.1-codex"; + } + + // 9. 
GPT-5 family (any variant) - default to 5.1 as 5 is being phased out + if (normalized.includes("gpt-5") || normalized.includes("gpt 5")) { + return "gpt-5.1"; + } + + // Default fallback - use gpt-5.1 as gpt-5 is being phased out + return "gpt-5.1"; } /** @@ -111,14 +112,14 @@ export function normalizeModel(model: string | undefined): string { * @returns Merged configuration for this model */ export function getModelConfig( - modelName: string, - userConfig: UserConfig = { global: {}, models: {} }, + modelName: string, + userConfig: UserConfig = { global: {}, models: {} }, ): ConfigOptions { - const globalOptions = userConfig.global || {}; - const modelOptions = userConfig.models?.[modelName]?.options || {}; + const globalOptions = userConfig.global || {}; + const modelOptions = userConfig.models?.[modelName]?.options || {}; - // Model-specific options override global options - return { ...globalOptions, ...modelOptions }; + // Model-specific options override global options + return { ...globalOptions, ...modelOptions }; } /** @@ -130,103 +131,115 @@ export function getModelConfig( * - Codex CLI has been thoroughly tested against this backend * * @param originalModel - Original model name before normalization - * @param userConfig - User configuration object + * @param userConfig - User configuration from plugin config file + * @param externalConfig - External configuration from OpenCode's providerOptions (variant selection) * @returns Reasoning configuration */ export function getReasoningConfig( - modelName: string | undefined, - userConfig: ConfigOptions = {}, + modelName: string | undefined, + userConfig: ConfigOptions = {}, + externalConfig: ConfigOptions = {}, ): ReasoningConfig { - const normalizedName = modelName?.toLowerCase() ?? ""; - - // GPT-5.2 Codex is the newest codex model (supports xhigh, but not "none") - const isGpt52Codex = - normalizedName.includes("gpt-5.2-codex") || - normalizedName.includes("gpt 5.2 codex"); - - // GPT-5.2 general purpose (not codex variant) - const isGpt52General = - (normalizedName.includes("gpt-5.2") || normalizedName.includes("gpt 5.2")) && - !isGpt52Codex; - const isCodexMax = - normalizedName.includes("codex-max") || - normalizedName.includes("codex max"); - const isCodexMini = - normalizedName.includes("codex-mini") || - normalizedName.includes("codex mini") || - normalizedName.includes("codex_mini") || - normalizedName.includes("codex-mini-latest"); - const isCodex = normalizedName.includes("codex") && !isCodexMini; - const isLightweight = - !isCodexMini && - (normalizedName.includes("nano") || - normalizedName.includes("mini")); - - // GPT-5.1 general purpose (not codex variants) - supports "none" per OpenAI API docs - const isGpt51General = - (normalizedName.includes("gpt-5.1") || normalizedName.includes("gpt 5.1")) && - !isCodex && - !isCodexMax && - !isCodexMini; - - // GPT 5.2, GPT 5.2 Codex, and Codex Max support xhigh reasoning - const supportsXhigh = isGpt52General || isGpt52Codex || isCodexMax; - - // GPT 5.1 general and GPT 5.2 general support "none" reasoning per: - // - OpenAI API docs: "gpt-5.1 defaults to none, supports: none, low, medium, high" - // - Codex CLI: ReasoningEffort enum includes None variant (codex-rs/protocol/src/openai_models.rs) - // - Codex CLI: docs/config.md lists "none" as valid for model_reasoning_effort - // - gpt-5.2 (being newer) also supports: none, low, medium, high, xhigh - // - Codex models (including GPT-5.2 Codex) do NOT support "none" - const supportsNone = isGpt52General || isGpt51General; - - 
// Default based on model type (Codex CLI defaults) - // Note: OpenAI docs say gpt-5.1 defaults to "none", but we default to "medium" - // for better coding assistance unless user explicitly requests "none" - const defaultEffort: ReasoningConfig["effort"] = isCodexMini - ? "medium" - : supportsXhigh - ? "high" - : isLightweight - ? "minimal" - : "medium"; - - // Get user-requested effort - let effort = userConfig.reasoningEffort || defaultEffort; - - if (isCodexMini) { - if (effort === "minimal" || effort === "low" || effort === "none") { - effort = "medium"; - } - if (effort === "xhigh") { - effort = "high"; - } - if (effort !== "high" && effort !== "medium") { - effort = "medium"; - } - } - - // For models that don't support xhigh, downgrade to high - if (!supportsXhigh && effort === "xhigh") { - effort = "high"; - } - - // For models that don't support "none", upgrade to "low" - // (Codex models don't support "none" - only GPT-5.1 and GPT-5.2 general purpose do) - if (!supportsNone && effort === "none") { - effort = "low"; - } - - // Normalize "minimal" to "low" for Codex families - // Codex CLI presets are low/medium/high (or xhigh for Codex Max / GPT-5.2 Codex) - if (isCodex && effort === "minimal") { - effort = "low"; - } - - return { - effort, - summary: userConfig.reasoningSummary || "auto", // Changed from "detailed" to match Codex CLI - }; + const normalizedName = modelName?.toLowerCase() ?? ""; + + // GPT-5.2 Codex is the newest codex model (supports xhigh, but not "none") + const isGpt52Codex = + normalizedName.includes("gpt-5.2-codex") || + normalizedName.includes("gpt 5.2 codex"); + + // GPT-5.2 general purpose (not codex variant) + const isGpt52General = + (normalizedName.includes("gpt-5.2") || + normalizedName.includes("gpt 5.2")) && + !isGpt52Codex; + const isCodexMax = + normalizedName.includes("codex-max") || + normalizedName.includes("codex max"); + const isCodexMini = + normalizedName.includes("codex-mini") || + normalizedName.includes("codex mini") || + normalizedName.includes("codex_mini") || + normalizedName.includes("codex-mini-latest"); + const isCodex = normalizedName.includes("codex") && !isCodexMini; + const isLightweight = + !isCodexMini && + (normalizedName.includes("nano") || normalizedName.includes("mini")); + + // GPT-5.1 general purpose (not codex variants) - supports "none" per OpenAI API docs + const isGpt51General = + (normalizedName.includes("gpt-5.1") || + normalizedName.includes("gpt 5.1")) && + !isCodex && + !isCodexMax && + !isCodexMini; + + // GPT 5.2, GPT 5.2 Codex, and Codex Max support xhigh reasoning + const supportsXhigh = isGpt52General || isGpt52Codex || isCodexMax; + + // GPT 5.1 general and GPT 5.2 general support "none" reasoning per: + // - OpenAI API docs: "gpt-5.1 defaults to none, supports: none, low, medium, high" + // - Codex CLI: ReasoningEffort enum includes None variant (codex-rs/protocol/src/openai_models.rs) + // - Codex CLI: docs/config.md lists "none" as valid for model_reasoning_effort + // - gpt-5.2 (being newer) also supports: none, low, medium, high, xhigh + // - Codex models (including GPT-5.2 Codex) do NOT support "none" + const supportsNone = isGpt52General || isGpt51General; + + // Default based on model type (Codex CLI defaults) + // Note: OpenAI docs say gpt-5.1 defaults to "none", but we default to "medium" + // for better coding assistance unless user explicitly requests "none" + const defaultEffort: ReasoningConfig["effort"] = isCodexMini + ? "medium" + : supportsXhigh + ? "high" + : isLightweight + ? 
"minimal" + : "medium"; + + // Priority: externalConfig (from OpenCode variant) > userConfig > computed defaults + // This allows variant selection via Ctrl+T to override plugin defaults + let effort = + externalConfig.reasoningEffort ?? + userConfig.reasoningEffort ?? + defaultEffort; + + if (isCodexMini) { + if (effort === "minimal" || effort === "low" || effort === "none") { + effort = "medium"; + } + if (effort === "xhigh") { + effort = "high"; + } + if (effort !== "high" && effort !== "medium") { + effort = "medium"; + } + } + + // For models that don't support xhigh, downgrade to high + if (!supportsXhigh && effort === "xhigh") { + effort = "high"; + } + + // For models that don't support "none", upgrade to "low" + // (Codex models don't support "none" - only GPT-5.1 and GPT-5.2 general purpose do) + if (!supportsNone && effort === "none") { + effort = "low"; + } + + // Normalize "minimal" to "low" for ALL GPT-5.1/5.2 models + // "minimal" is NOT supported by any gpt-5.1 or gpt-5.2 model (general or codex) + // It was deprecated when transitioning from gpt-5 to gpt-5.1 + if (effort === "minimal") { + effort = "low"; + } + + // Priority for summary: externalConfig > userConfig > "auto" (Codex CLI default) + const summary = + externalConfig.reasoningSummary ?? userConfig.reasoningSummary ?? "auto"; + + return { + effort, + summary, + }; } /** @@ -252,26 +265,26 @@ export function getReasoningConfig( * @returns Filtered input array compatible with Codex API */ export function filterInput( - input: InputItem[] | undefined, + input: InputItem[] | undefined, ): InputItem[] | undefined { - if (!Array.isArray(input)) return input; - - return input - .filter((item) => { - // Remove AI SDK constructs not supported by Codex API - if (item.type === "item_reference") { - return false; // AI SDK only - references server state - } - return true; // Keep all other items - }) - .map((item) => { - // Strip IDs from all items (Codex API stateless mode) - if (item.id) { - const { id, ...itemWithoutId } = item; - return itemWithoutId as InputItem; - } - return item; - }); + if (!Array.isArray(input)) return input; + + return input + .filter((item) => { + // Remove AI SDK constructs not supported by Codex API + if (item.type === "item_reference") { + return false; // AI SDK only - references server state + } + return true; // Keep all other items + }) + .map((item) => { + // Strip IDs from all items (Codex API stateless mode) + if (item.id) { + const { id, ...itemWithoutId } = item; + return itemWithoutId as InputItem; + } + return item; + }); } /** @@ -282,74 +295,189 @@ export function filterInput( * @returns True if this is the OpenCode system prompt */ export function isOpenCodeSystemPrompt( - item: InputItem, - cachedPrompt: string | null, + item: InputItem, + cachedPrompt: string | null, ): boolean { - const isSystemRole = item.role === "developer" || item.role === "system"; - if (!isSystemRole) return false; - - const getContentText = (item: InputItem): string => { - if (typeof item.content === "string") { - return item.content; - } - if (Array.isArray(item.content)) { - return item.content - .filter((c) => c.type === "input_text" && c.text) - .map((c) => c.text) - .join("\n"); - } - return ""; - }; - - const contentText = getContentText(item); - if (!contentText) return false; - - // Primary check: Compare against cached OpenCode prompt - if (cachedPrompt) { - // Exact match (trim whitespace for comparison) - if (contentText.trim() === cachedPrompt.trim()) { - return true; - } - - // Partial 
match: Check if first 200 chars match (handles minor variations) - const contentPrefix = contentText.trim().substring(0, 200); - const cachedPrefix = cachedPrompt.trim().substring(0, 200); - if (contentPrefix === cachedPrefix) { - return true; - } + const isSystemRole = item.role === "developer" || item.role === "system"; + if (!isSystemRole) return false; + + const getContentText = (item: InputItem): string => { + if (typeof item.content === "string") { + return item.content; + } + if (Array.isArray(item.content)) { + return item.content + .filter((c) => c.type === "input_text" && c.text) + .map((c) => c.text) + .join("\n"); + } + return ""; + }; + + const contentText = getContentText(item); + if (!contentText) return false; + + // Primary check: Compare against cached OpenCode prompt + if (cachedPrompt) { + // Exact match (trim whitespace for comparison) + if (contentText.trim() === cachedPrompt.trim()) { + return true; + } + + // Partial match: Check if first 200 chars match (handles minor variations) + const contentPrefix = contentText.trim().substring(0, 200); + const cachedPrefix = cachedPrompt.trim().substring(0, 200); + if (contentPrefix === cachedPrefix) { + return true; + } + } + + // Fallback check: Known OpenCode prompt signature (for safety) + // This catches the prompt even if cache fails + return contentText.startsWith("You are a coding agent running in"); +} + +/** + * Extract content text from an input item + * @param item - Input item + * @returns Content as string + */ +function getContentText(item: InputItem): string { + if (typeof item.content === "string") { + return item.content; } + if (Array.isArray(item.content)) { + return item.content + .filter((c) => c.type === "input_text" && c.text) + .map((c) => c.text) + .join("\n"); + } + return ""; +} - // Fallback check: Known OpenCode prompt signature (for safety) - // This catches the prompt even if cache fails - return contentText.startsWith("You are a coding agent running in"); +/** + * Extract AGENTS.md content from a concatenated OpenCode message + * + * OpenCode concatenates multiple pieces into a single developer message: + * 1. Base codex.txt prompt (starts with "You are a coding agent running in...") + * 2. Environment info + * 3. block + * 4. AGENTS.md content (prefixed with "Instructions from: /path/to/AGENTS.md") + * + * This function extracts the AGENTS.md portions so they can be preserved + * when filtering out the OpenCode base prompt. + * + * @param contentText - The full content text of the message + * @returns The AGENTS.md content if found, null otherwise + */ +function extractAgentsMdContent(contentText: string): string | null { + const marker = "Instructions from:"; + const idx = contentText.indexOf(marker); + if (idx > 0) { + return contentText.slice(idx).trimStart(); + } + return null; } /** * Filter out OpenCode system prompts from input * Used in CODEX_MODE to replace OpenCode prompts with Codex-OpenCode bridge + * + * When OpenCode sends a concatenated message containing both the base prompt + * AND AGENTS.md content, this function extracts and preserves the AGENTS.md + * portions while filtering out the OpenCode base prompt. 
+ *
 * @param input - Input array
- * @returns Input array without OpenCode system prompts
+ * @returns Input array without OpenCode system prompts (but with AGENTS.md preserved)
 */
export async function filterOpenCodeSystemPrompts(
-  input: InputItem[] | undefined,
+  input: InputItem[] | undefined,
): Promise<InputItem[] | undefined> {
-  if (!Array.isArray(input)) return input;
-
-  // Fetch cached OpenCode prompt for verification
-  let cachedPrompt: string | null = null;
-  try {
-    cachedPrompt = await getOpenCodeCodexPrompt();
-  } catch {
-    // If fetch fails, fallback to text-based detection only
-    // This is safe because we still have the "starts with" check
-  }
-
-  return input.filter((item) => {
-    // Keep user messages
-    if (item.role === "user") return true;
-    // Filter out OpenCode system prompts
-    return !isOpenCodeSystemPrompt(item, cachedPrompt);
-  });
+  if (!Array.isArray(input)) return input;
+
+  // Fetch cached OpenCode prompt for verification
+  let cachedPrompt: string | null = null;
+  try {
+    cachedPrompt = await getOpenCodeCodexPrompt();
+  } catch {
+    // If fetch fails, fallback to text-based detection only
+  }
+
+  const result: InputItem[] = [];
+  const customInstructions: InputItem[] = [];
+
+  for (const item of input) {
+    // Keep user messages as-is
+    if (item.role === "user") {
+      result.push(item);
+      continue;
+    }
+
+    const contentText = getContentText(item);
+
+    // Extract "Instructions from:" blocks from ANY developer message
+    // This ensures AGENTS.md is preserved regardless of prompt detection
+    const agentsMdContent = extractAgentsMdContent(contentText);
+    if (agentsMdContent) {
+      customInstructions.push({
+        type: "message",
+        role: "developer",
+        content: [
+          {
+            type: "input_text",
+            text: agentsMdContent,
+          },
+        ],
+      });
+    }
+
+    // Check if this is an OpenCode system prompt - filter it out
+    if (isOpenCodeSystemPrompt(item, cachedPrompt)) {
+      // Don't add the original OpenCode prompt (AGENTS.md already extracted above)
+      continue;
+    }
+
+    // For non-OpenCode messages that had "Instructions from:",
+    // we've extracted them above - don't keep the original if it was just instructions
+    if (agentsMdContent && contentText.trim().startsWith("Instructions from:")) {
+      // This was a standalone AGENTS.md message, already extracted
+      continue;
+    }
+
+    // Keep all other messages
+    result.push(item);
+  }
+
+  // Insert custom instructions before user messages
+  // Order after bridge prepend: [bridge, custom_instructions, user_messages]
+  return [...customInstructions, ...result];
 }
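To make the concatenated-message handling concrete, here is an illustrative sketch (not part of the diff) of what `filterOpenCodeSystemPrompts` is expected to produce. The message text and file path are invented for the example, and the import paths mirror the ones the repo's tests use:

```typescript
import { filterOpenCodeSystemPrompts } from "../lib/request/request-transformer.js";
import type { InputItem } from "../lib/types.js";

// One concatenated developer message (base prompt + AGENTS.md) plus a user turn.
const input: InputItem[] = [
  {
    type: "message",
    role: "developer",
    content:
      "You are a coding agent running in the opencode, a terminal-based coding assistant.\n\n" +
      "Instructions from: /home/demo/project/AGENTS.md\n" +
      "# Project Guidelines\nUse TypeScript for all new code.",
  },
  { type: "message", role: "user", content: "hello" },
];

const filtered = await filterOpenCodeSystemPrompts(input);
// filtered![0] is a developer message whose input_text starts at "Instructions from:"
// (the OpenCode base prompt above it has been dropped);
// filtered![1] is the untouched user message.
```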
/**
@@ -359,23 +487,23 @@ export async function filterOpenCodeSystemPrompts(
 * @returns Input array with bridge message prepended if needed
 */
export function addCodexBridgeMessage(
-  input: InputItem[] | undefined,
-  hasTools: boolean,
+  input: InputItem[] | undefined,
+  hasTools: boolean,
): InputItem[] | undefined {
-  if (!hasTools || !Array.isArray(input)) return input;
-
-  const bridgeMessage: InputItem = {
-    type: "message",
-    role: "developer",
-    content: [
-      {
-        type: "input_text",
-        text: CODEX_OPENCODE_BRIDGE,
-      },
-    ],
-  };
-
-  return [bridgeMessage, ...input];
+  if (!hasTools || !Array.isArray(input)) return input;
+
+  const bridgeMessage: InputItem = {
+    type: "message",
+    role: "developer",
+    content: [
+      {
+        type: "input_text",
+        text: CODEX_OPENCODE_BRIDGE,
+      },
+    ],
+  };
+
+  return [bridgeMessage, ...input];
}

/**
@@ -385,23 +513,79 @@ export function addCodexBridgeMessage(
 * @returns Input array with tool remap message prepended if needed
 */
export function addToolRemapMessage(
-  input: InputItem[] | undefined,
-  hasTools: boolean,
+  input: InputItem[] | undefined,
+  hasTools: boolean,
): InputItem[] | undefined {
-  if (!hasTools || !Array.isArray(input)) return input;
-
-  const toolRemapMessage: InputItem = {
-    type: "message",
-    role: "developer",
-    content: [
-      {
-        type: "input_text",
-        text: TOOL_REMAP_MESSAGE,
-      },
-    ],
-  };
-
-  return [toolRemapMessage, ...input];
+  if (!hasTools || !Array.isArray(input)) return input;
+
+  const toolRemapMessage: InputItem = {
+    type: "message",
+    role: "developer",
+    content: [
+      {
+        type: "input_text",
+        text: TOOL_REMAP_MESSAGE,
+      },
+    ],
+  };
+
+  return [toolRemapMessage, ...input];
+}
+
+/**
+ * Validate and normalize reasoning effort based on model-specific constraints
+ * This is used when the AI SDK already provides a reasoning effort from variant selection,
+ * but we need to ensure it's valid for the specific model (e.g., downgrade xhigh to high)
+ *
+ * @param effort - The reasoning effort to validate
+ * @param modelName - The normalized model name
+ * @returns Validated effort with model-specific fixes applied
+ */
+function validateReasoningEffort(
+  effort: string,
+  modelName: string,
+): ReasoningConfig["effort"] {
+  const normalizedName = modelName.toLowerCase();
+  const isCodexMini = normalizedName.includes("codex-mini");
+  const isCodex = normalizedName.includes("codex") && !isCodexMini;
+  const supportsXhigh =
+    normalizedName.includes("gpt-5.2") || normalizedName.includes("codex-max");
+  const supportsNone =
+    (normalizedName.includes("gpt-5.2") ||
+      normalizedName.includes("gpt-5.1")) &&
+    !isCodex;
+
+  let result = effort as ReasoningConfig["effort"];
+
+  // Codex Mini only supports medium and high
+  if (isCodexMini) {
+    if (result === "none" || result === "minimal" || result === "low") {
+      result = "medium";
+    } else if (result === "xhigh") {
+      result = "high";
+    } else if (result !== "high" && result !== "medium") {
+      result = "medium";
+    }
+  }
+
+  // For models that don't support xhigh, downgrade to high
+  if (!supportsXhigh && result === "xhigh") {
+    result = "high";
+  }
+
+  // For models that don't support "none", upgrade to "low"
+  if (!supportsNone && result === "none") {
+    result = "low";
+  }
+
+  // Normalize "minimal" to "low" for ALL GPT-5.1/5.2 models
+  // "minimal" is NOT supported by any gpt-5.1 or gpt-5.2 model (general or codex)
+  // It was deprecated when transitioning from gpt-5 to gpt-5.1
+  if (result === "minimal") {
+    result = "low";
+  }
+
+  return result;
 }
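Since `validateReasoningEffort` is module-private, its clamping rules are easiest to observe through `transformRequestBody`. A hypothetical check in the style of the vitest suites below (the model name, normalization to itself, and expected values are my assumptions, not part of the diff):

```typescript
import { transformRequestBody } from "../lib/request/request-transformer.js";
import type { RequestBody } from "../lib/types.js";

// Variant selection already set "xhigh" on the body, but gpt-5.1 does not
// support xhigh, so the validator should downgrade it to "high".
const body: RequestBody = {
  model: "gpt-5.1",
  input: [],
  reasoning: { effort: "xhigh" }, // as the AI SDK would set it for a :xhigh variant
};

const result = await transformRequestBody(body, "codex instructions", {
  global: {},
  models: {},
});

console.log(result.reasoning?.effort); // "high" (downgraded for gpt-5.1)
console.log(result.reasoning?.summary); // "auto" (Codex CLI default)
```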
/**
@@ -412,6 +596,13 @@ export function addToolRemapMessage
 * - opencode excludes gpt-5-codex from reasoning configuration
 * - This plugin uses store=false (stateless), requiring encrypted reasoning content
 *
+ * VARIANT SELECTION FLOW:
+ * - When user selects a reasoning variant via Ctrl+T or --model=openai/gpt-5.2:high,
+ *   OpenCode passes this through the AI SDK which sets body.reasoning.effort/summary
+ * - We check if body.reasoning already exists (from AI SDK/variant selection)
+ * - If yes, we RESPECT it (don't override user's variant selection)
+ * - If no, we apply plugin computed defaults
+ *
 * @param body - Original request body
 * @param codexInstructions - Codex system instructions
 * @param userConfig - User configuration from loader
 * @returns Transformed request body
 */
export async function transformRequestBody(
-  body: RequestBody,
-  codexInstructions: string,
-  userConfig: UserConfig = { global: {}, models: {} },
-  codexMode = true,
+  body: RequestBody,
+  codexInstructions: string,
+  userConfig: UserConfig = { global: {}, models: {} },
+  codexMode = true,
): Promise<RequestBody> {
-  const originalModel = body.model;
-  const normalizedModel = normalizeModel(body.model);
-
-  // Get model-specific configuration using ORIGINAL model name (config key)
-  // This allows per-model options like "gpt-5-codex-low" to work correctly
-  const lookupModel = originalModel || normalizedModel;
-  const modelConfig = getModelConfig(lookupModel, userConfig);
-
-  // Debug: Log which config was resolved
-  logDebug(
-    `Model config lookup: "${lookupModel}" → normalized to "${normalizedModel}" for API`,
-    {
-      hasModelSpecificConfig: !!userConfig.models?.[lookupModel],
-      resolvedConfig: modelConfig,
-    },
-  );
-
-  // Normalize model name for API call
-  body.model = normalizedModel;
-
-  // Codex required fields
-  // ChatGPT backend REQUIRES store=false (confirmed via testing)
-  body.store = false;
-  // Always set stream=true for API - response
handling detects original intent - body.stream = true; - body.instructions = codexInstructions; - - // Prompt caching relies on the host providing a stable prompt_cache_key - // (OpenCode passes its session identifier). We no longer synthesize one here. - - // Filter and transform input - if (body.input && Array.isArray(body.input)) { - // Debug: Log original input message IDs before filtering - const originalIds = body.input - .filter((item) => item.id) - .map((item) => item.id); - if (originalIds.length > 0) { - logDebug( - `Filtering ${originalIds.length} message IDs from input:`, - originalIds, - ); - } - - body.input = filterInput(body.input); - - // Debug: Verify all IDs were removed - const remainingIds = (body.input || []) - .filter((item) => item.id) - .map((item) => item.id); - if (remainingIds.length > 0) { - logWarn( - `WARNING: ${remainingIds.length} IDs still present after filtering:`, - remainingIds, - ); - } else if (originalIds.length > 0) { - logDebug(`Successfully removed all ${originalIds.length} message IDs`); - } - - if (codexMode) { - // CODEX_MODE: Remove OpenCode system prompt, add bridge prompt - body.input = await filterOpenCodeSystemPrompts(body.input); - body.input = addCodexBridgeMessage(body.input, !!body.tools); - } else { - // DEFAULT MODE: Keep original behavior with tool remap message - body.input = addToolRemapMessage(body.input, !!body.tools); - } - - // Handle orphaned function_call_output items (where function_call was an item_reference that got filtered) - // Instead of removing orphans (which causes infinite loops as LLM loses tool results), - // convert them to messages to preserve context while avoiding API errors - if (body.input) { - const functionCallIds = new Set( - body.input - .filter((item) => item.type === "function_call" && item.call_id) - .map((item) => item.call_id), - ); - body.input = body.input.map((item) => { - if (item.type === "function_call_output" && !functionCallIds.has(item.call_id)) { - const toolName = typeof (item as any).name === "string" ? (item as any).name : "tool"; - const callId = (item as any).call_id ?? ""; - let text: string; - try { - const out = (item as any).output; - text = typeof out === "string" ? out : JSON.stringify(out); - } catch { - text = String((item as any).output ?? 
""); - } - if (text.length > 16000) { - text = text.slice(0, 16000) + "\n...[truncated]"; - } - return { - type: "message", - role: "assistant", - content: `[Previous ${toolName} result; call_id=${callId}]: ${text}`, - } as InputItem; - } - return item; - }); - } - } - - // Configure reasoning (use normalized model family + model-specific config) - const reasoningConfig = getReasoningConfig(normalizedModel, modelConfig); - body.reasoning = { - ...body.reasoning, - ...reasoningConfig, - }; - - // Configure text verbosity (support user config) - // Default: "medium" (matches Codex CLI default for all GPT-5 models) - body.text = { - ...body.text, - verbosity: modelConfig.textVerbosity || "medium", - }; - - // Add include for encrypted reasoning content - // Default: ["reasoning.encrypted_content"] (required for stateless operation with store=false) - // This allows reasoning context to persist across turns without server-side storage - body.include = modelConfig.include || ["reasoning.encrypted_content"]; - - // Remove unsupported parameters - body.max_output_tokens = undefined; - body.max_completion_tokens = undefined; - - return body; + const originalModel = body.model; + const normalizedModel = normalizeModel(body.model); + + // Get model-specific configuration using ORIGINAL model name (config key) + // This allows per-model options like "gpt-5-codex-low" to work correctly + const lookupModel = originalModel || normalizedModel; + const modelConfig = getModelConfig(lookupModel, userConfig); + + // Debug: Log which config was resolved + logDebug( + `Model config lookup: "${lookupModel}" → normalized to "${normalizedModel}" for API`, + { + hasModelSpecificConfig: !!userConfig.models?.[lookupModel], + resolvedConfig: modelConfig, + }, + ); + + // Normalize model name for API call + body.model = normalizedModel; + + // Codex required fields + // ChatGPT backend REQUIRES store=false (confirmed via testing) + body.store = false; + // Always set stream=true for API - response handling detects original intent + body.stream = true; + body.instructions = codexInstructions; + + // Prompt caching relies on the host providing a stable prompt_cache_key + // (OpenCode passes its session identifier). We no longer synthesize one here. + + // Filter and transform input + if (body.input && Array.isArray(body.input)) { + // Debug: Log original input message IDs before filtering + const originalIds = body.input! 
+      .filter((item) => item.id)
+      .map((item) => item.id);
+    if (originalIds.length > 0) {
+      logDebug(
+        `Filtering ${originalIds.length} message IDs from input:`,
+        originalIds,
+      );
+    }
+
+    body.input = filterInput(body.input);
+
+    // Debug: Verify all IDs were removed
+    const remainingIds = (body.input || [])
+      .filter((item) => item.id)
+      .map((item) => item.id);
+    if (remainingIds.length > 0) {
+      logWarn(
+        `WARNING: ${remainingIds.length} IDs still present after filtering:`,
+        remainingIds,
+      );
+    } else if (originalIds.length > 0) {
+      logDebug(`Successfully removed all ${originalIds.length} message IDs`);
+    }
+
+    if (codexMode) {
+      // CODEX_MODE: Remove OpenCode system prompt, add bridge prompt
+      body.input = await filterOpenCodeSystemPrompts(body.input);
+      body.input = addCodexBridgeMessage(body.input, !!body.tools);
+    } else {
+      // DEFAULT MODE: Keep original behavior with tool remap message
+      body.input = addToolRemapMessage(body.input, !!body.tools);
+    }
+
+    // Handle orphaned function_call_output items (where function_call was an item_reference that got filtered)
+    // Instead of removing orphans (which causes infinite loops as LLM loses tool results),
+    // convert them to messages to preserve context while avoiding API errors
+    if (body.input) {
+      const functionCallIds = new Set(
+        body.input!
+          .filter((item) => item.type === "function_call" && item.call_id)
+          .map((item) => item.call_id),
+      );
+      body.input = body.input!.map((item) => {
+        if (
+          item.type === "function_call_output" &&
+          !functionCallIds.has(item.call_id)
+        ) {
+          const toolName =
+            typeof (item as any).name === "string"
+              ? (item as any).name
+              : "tool";
+          const callId = (item as any).call_id ?? "";
+          let text: string;
+          try {
+            const out = (item as any).output;
+            text = typeof out === "string" ? out : JSON.stringify(out);
+          } catch {
+            text = String((item as any).output ?? "");
+          }
+          if (text.length > 16000) {
+            text = text.slice(0, 16000) + "\n...[truncated]";
+          }
+          return {
+            type: "message",
+            role: "assistant",
+            content: `[Previous ${toolName} result; call_id=${callId}]: ${text}`,
+          } as InputItem;
+        }
+        return item;
+      });
+    }
+  }
+
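For clarity, here is roughly what the orphan conversion above does to a dangling tool result; the `call_id` and payload are made up for illustration. After this sketch, the diff continues with the reasoning-priority resolution.

```typescript
// Before: a function_call_output whose matching function_call arrived as an
// item_reference and was filtered out earlier in the pipeline.
const orphan = {
  type: "function_call_output",
  call_id: "call_demo123",
  output: '{"stdout":"ok"}',
};

// After: rewritten as a plain assistant message so the model keeps the
// context of the tool result instead of looping on a missing call.
const converted = {
  type: "message",
  role: "assistant",
  content: '[Previous tool result; call_id=call_demo123]: {"stdout":"ok"}',
};
```

+    // Check if reasoning config is already set by AI SDK (from Ctrl+T variant selection)
+    // or by providerOptions (legacy/opencode variant config)
+    // Priority: body.reasoning > providerOptions > user config > plugin defaults
+    const existingEffort =
+      body.reasoning?.effort ?? body.providerOptions?.openai?.reasoningEffort;
+    const existingSummary =
+      body.reasoning?.summary ??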
body.providerOptions?.openai?.reasoningSummary; + + logDebug("Checking for existing reasoning config from AI SDK/variant", { + existingEffort, + existingSummary, + fromBodyReasoning: !!body.reasoning?.effort, + fromProviderOptions: !!body.providerOptions?.openai?.reasoningEffort, + normalizedModel, + }); + + // If AI SDK or providerOptions already set reasoning config, respect it + // but validate it against model-specific constraints (e.g., xhigh → high for non-supporting models) + if (existingEffort) { + logDebug("Respected existing reasoning effort from AI SDK/variant", { + effort: existingEffort, + }); + + // Validate and fix the existing effort for model-specific constraints + const validatedEffort = validateReasoningEffort( + existingEffort!, + normalizedModel, + ); + + body.reasoning = { + ...body.reasoning, + effort: validatedEffort, + summary: existingSummary ?? "auto", + }; + logDebug("Validated existing reasoning config for model constraints", { + original: existingEffort, + validated: validatedEffort, + }); + } else { + // No existing config, apply plugin computed defaults + const reasoningConfig = getReasoningConfig(normalizedModel, modelConfig); + body.reasoning = { + ...body.reasoning, + ...reasoningConfig, + }; + logDebug("Applied plugin computed reasoning config", body.reasoning); + } + + // Configure text verbosity + // Priority: body.text.verbosity > providerOptions.textVerbosity > user config > plugin default + const existingTextVerbosity = + body.text?.verbosity ?? body.providerOptions?.openai?.textVerbosity; + body.text = { + ...body.text, + verbosity: existingTextVerbosity ?? modelConfig.textVerbosity ?? "medium", + }; + + // Add include for encrypted reasoning content + // Default: ["reasoning.encrypted_content"] (required for stateless operation with store=false) + // This allows reasoning context to persist across turns without server-side storage + body.include = modelConfig.include || ["reasoning.encrypted_content"]; + + // Remove unsupported parameters + body.max_output_tokens = undefined; + body.max_completion_tokens = undefined; + + return body; } diff --git a/lib/types.ts b/lib/types.ts index 80c8b02..8373a50 100644 --- a/lib/types.ts +++ b/lib/types.ts @@ -18,7 +18,11 @@ export interface UserConfig { global: ConfigOptions; models: { [modelName: string]: { + id?: string; // Optional model ID (may come from config) options?: ConfigOptions; + variants?: { + [variantName: string]: ConfigOptions; + }; }; }; } @@ -137,6 +141,14 @@ export interface RequestBody { prompt_cache_key?: string; max_output_tokens?: number; max_completion_tokens?: number; + /** Provider options from OpenCode (includes variant configuration from modern config format) */ + providerOptions?: { + openai?: { + reasoningEffort?: "none" | "minimal" | "low" | "medium" | "high" | "xhigh"; + reasoningSummary?: "auto" | "concise" | "detailed" | "off" | "on"; + textVerbosity?: "low" | "medium" | "high"; + }; + }; [key: string]: unknown; } diff --git a/test/config.test.ts b/test/config.test.ts index e3ccfda..cb76ff0 100644 --- a/test/config.test.ts +++ b/test/config.test.ts @@ -1,151 +1,169 @@ -import { describe, it, expect } from 'vitest'; -import { getModelConfig, getReasoningConfig } from '../lib/request/request-transformer.js'; -import type { UserConfig } from '../lib/types.js'; - -describe('Configuration Parsing', () => { - const providerConfig = { - options: { - reasoningEffort: 'medium' as const, - reasoningSummary: 'auto' as const, - textVerbosity: 'medium' as const, - }, - models: { - 
'gpt-5-codex': { - options: { - reasoningSummary: 'concise' as const, - }, - }, - 'gpt-5': { - options: { - reasoningEffort: 'high' as const, - }, - }, - }, - }; - - const userConfig: UserConfig = { - global: providerConfig.options || {}, - models: providerConfig.models || {}, - }; - - describe('getModelConfig', () => { - it('should merge global and model-specific config for gpt-5-codex', () => { - const codexConfig = getModelConfig('gpt-5-codex', userConfig); - - expect(codexConfig.reasoningEffort).toBe('medium'); // from global - expect(codexConfig.reasoningSummary).toBe('concise'); // from model override - expect(codexConfig.textVerbosity).toBe('medium'); // from global - }); - - it('should merge global and model-specific config for gpt-5', () => { - const gpt5Config = getModelConfig('gpt-5', userConfig); - - expect(gpt5Config.reasoningEffort).toBe('high'); // from model override - expect(gpt5Config.reasoningSummary).toBe('auto'); // from global - expect(gpt5Config.textVerbosity).toBe('medium'); // from global - }); - - it('should return empty config when no config provided', () => { - const emptyConfig = getModelConfig('gpt-5-codex', { global: {}, models: {} }); - - expect(emptyConfig).toEqual({}); - }); - }); - - describe('getReasoningConfig', () => { - it('should use user settings from merged config for gpt-5-codex', () => { - const codexConfig = getModelConfig('gpt-5-codex', userConfig); - const reasoningConfig = getReasoningConfig('gpt-5-codex', codexConfig); - - expect(reasoningConfig.effort).toBe('medium'); - expect(reasoningConfig.summary).toBe('concise'); - }); - - it('should return defaults when no config provided', () => { - const emptyConfig = getModelConfig('gpt-5-codex', { global: {}, models: {} }); - const defaultReasoning = getReasoningConfig('gpt-5-codex', emptyConfig); - - expect(defaultReasoning.effort).toBe('medium'); - expect(defaultReasoning.summary).toBe('auto'); - }); - - it('should use minimal effort for lightweight models (nano/mini)', () => { - const nanoReasoning = getReasoningConfig('gpt-5-nano', {}); - - expect(nanoReasoning.effort).toBe('minimal'); - expect(nanoReasoning.summary).toBe('auto'); - }); - - it('should normalize "minimal" to "low" for gpt-5-codex', () => { - const codexMinimalConfig = { reasoningEffort: 'minimal' as const }; - const codexMinimalReasoning = getReasoningConfig('gpt-5-codex', codexMinimalConfig); - - expect(codexMinimalReasoning.effort).toBe('low'); - expect(codexMinimalReasoning.summary).toBe('auto'); - }); - - it('should preserve "minimal" effort for non-codex models', () => { - const gpt5MinimalConfig = { reasoningEffort: 'minimal' as const }; - const gpt5MinimalReasoning = getReasoningConfig('gpt-5', gpt5MinimalConfig); - - expect(gpt5MinimalReasoning.effort).toBe('minimal'); - }); - - it('should handle high effort setting', () => { - const highConfig = { reasoningEffort: 'high' as const }; - const highReasoning = getReasoningConfig('gpt-5', highConfig); - - expect(highReasoning.effort).toBe('high'); - expect(highReasoning.summary).toBe('auto'); - }); - - it('should respect custom summary setting', () => { - const detailedConfig = { reasoningSummary: 'detailed' as const }; - const detailedReasoning = getReasoningConfig('gpt-5-codex', detailedConfig); - - expect(detailedReasoning.summary).toBe('detailed'); - }); - - it('should default codex-mini to medium effort', () => { - const codexMiniReasoning = getReasoningConfig('gpt-5-codex-mini', {}); - expect(codexMiniReasoning.effort).toBe('medium'); - }); - - it('should clamp 
codex-mini minimal/low to medium', () => { - const minimal = getReasoningConfig('gpt-5-codex-mini', { - reasoningEffort: 'minimal', - }); - const low = getReasoningConfig('gpt-5-codex-mini-high', { - reasoningEffort: 'low', - }); - - expect(minimal.effort).toBe('medium'); - expect(low.effort).toBe('medium'); - }); - - it('should keep codex-mini high effort when requested', () => { - const high = getReasoningConfig('codex-mini-latest', { - reasoningEffort: 'high', - }); - expect(high.effort).toBe('high'); - }); - }); - - describe('Model-specific behavior', () => { - it('should detect lightweight models correctly', () => { - const miniReasoning = getReasoningConfig('gpt-5-mini', {}); - expect(miniReasoning.effort).toBe('minimal'); - }); - - it('should detect codex models correctly', () => { - const codexConfig = { reasoningEffort: 'minimal' as const }; - const codexReasoning = getReasoningConfig('gpt-5-codex', codexConfig); - expect(codexReasoning.effort).toBe('low'); // normalized - }); - - it('should handle standard gpt-5 model', () => { - const gpt5Reasoning = getReasoningConfig('gpt-5', {}); - expect(gpt5Reasoning.effort).toBe('medium'); - }); - }); +import { describe, it, expect } from "vitest"; +import { + getModelConfig, + getReasoningConfig, +} from "../lib/request/request-transformer.js"; +import type { UserConfig } from "../lib/types.js"; + +describe("Configuration Parsing", () => { + const providerConfig = { + options: { + reasoningEffort: "medium" as const, + reasoningSummary: "auto" as const, + textVerbosity: "medium" as const, + }, + models: { + "gpt-5-codex": { + options: { + reasoningSummary: "concise" as const, + }, + }, + "gpt-5": { + options: { + reasoningEffort: "high" as const, + }, + }, + }, + }; + + const userConfig: UserConfig = { + global: providerConfig.options || {}, + models: providerConfig.models || {}, + }; + + describe("getModelConfig", () => { + it("should merge global and model-specific config for gpt-5-codex", () => { + const codexConfig = getModelConfig("gpt-5-codex", userConfig); + + expect(codexConfig.reasoningEffort).toBe("medium"); // from global + expect(codexConfig.reasoningSummary).toBe("concise"); // from model override + expect(codexConfig.textVerbosity).toBe("medium"); // from global + }); + + it("should merge global and model-specific config for gpt-5", () => { + const gpt5Config = getModelConfig("gpt-5", userConfig); + + expect(gpt5Config.reasoningEffort).toBe("high"); // from model override + expect(gpt5Config.reasoningSummary).toBe("auto"); // from global + expect(gpt5Config.textVerbosity).toBe("medium"); // from global + }); + + it("should return empty config when no config provided", () => { + const emptyConfig = getModelConfig("gpt-5-codex", { + global: {}, + models: {}, + }); + + expect(emptyConfig).toEqual({}); + }); + }); + + describe("getReasoningConfig", () => { + it("should use user settings from merged config for gpt-5-codex", () => { + const codexConfig = getModelConfig("gpt-5-codex", userConfig); + const reasoningConfig = getReasoningConfig("gpt-5-codex", codexConfig); + + expect(reasoningConfig.effort).toBe("medium"); + expect(reasoningConfig.summary).toBe("concise"); + }); + + it("should return defaults when no config provided", () => { + const emptyConfig = getModelConfig("gpt-5-codex", { + global: {}, + models: {}, + }); + const defaultReasoning = getReasoningConfig("gpt-5-codex", emptyConfig); + + expect(defaultReasoning.effort).toBe("medium"); + expect(defaultReasoning.summary).toBe("auto"); + }); + + it("should normalize 
minimal to low for lightweight models (nano/mini)", () => { + const nanoReasoning = getReasoningConfig("gpt-5-nano", {}); + + expect(nanoReasoning.effort).toBe("low"); + expect(nanoReasoning.summary).toBe("auto"); + }); + + it('should normalize "minimal" to "low" for gpt-5-codex', () => { + const codexMinimalConfig = { reasoningEffort: "minimal" as const }; + const codexMinimalReasoning = getReasoningConfig( + "gpt-5-codex", + codexMinimalConfig, + ); + + expect(codexMinimalReasoning.effort).toBe("low"); + expect(codexMinimalReasoning.summary).toBe("auto"); + }); + + it('should normalize "minimal" to "low" for all models (minimal not supported by gpt-5.1/5.2)', () => { + const gpt5MinimalConfig = { reasoningEffort: "minimal" as const }; + const gpt5MinimalReasoning = getReasoningConfig( + "gpt-5", + gpt5MinimalConfig, + ); + + expect(gpt5MinimalReasoning.effort).toBe("low"); + }); + + it("should handle high effort setting", () => { + const highConfig = { reasoningEffort: "high" as const }; + const highReasoning = getReasoningConfig("gpt-5", highConfig); + + expect(highReasoning.effort).toBe("high"); + expect(highReasoning.summary).toBe("auto"); + }); + + it("should respect custom summary setting", () => { + const detailedConfig = { reasoningSummary: "detailed" as const }; + const detailedReasoning = getReasoningConfig( + "gpt-5-codex", + detailedConfig, + ); + + expect(detailedReasoning.summary).toBe("detailed"); + }); + + it("should default codex-mini to medium effort", () => { + const codexMiniReasoning = getReasoningConfig("gpt-5-codex-mini", {}); + expect(codexMiniReasoning.effort).toBe("medium"); + }); + + it("should clamp codex-mini minimal/low to medium", () => { + const minimal = getReasoningConfig("gpt-5-codex-mini", { + reasoningEffort: "minimal", + }); + const low = getReasoningConfig("gpt-5-codex-mini-high", { + reasoningEffort: "low", + }); + + expect(minimal.effort).toBe("medium"); + expect(low.effort).toBe("medium"); + }); + + it("should keep codex-mini high effort when requested", () => { + const high = getReasoningConfig("codex-mini-latest", { + reasoningEffort: "high", + }); + expect(high.effort).toBe("high"); + }); + }); + + describe("Model-specific behavior", () => { + it("should normalize minimal to low for lightweight models", () => { + const miniReasoning = getReasoningConfig("gpt-5-mini", {}); + expect(miniReasoning.effort).toBe("low"); + }); + + it("should detect codex models correctly", () => { + const codexConfig = { reasoningEffort: "minimal" as const }; + const codexReasoning = getReasoningConfig("gpt-5-codex", codexConfig); + expect(codexReasoning.effort).toBe("low"); // normalized + }); + + it("should handle standard gpt-5 model", () => { + const gpt5Reasoning = getReasoningConfig("gpt-5", {}); + expect(gpt5Reasoning.effort).toBe("medium"); + }); + }); }); diff --git a/test/request-transformer.test.ts b/test/request-transformer.test.ts index e0fced8..101ca2d 100644 --- a/test/request-transformer.test.ts +++ b/test/request-transformer.test.ts @@ -1,4 +1,4 @@ -import { describe, it, expect, beforeEach, afterEach } from 'vitest'; +import { describe, it, expect, beforeEach, afterEach } from "vitest"; import { normalizeModel, getModelConfig, @@ -13,6 +13,18 @@ import { TOOL_REMAP_MESSAGE } from '../lib/prompts/codex.js'; import { CODEX_OPENCODE_BRIDGE } from '../lib/prompts/codex-opencode-bridge.js'; import type { RequestBody, UserConfig, InputItem } from '../lib/types.js'; +// Helper to extract text from content (handles both string and array formats) +function 
getContentText(item: InputItem): string { + if (typeof item.content === 'string') return item.content; + if (Array.isArray(item.content)) { + return item.content + .filter((c): c is { type: 'input_text'; text: string } => c.type === 'input_text') + .map(c => c.text) + .join(''); + } + return ''; +} + describe('Request Transformer Module', () => { describe('normalizeModel', () => { // NOTE: All gpt-5 models now normalize to gpt-5.1 as gpt-5 is being phased out @@ -515,7 +527,8 @@ describe('Request Transformer Module', () => { const result = await filterOpenCodeSystemPrompts(input); // Should filter codex.txt but keep AGENTS.md expect(result).toHaveLength(2); - expect(result![0].content).toContain('AGENTS.md'); + const content0 = Array.isArray(result![0].content) ? (result![0].content as any)[0].text : result![0].content; + expect(content0).toContain('AGENTS.md'); expect(result![1].role).toBe('user'); }); @@ -537,13 +550,144 @@ describe('Request Transformer Module', () => { const result = await filterOpenCodeSystemPrompts(input); // Should filter first message (codex.txt) but keep second (env+AGENTS.md) expect(result).toHaveLength(2); - expect(result![0].content).toContain('AGENTS.md'); + const content0 = Array.isArray(result![0].content) ? (result![0].content as any)[0].text : result![0].content; + expect(content0).toContain('AGENTS.md'); expect(result![1].role).toBe('user'); }); it('should return undefined for undefined input', async () => { expect(await filterOpenCodeSystemPrompts(undefined)).toBeUndefined(); }); + + // Tests for concatenated messages (single message containing both prompt AND AGENTS.md) + // This is how OpenCode actually sends content (as of v1.0.164+). + // The tests above cover separate messages pattern which may also occur. + describe('concatenated messages (OpenCode v1.0.164+ pattern)', () => { + it('should extract and preserve AGENTS.md when concatenated with OpenCode prompt', async () => { + // OpenCode sends a SINGLE message containing: + // 1. Base codex.txt prompt + // 2. Environment info + // 3. block + // 4. AGENTS.md content (prefixed with "Instructions from:") + const input: InputItem[] = [ + { + type: 'message', + role: 'developer', + content: `You are a coding agent running in the opencode, a terminal-based coding assistant. + +Here is some useful information about the environment you are running in: + + Working directory: /Users/test/project + Platform: darwin + + + src/ + index.ts + +Instructions from: /Users/test/project/AGENTS.md +# Project Guidelines + +Use TypeScript for all new code. +Follow existing patterns in the codebase. + +Instructions from: /Users/test/.config/opencode/AGENTS.md +# Global Settings + +Always use mise for tool management.`, + }, + { type: 'message', role: 'user', content: 'hello' }, + ]; + + const result = await filterOpenCodeSystemPrompts(input); + + // Should have 2 messages: extracted AGENTS.md content + user message + expect(result).toHaveLength(2); + expect(result![0].role).toBe('developer'); + const content0 = Array.isArray(result![0].content) ? 
(result![0].content as any)[0].text : result![0].content; + expect(content0).toContain('Instructions from:'); + expect(content0).toContain('Project Guidelines'); + expect(content0).toContain('Global Settings'); + // Should NOT contain the OpenCode base prompt + expect(content0).not.toContain('You are a coding agent running in'); + expect(result![1].role).toBe('user'); + }); + + it('should preserve multiple AGENTS.md files in concatenated message', async () => { + const input: InputItem[] = [ + { + type: 'message', + role: 'developer', + content: `You are a coding agent running in the opencode... + +Instructions from: /project/AGENTS.md +# Project AGENTS.md +Project-specific instructions here. + +Instructions from: /project/src/AGENTS.md +# Nested AGENTS.md +More specific instructions for src folder. + +Instructions from: ~/.config/opencode/AGENTS.md +# Global AGENTS.md +Global instructions here.`, + }, + { type: 'message', role: 'user', content: 'test' }, + ]; + + const result = await filterOpenCodeSystemPrompts(input); + + expect(result).toHaveLength(2); + // All AGENTS.md content should be preserved + const contentText = getContentText(result![0]); + expect(contentText).toContain('Project AGENTS.md'); + expect(contentText).toContain('Nested AGENTS.md'); + expect(contentText).toContain('Global AGENTS.md'); + }); + + it('should handle concatenated message with no AGENTS.md (just base prompt)', async () => { + const input: InputItem[] = [ + { + type: 'message', + role: 'developer', + content: 'You are a coding agent running in the opencode, a terminal-based coding assistant.', + }, + { type: 'message', role: 'user', content: 'hello' }, + ]; + + const result = await filterOpenCodeSystemPrompts(input); + + // Should just have the user message (base prompt filtered, no AGENTS.md to preserve) + expect(result).toHaveLength(1); + expect(result![0].role).toBe('user'); + }); + + it('should handle array content format in concatenated message', async () => { + const input: InputItem[] = [ + { + type: 'message', + role: 'developer', + content: [ + { + type: 'input_text', + text: `You are a coding agent running in the opencode... + +Instructions from: /project/AGENTS.md +# My Custom Instructions +Do things this way.`, + }, + ], + }, + { type: 'message', role: 'user', content: 'hello' }, + ]; + + const result = await filterOpenCodeSystemPrompts(input); + + expect(result).toHaveLength(2); + const contentText = getContentText(result![0]); + expect(contentText).toContain('My Custom Instructions'); + expect(contentText).not.toContain('You are a coding agent'); + }); + }); }); describe('addCodexBridgeMessage', () => { @@ -947,7 +1091,7 @@ describe('Request Transformer Module', () => { expect(result.reasoning?.effort).toBe('low'); }); - it('should preserve minimal for non-codex models', async () => { + it('should normalize minimal to low for gpt-5 (which maps to gpt-5.1)', async () => { const body: RequestBody = { model: 'gpt-5', input: [], @@ -957,7 +1101,8 @@ describe('Request Transformer Module', () => { models: {}, }; const result = await transformRequestBody(body, codexInstructions, userConfig); - expect(result.reasoning?.effort).toBe('minimal'); + // gpt-5 normalizes to gpt-5.1, and minimal is normalized to low for all models + expect(result.reasoning?.effort).toBe('low'); }); it('should use minimal effort for lightweight models', async () => {