From f1933ace7ce12252e5bff2cac9d41b84d17bce83 Mon Sep 17 00:00:00 2001 From: OpeOginni Date: Fri, 3 Apr 2026 13:59:25 +0100 Subject: [PATCH 1/9] fix: add instruction back to params for codex oauth provider to prevent BAD REQUEST error --- packages/opencode/src/agent/agent.ts | 23 ++++++++++++++++------- 1 file changed, 16 insertions(+), 7 deletions(-) diff --git a/packages/opencode/src/agent/agent.ts b/packages/opencode/src/agent/agent.ts index 0c6fe6ec91c8..73053804f141 100644 --- a/packages/opencode/src/agent/agent.ts +++ b/packages/opencode/src/agent/agent.ts @@ -341,6 +341,16 @@ export namespace Agent { ) const existing = yield* InstanceState.useEffect(state, (s) => s.list()) + // TODO: clean this up so provider specific logic doesnt bleed over + const authInfo = yield* auth.get(model.providerID).pipe(Effect.orDie) + + const isOpenaiOauth = model.providerID === "openai" && authInfo?.type === "oauth" + + const USER_MESSAGE_CONTENT = { + role: "user", + content: `Create an agent configuration based on this request: \"${input.description}\".\n\nIMPORTANT: The following identifiers already exist and must NOT be used: ${existing.map((i) => i.name).join(", ")}\n Return ONLY the JSON object, no other text, do not wrap in backticks`, + } as ModelMessage + const params = { experimental_telemetry: { isEnabled: cfg.experimental?.openTelemetry, @@ -349,17 +359,16 @@ export namespace Agent { }, }, temperature: 0.3, - messages: [ + messages: isOpenaiOauth ? 
[ + USER_MESSAGE_CONTENT + ] : [ ...system.map( (item): ModelMessage => ({ role: "system", content: item, }), ), - { - role: "user", - content: `Create an agent configuration based on this request: \"${input.description}\".\n\nIMPORTANT: The following identifiers already exist and must NOT be used: ${existing.map((i) => i.name).join(", ")}\n Return ONLY the JSON object, no other text, do not wrap in backticks`, - }, + USER_MESSAGE_CONTENT ], model: language, schema: z.object({ @@ -369,13 +378,13 @@ export namespace Agent { }), } satisfies Parameters[0] - // TODO: clean this up so provider specific logic doesnt bleed over - const authInfo = yield* auth.get(model.providerID).pipe(Effect.orDie) + if (model.providerID === "openai" && authInfo?.type === "oauth") { return yield* Effect.promise(async () => { const result = streamObject({ ...params, providerOptions: ProviderTransform.providerOptions(resolved, { + instructions: system.join("\n"), store: false, }), onError: () => {}, From 7445b445cd8014b9d3e8bc4dc595fe4c96883c0e Mon Sep 17 00:00:00 2001 From: OpeOginni Date: Fri, 3 Apr 2026 14:10:21 +0100 Subject: [PATCH 2/9] chore: removed partial system prompt addition, to match llm.ts logic exactly --- packages/opencode/src/agent/agent.ts | 22 +++++++--------------- 1 file changed, 7 insertions(+), 15 deletions(-) diff --git a/packages/opencode/src/agent/agent.ts b/packages/opencode/src/agent/agent.ts index 73053804f141..ba285218b4f0 100644 --- a/packages/opencode/src/agent/agent.ts +++ b/packages/opencode/src/agent/agent.ts @@ -341,16 +341,6 @@ export namespace Agent { ) const existing = yield* InstanceState.useEffect(state, (s) => s.list()) - // TODO: clean this up so provider specific logic doesnt bleed over - const authInfo = yield* auth.get(model.providerID).pipe(Effect.orDie) - - const isOpenaiOauth = model.providerID === "openai" && authInfo?.type === "oauth" - - const USER_MESSAGE_CONTENT = { - role: "user", - content: `Create an agent configuration based on this 
request: \"${input.description}\".\n\nIMPORTANT: The following identifiers already exist and must NOT be used: ${existing.map((i) => i.name).join(", ")}\n Return ONLY the JSON object, no other text, do not wrap in backticks`, - } as ModelMessage - const params = { experimental_telemetry: { isEnabled: cfg.experimental?.openTelemetry, @@ -359,16 +349,17 @@ export namespace Agent { }, }, temperature: 0.3, - messages: isOpenaiOauth ? [ - USER_MESSAGE_CONTENT - ] : [ + messages: [ ...system.map( (item): ModelMessage => ({ role: "system", content: item, }), ), - USER_MESSAGE_CONTENT + { + role: "user", + content: `Create an agent configuration based on this request: \"${input.description}\".\n\nIMPORTANT: The following identifiers already exist and must NOT be used: ${existing.map((i) => i.name).join(", ")}\n Return ONLY the JSON object, no other text, do not wrap in backticks`, + } ], model: language, schema: z.object({ @@ -378,7 +369,8 @@ export namespace Agent { }), } satisfies Parameters[0] - + // TODO: clean this up so provider specific logic doesnt bleed over + const authInfo = yield* auth.get(model.providerID).pipe(Effect.orDie) if (model.providerID === "openai" && authInfo?.type === "oauth") { return yield* Effect.promise(async () => { const result = streamObject({ From 74c9967983bcef04ac069586825cbfe855688093 Mon Sep 17 00:00:00 2001 From: OpeOginni Date: Fri, 3 Apr 2026 14:13:38 +0100 Subject: [PATCH 3/9] chore: format file changes --- packages/opencode/src/agent/agent.ts | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/packages/opencode/src/agent/agent.ts b/packages/opencode/src/agent/agent.ts index ba285218b4f0..0f92fdbe76c6 100644 --- a/packages/opencode/src/agent/agent.ts +++ b/packages/opencode/src/agent/agent.ts @@ -359,7 +359,7 @@ export namespace Agent { { role: "user", content: `Create an agent configuration based on this request: \"${input.description}\".\n\nIMPORTANT: The following identifiers already exist and must NOT be used: 
${existing.map((i) => i.name).join(", ")}\n Return ONLY the JSON object, no other text, do not wrap in backticks`, - } + }, ], model: language, schema: z.object({ From ab9f482fab1f7b12ea36988b16f473d2ef7eda18 Mon Sep 17 00:00:00 2001 From: OpeOginni Date: Fri, 3 Apr 2026 14:19:23 +0100 Subject: [PATCH 4/9] fix: added back conditional system message for openai oauth models following https://github.com/anomalyco/opencode/blob/dev/packages/opencode/src/session/llm.ts#L149 --- packages/opencode/src/agent/agent.ts | 35 +++++++++++++++++----------- 1 file changed, 21 insertions(+), 14 deletions(-) diff --git a/packages/opencode/src/agent/agent.ts b/packages/opencode/src/agent/agent.ts index 0f92fdbe76c6..735fd3dc2d98 100644 --- a/packages/opencode/src/agent/agent.ts +++ b/packages/opencode/src/agent/agent.ts @@ -341,6 +341,16 @@ export namespace Agent { ) const existing = yield* InstanceState.useEffect(state, (s) => s.list()) + // TODO: clean this up so provider specific logic doesnt bleed over + const authInfo = yield* auth.get(model.providerID).pipe(Effect.orDie) + + const isOpenaiOauth = model.providerID === "openai" && authInfo?.type === "oauth" + + const USER_MESSAGE_CONTENT = { + role: "user", + content: `Create an agent configuration based on this request: \"${input.description}\".\n\nIMPORTANT: The following identifiers already exist and must NOT be used: ${existing.map((i) => i.name).join(", ")}\n Return ONLY the JSON object, no other text, do not wrap in backticks`, + } as ModelMessage + const params = { experimental_telemetry: { isEnabled: cfg.experimental?.openTelemetry, metadata: { userId: cfg.username ?? "unknown", }, }, temperature: 0.3, - messages: [ - ...system.map( - (item): ModelMessage => ({ - role: "system", - content: item, - }), - ), - { - role: "user", - content: `Create an agent configuration based on this request: \"${input.description}\".\n\nIMPORTANT: The following identifiers already exist and must NOT be used: ${existing.map((i) => 
i.name).join(", ")}\n Return ONLY the JSON object, no other text, do not wrap in backticks`, - }, - ], + messages: isOpenaiOauth + ? [USER_MESSAGE_CONTENT] + : [ + ...system.map( + (item): ModelMessage => ({ + role: "system", + content: item, + }), + ), + USER_MESSAGE_CONTENT, + ], model: language, schema: z.object({ identifier: z.string(), @@ -369,8 +378,6 @@ export namespace Agent { }), } satisfies Parameters[0] - // TODO: clean this up so provider specific logic doesnt bleed over - const authInfo = yield* auth.get(model.providerID).pipe(Effect.orDie) if (model.providerID === "openai" && authInfo?.type === "oauth") { return yield* Effect.promise(async () => { const result = streamObject({ From 39726f5f7491448455c70ecfcb829d8456212cbc Mon Sep 17 00:00:00 2001 From: OpeOginni Date: Wed, 8 Apr 2026 21:57:20 +0100 Subject: [PATCH 5/9] refactor(agent): streamline agent configuration generation and integrate LLM object generation --- packages/opencode/src/agent/agent.ts | 69 ++++++-------------------- packages/opencode/src/session/llm.ts | 74 +++++++++++++++++++++++++++- 2 files changed, 88 insertions(+), 55 deletions(-) diff --git a/packages/opencode/src/agent/agent.ts b/packages/opencode/src/agent/agent.ts index 735fd3dc2d98..070e98167a32 100644 --- a/packages/opencode/src/agent/agent.ts +++ b/packages/opencode/src/agent/agent.ts @@ -2,11 +2,10 @@ import { Config } from "../config/config" import z from "zod" import { Provider } from "../provider/provider" import { ModelID, ProviderID } from "../provider/schema" -import { generateObject, streamObject, type ModelMessage } from "ai" +import type { ModelMessage } from "ai" import { Instance } from "../project/instance" import { Truncate } from "../tool/truncate" import { Auth } from "../auth" -import { ProviderTransform } from "../provider/transform" import PROMPT_GENERATE from "./generate.txt" import PROMPT_COMPACTION from "./prompt/compaction.txt" @@ -22,6 +21,7 @@ import { Skill } from "../skill" import { Effect, 
ServiceMap, Layer } from "effect" import { InstanceState } from "@/effect/instance-state" import { makeRuntime } from "@/effect/run-service" +import { LLM } from "@/session/llm" export namespace Agent { export const Info = z @@ -73,7 +73,6 @@ export namespace Agent { Service, Effect.gen(function* () { const config = yield* Config.Service - const auth = yield* Auth.Service const skill = yield* Skill.Service const provider = yield* Provider.Service @@ -330,10 +329,8 @@ export namespace Agent { description: string model?: { providerID: ProviderID; modelID: ModelID } }) { - const cfg = yield* config.get() const model = input.model ?? (yield* provider.defaultModel()) const resolved = yield* provider.getModel(model.providerID, model.modelID) - const language = yield* provider.getLanguage(resolved) const system = [PROMPT_GENERATE] yield* Effect.promise(() => @@ -341,61 +338,25 @@ export namespace Agent { ) const existing = yield* InstanceState.useEffect(state, (s) => s.list()) - // TODO: clean this up so provider specific logic doesnt bleed over - const authInfo = yield* auth.get(model.providerID).pipe(Effect.orDie) - - const isOpenaiOauth = model.providerID === "openai" && authInfo?.type === "oauth" - const USER_MESSAGE_CONTENT = { role: "user", content: `Create an agent configuration based on this request: \"${input.description}\".\n\nIMPORTANT: The following identifiers already exist and must NOT be used: ${existing.map((i) => i.name).join(", ")}\n Return ONLY the JSON object, no other text, do not wrap in backticks`, } as ModelMessage - const params = { - experimental_telemetry: { - isEnabled: cfg.experimental?.openTelemetry, - metadata: { - userId: cfg.username ?? "unknown", - }, - }, - temperature: 0.3, - messages: isOpenaiOauth - ? 
[USER_MESSAGE_CONTENT] - : [ - ...system.map( - (item): ModelMessage => ({ - role: "system", - content: item, - }), - ), - USER_MESSAGE_CONTENT, - ], - model: language, - schema: z.object({ - identifier: z.string(), - whenToUse: z.string(), - systemPrompt: z.string(), + return yield* Effect.promise((abort) => + LLM.generateObject({ + abort, + temperature: 0.3, + messages: [USER_MESSAGE_CONTENT], + model: resolved, + system, + schema: z.object({ + identifier: z.string(), + whenToUse: z.string(), + systemPrompt: z.string(), + }), }), - } satisfies Parameters[0] - - if (model.providerID === "openai" && authInfo?.type === "oauth") { - return yield* Effect.promise(async () => { - const result = streamObject({ - ...params, - providerOptions: ProviderTransform.providerOptions(resolved, { - instructions: system.join("\n"), - store: false, - }), - onError: () => {}, - }) - for await (const part of result.fullStream) { - if (part.type === "error") throw part.error - } - return result.object - }) - } - - return yield* Effect.promise(() => generateObject(params).then((r) => r.object)) + ) }), }) }), diff --git a/packages/opencode/src/session/llm.ts b/packages/opencode/src/session/llm.ts index 1813346cdc93..21af6846acfd 100644 --- a/packages/opencode/src/session/llm.ts +++ b/packages/opencode/src/session/llm.ts @@ -3,7 +3,16 @@ import { Log } from "@/util/log" import { Cause, Effect, Layer, Record, ServiceMap } from "effect" import * as Queue from "effect/Queue" import * as Stream from "effect/Stream" -import { streamText, wrapLanguageModel, type ModelMessage, type Tool, tool, jsonSchema } from "ai" +import { + generateObject as generateObjectAI, + streamText, + streamObject, + wrapLanguageModel, + type ModelMessage, + type Tool, + tool, + jsonSchema, +} from "ai" import { mergeDeep, pipe } from "remeda" import { GitLabWorkflowLanguageModel } from "gitlab-ai-provider" import { ProviderTransform } from "@/provider/transform" @@ -17,6 +26,7 @@ import { Flag } from "@/flag/flag" 
import { Permission } from "@/permission" import { Auth } from "@/auth" import { Installation } from "@/installation" +import z from "zod" export namespace LLM { const log = Log.create({ service: "llm" }) @@ -41,6 +51,18 @@ export namespace LLM { abort: AbortSignal } + export type ObjectInput = { + model: Provider.Model + system: string[] + messages: ModelMessage[] + schema: Schema + temperature?: number + } + + export type ObjectRequest = ObjectInput & { + abort: AbortSignal + } + export type Event = Awaited>["fullStream"] extends AsyncIterable ? T : never export interface Interface { @@ -336,6 +358,56 @@ export namespace LLM { }) } + export async function generateObject(input: ObjectRequest) { + const [language, cfg, auth] = await Promise.all([ + Provider.getLanguage(input.model), + Config.get(), + Auth.get(input.model.providerID), + ]) + const isOpenaiOauth = input.model.providerID === "openai" && auth?.type === "oauth" + const messages = isOpenaiOauth + ? input.messages + : [ + ...input.system.map( + (x): ModelMessage => ({ + role: "system", + content: x, + }), + ), + ...input.messages, + ] + const params = { + experimental_telemetry: { + isEnabled: cfg.experimental?.openTelemetry, + metadata: { + userId: cfg.username ?? 
"unknown", + }, + }, + temperature: input.temperature, + messages, + model: language, + schema: input.schema, + abortSignal: input.abort, + } satisfies Parameters>[0] + + if (isOpenaiOauth) { + const result = streamObject({ + ...params, + providerOptions: ProviderTransform.providerOptions(input.model, { + instructions: input.system.join("\n"), + store: false, + }), + onError: () => {}, + }) + for await (const part of result.fullStream) { + if (part.type === "error") throw part.error + } + return result.object + } + + return generateObjectAI(params).then((x) => x.object) + } + function resolveTools(input: Pick) { const disabled = Permission.disabled( Object.keys(input.tools), From b6f5eace793d8809bf01de261f571fd48466aee6 Mon Sep 17 00:00:00 2001 From: OpeOginni Date: Thu, 9 Apr 2026 01:55:51 +0100 Subject: [PATCH 6/9] refactor(opencode): centralize structured output request prep --- packages/opencode/src/agent/agent.ts | 4 - packages/opencode/src/session/llm.ts | 148 ++++++++++++++------------- 2 files changed, 79 insertions(+), 73 deletions(-) diff --git a/packages/opencode/src/agent/agent.ts b/packages/opencode/src/agent/agent.ts index 070e98167a32..5f9e971fa9fe 100644 --- a/packages/opencode/src/agent/agent.ts +++ b/packages/opencode/src/agent/agent.ts @@ -16,7 +16,6 @@ import { Permission } from "@/permission" import { mergeDeep, pipe, sortBy, values } from "remeda" import { Global } from "@/global" import path from "path" -import { Plugin } from "@/plugin" import { Skill } from "../skill" import { Effect, ServiceMap, Layer } from "effect" import { InstanceState } from "@/effect/instance-state" @@ -333,9 +332,6 @@ export namespace Agent { const resolved = yield* provider.getModel(model.providerID, model.modelID) const system = [PROMPT_GENERATE] - yield* Effect.promise(() => - Plugin.trigger("experimental.chat.system.transform", { model: resolved }, { system }), - ) const existing = yield* InstanceState.useEffect(state, (s) => s.list()) const USER_MESSAGE_CONTENT 
= { diff --git a/packages/opencode/src/session/llm.ts b/packages/opencode/src/session/llm.ts index 2ec0716cc761..1ddc968ebecb 100644 --- a/packages/opencode/src/session/llm.ts +++ b/packages/opencode/src/session/llm.ts @@ -112,42 +112,36 @@ export namespace LLM { modelID: input.model.id, providerID: input.model.providerID, }) - const [language, cfg, provider, auth] = await Promise.all([ - Provider.getLanguage(input.model), - Config.get(), - Provider.getProvider(input.model.providerID), - Auth.get(input.model.providerID), - ]) - // TODO: move this to a proper hook - const isOpenaiOauth = provider.id === "openai" && auth?.type === "oauth" - - const system: string[] = [] - system.push( - [ - // use agent prompt otherwise provider prompt - ...(input.agent.prompt ? [input.agent.prompt] : SystemPrompt.provider(input.model)), - // any custom prompt passed into this call - ...input.system, - // any custom prompt from last user message - ...(input.user.system ? [input.user.system] : []), - ] - .filter((x) => x) - .join("\n"), - ) + const prep = await prepareSystem({ + model: input.model, + sessionID: input.sessionID, + system: [ + [ + // use agent prompt otherwise provider prompt + ...(input.agent.prompt ? [input.agent.prompt] : SystemPrompt.provider(input.model)), + // any custom prompt passed into this call + ...input.system, + // any custom prompt from last user message + ...(input.user.system ? 
[input.user.system] : []), + ] + .filter((x) => x) + .join("\n"), + ], + }) - const header = system[0] - await Plugin.trigger( - "experimental.chat.system.transform", - { sessionID: input.sessionID, model: input.model }, - { system }, - ) + const header = prep.system[0] // rejoin to maintain 2-part structure for caching if header unchanged - if (system.length > 2 && system[0] === header) { - const rest = system.slice(1) - system.length = 0 - system.push(header, rest.join("\n")) + if (prep.system.length > 2 && prep.system[0] === header) { + const rest = prep.system.slice(1) + prep.system.length = 0 + prep.system.push(header, rest.join("\n")) } + const language = prep.language + const cfg = prep.cfg + const provider = prep.provider + const isOpenaiOauth = provider.id === "openai" && prep.auth?.type === "oauth" + const variant = !input.small && input.model.variants && input.user.model.variant ? input.model.variants[input.user.model.variant] @@ -166,23 +160,15 @@ export namespace LLM { mergeDeep(variant), ) if (isOpenaiOauth) { - options.instructions = system.join("\n") + // OpenAI OAuth expects instructions instead of system-role messages. + options.instructions = prep.system.join("\n") } const isWorkflow = language instanceof GitLabWorkflowLanguageModel - const messages = isOpenaiOauth + // Workflow models receive the system prompt separately via `systemPrompt`. + const messages = isWorkflow ? input.messages - : isWorkflow - ? input.messages - : [ - ...system.map( - (x): ModelMessage => ({ - role: "system", - content: x, - }), - ), - ...input.messages, - ] + : buildMessages({ system: prep.system, messages: input.messages, provider: prep.provider, auth: prep.auth }) const params = await Plugin.trigger( "chat.params", @@ -253,7 +239,7 @@ export namespace LLM { // and results sent back over the WebSocket. 
if (language instanceof GitLabWorkflowLanguageModel) { const workflowModel = language - workflowModel.systemPrompt = system.join("\n") + workflowModel.systemPrompt = prep.system.join("\n") workflowModel.toolExecutor = async (toolName, argsJson, _requestID) => { const t = tools[toolName] if (!t || !t.execute) { @@ -357,42 +343,31 @@ export namespace LLM { } export async function generateObject(input: ObjectRequest) { - const [language, cfg, auth] = await Promise.all([ - Provider.getLanguage(input.model), - Config.get(), - Auth.get(input.model.providerID), - ]) - const isOpenaiOauth = input.model.providerID === "openai" && auth?.type === "oauth" - const messages = isOpenaiOauth - ? input.messages - : [ - ...input.system.map( - (x): ModelMessage => ({ - role: "system", - content: x, - }), - ), - ...input.messages, - ] + const prep = await prepareSystem({ model: input.model, system: input.system }) const params = { experimental_telemetry: { - isEnabled: cfg.experimental?.openTelemetry, + isEnabled: prep.cfg.experimental?.openTelemetry, metadata: { - userId: cfg.username ?? "unknown", + userId: prep.cfg.username ?? 
"unknown", }, }, temperature: input.temperature, - messages, - model: language, + messages: buildMessages({ + system: prep.system, + messages: input.messages, + provider: prep.provider, + auth: prep.auth, + }), + model: prep.language, schema: input.schema, abortSignal: input.abort, } satisfies Parameters>[0] - if (isOpenaiOauth) { + if (prep.provider.id === "openai" && prep.auth?.type === "oauth") { const result = streamObject({ ...params, providerOptions: ProviderTransform.providerOptions(input.model, { - instructions: input.system.join("\n"), + instructions: prep.system.join("\n"), store: false, }), onError: () => {}, @@ -406,6 +381,41 @@ export namespace LLM { return generateObjectAI(params).then((x) => x.object) } + async function prepareSystem(input: { model: Provider.Model; system: string[]; sessionID?: string }) { + const [language, cfg, provider, auth] = await Promise.all([ + Provider.getLanguage(input.model), + Config.get(), + Provider.getProvider(input.model.providerID), + Auth.get(input.model.providerID), + ]) + const system = [...input.system] + await Plugin.trigger( + "experimental.chat.system.transform", + input.sessionID ? 
{ sessionID: input.sessionID, model: input.model } : { model: input.model }, + { system }, + ) + return { + language, + cfg, + provider, + auth, + system, + } + } + + function buildMessages(input: { system: string[]; messages: ModelMessage[]; provider: Provider.Info; auth?: Auth.Info }): ModelMessage[] { + if (input.provider.id === "openai" && input.auth?.type === "oauth") return input.messages + return [ + ...input.system.map( + (x): ModelMessage => ({ + role: "system", + content: x, + }), + ), + ...input.messages, + ] + } + function resolveTools(input: Pick) { const disabled = Permission.disabled( Object.keys(input.tools), From d98591021a06f4140c1fde65e96de9b15f9e018c Mon Sep 17 00:00:00 2001 From: OpeOginni Date: Thu, 9 Apr 2026 11:30:44 +0100 Subject: [PATCH 7/9] refactor(llm): separated system preparation and message building process to both stream and generateObject methods --- packages/opencode/src/session/llm.ts | 142 +++++++++++++-------------- 1 file changed, 67 insertions(+), 75 deletions(-) diff --git a/packages/opencode/src/session/llm.ts b/packages/opencode/src/session/llm.ts index eb8b2ea05329..8e680c6c3962 100644 --- a/packages/opencode/src/session/llm.ts +++ b/packages/opencode/src/session/llm.ts @@ -116,35 +116,39 @@ export namespace LLM { modelID: input.model.id, providerID: input.model.providerID, }) - const prep = await prepareSystem({ - model: input.model, - sessionID: input.sessionID, - system: [ - [ - // use agent prompt otherwise provider prompt - ...(input.agent.prompt ? [input.agent.prompt] : SystemPrompt.provider(input.model)), - // any custom prompt passed into this call - ...input.system, - // any custom prompt from last user message - ...(input.user.system ? 
[input.user.system] : []), - ] - .filter((x) => x) - .join("\n"), - ], - }) + const [language, cfg, provider, auth] = await Promise.all([ + Provider.getLanguage(input.model), + Config.get(), + Provider.getProvider(input.model.providerID), + Auth.get(input.model.providerID), + ]) + const system = [ + [ + // use agent prompt otherwise provider prompt + ...(input.agent.prompt ? [input.agent.prompt] : SystemPrompt.provider(input.model)), + // any custom prompt passed into this call + ...input.system, + // any custom prompt from last user message + ...(input.user.system ? [input.user.system] : []), + ] + .filter((x) => x) + .join("\n"), + ] + await Plugin.trigger( + "experimental.chat.system.transform", + { sessionID: input.sessionID, model: input.model }, + { system }, + ) - const header = prep.system[0] + const header = system[0] // rejoin to maintain 2-part structure for caching if header unchanged - if (prep.system.length > 2 && prep.system[0] === header) { - const rest = prep.system.slice(1) - prep.system.length = 0 - prep.system.push(header, rest.join("\n")) + if (system.length > 2 && system[0] === header) { + const rest = system.slice(1) + system.length = 0 + system.push(header, rest.join("\n")) } - const language = prep.language - const cfg = prep.cfg - const provider = prep.provider - const isOpenaiOauth = provider.id === "openai" && prep.auth?.type === "oauth" + const isOpenaiOauth = provider.id === "openai" && auth?.type === "oauth" const variant = !input.small && input.model.variants && input.user.model.variant @@ -165,14 +169,24 @@ export namespace LLM { ) if (isOpenaiOauth) { // OpenAI OAuth expects instructions instead of system-role messages. - options.instructions = prep.system.join("\n") + options.instructions = system.join("\n") } const isWorkflow = language instanceof GitLabWorkflowLanguageModel // Workflow models receive the system prompt separately via `systemPrompt`. const messages = isWorkflow ? 
input.messages - : buildMessages({ system: prep.system, messages: input.messages, provider: prep.provider, auth: prep.auth }) + : isOpenaiOauth + ? input.messages + : [ + ...system.map( + (x): ModelMessage => ({ + role: "system", + content: x, + }), + ), + ...input.messages, + ] const params = await Plugin.trigger( "chat.params", @@ -244,7 +258,7 @@ export namespace LLM { if (language instanceof GitLabWorkflowLanguageModel) { const workflowModel = language workflowModel.sessionID = input.sessionID - workflowModel.systemPrompt = prep.system.join("\n") + workflowModel.systemPrompt = system.join("\n") workflowModel.toolExecutor = async (toolName, argsJson, _requestID) => { const t = tools[toolName] if (!t || !t.execute) { @@ -399,31 +413,44 @@ export namespace LLM { } export async function generateObject(input: ObjectRequest) { - const prep = await prepareSystem({ model: input.model, system: input.system }) + const [language, cfg, provider, auth] = await Promise.all([ + Provider.getLanguage(input.model), + Config.get(), + Provider.getProvider(input.model.providerID), + Auth.get(input.model.providerID), + ]) + const system = [...input.system] + await Plugin.trigger("experimental.chat.system.transform", { model: input.model }, { system }) const params = { experimental_telemetry: { - isEnabled: prep.cfg.experimental?.openTelemetry, + isEnabled: cfg.experimental?.openTelemetry, metadata: { - userId: prep.cfg.username ?? "unknown", + userId: cfg.username ?? "unknown", }, }, temperature: input.temperature, - messages: buildMessages({ - system: prep.system, - messages: input.messages, - provider: prep.provider, - auth: prep.auth, - }), - model: prep.language, + messages: + provider.id === "openai" && auth?.type === "oauth" + ? 
input.messages + : [ + ...system.map( + (x): ModelMessage => ({ + role: "system", + content: x, + }), + ), + ...input.messages, + ], + model: language, schema: input.schema, abortSignal: input.abort, } satisfies Parameters>[0] - if (prep.provider.id === "openai" && prep.auth?.type === "oauth") { + if (provider.id === "openai" && auth?.type === "oauth") { const result = streamObject({ ...params, providerOptions: ProviderTransform.providerOptions(input.model, { - instructions: prep.system.join("\n"), + instructions: system.join("\n"), store: false, }), onError: () => {}, @@ -437,41 +464,6 @@ export namespace LLM { return generateObjectAI(params).then((x) => x.object) } - async function prepareSystem(input: { model: Provider.Model; system: string[]; sessionID?: string }) { - const [language, cfg, provider, auth] = await Promise.all([ - Provider.getLanguage(input.model), - Config.get(), - Provider.getProvider(input.model.providerID), - Auth.get(input.model.providerID), - ]) - const system = [...input.system] - await Plugin.trigger( - "experimental.chat.system.transform", - input.sessionID ? 
{ sessionID: input.sessionID, model: input.model } : { model: input.model }, - { system }, - ) - return { - language, - cfg, - provider, - auth, - system, - } - } - - function buildMessages(input: { system: string[]; messages: ModelMessage[]; provider: Provider.Info; auth?: Auth.Info }): ModelMessage[] { - if (input.provider.id === "openai" && input.auth?.type === "oauth") return input.messages - return [ - ...input.system.map( - (x): ModelMessage => ({ - role: "system", - content: x, - }), - ), - ...input.messages, - ] - } - function resolveTools(input: Pick) { const disabled = Permission.disabled( Object.keys(input.tools), From 540aa88296838a7d32a97d07b9ae3aaef33ecad1 Mon Sep 17 00:00:00 2001 From: OpeOginni Date: Thu, 9 Apr 2026 11:38:08 +0100 Subject: [PATCH 8/9] chore: reduce unneeded updates --- packages/opencode/src/session/llm.ts | 14 ++++++++------ 1 file changed, 8 insertions(+), 6 deletions(-) diff --git a/packages/opencode/src/session/llm.ts b/packages/opencode/src/session/llm.ts index 8e680c6c3962..33220c5db375 100644 --- a/packages/opencode/src/session/llm.ts +++ b/packages/opencode/src/session/llm.ts @@ -122,7 +122,11 @@ export namespace LLM { Provider.getProvider(input.model.providerID), Auth.get(input.model.providerID), ]) - const system = [ + // TODO: move this to a proper hook + const isOpenaiOauth = provider.id === "openai" && auth?.type === "oauth" + + const system: string[] = [] + system.push( [ // use agent prompt otherwise provider prompt ...(input.agent.prompt ? 
[input.agent.prompt] : SystemPrompt.provider(input.model)), @@ -133,14 +137,15 @@ export namespace LLM { ] .filter((x) => x) .join("\n"), - ] + ) + + const header = system[0] await Plugin.trigger( "experimental.chat.system.transform", { sessionID: input.sessionID, model: input.model }, { system }, ) - const header = system[0] // rejoin to maintain 2-part structure for caching if header unchanged if (system.length > 2 && system[0] === header) { const rest = system.slice(1) @@ -148,8 +153,6 @@ export namespace LLM { system.push(header, rest.join("\n")) } - const isOpenaiOauth = provider.id === "openai" && auth?.type === "oauth" - const variant = !input.small && input.model.variants && input.user.model.variant ? input.model.variants[input.user.model.variant] @@ -173,7 +176,6 @@ export namespace LLM { } const isWorkflow = language instanceof GitLabWorkflowLanguageModel - // Workflow models receive the system prompt separately via `systemPrompt`. const messages = isWorkflow ? input.messages : isOpenaiOauth From efe75dc4cb9bdd3b238759b1f9a9a59ac2662d58 Mon Sep 17 00:00:00 2001 From: OpeOginni Date: Thu, 9 Apr 2026 11:42:44 +0100 Subject: [PATCH 9/9] fix(llm): chore chore chore --- packages/opencode/src/session/llm.ts | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/packages/opencode/src/session/llm.ts b/packages/opencode/src/session/llm.ts index 33220c5db375..38df666fb588 100644 --- a/packages/opencode/src/session/llm.ts +++ b/packages/opencode/src/session/llm.ts @@ -145,7 +145,6 @@ export namespace LLM { { sessionID: input.sessionID, model: input.model }, { system }, ) - // rejoin to maintain 2-part structure for caching if header unchanged if (system.length > 2 && system[0] === header) { const rest = system.slice(1) @@ -171,14 +170,13 @@ export namespace LLM { mergeDeep(variant), ) if (isOpenaiOauth) { - // OpenAI OAuth expects instructions instead of system-role messages. 
options.instructions = system.join("\n") } const isWorkflow = language instanceof GitLabWorkflowLanguageModel - const messages = isWorkflow + const messages = isOpenaiOauth ? input.messages - : isOpenaiOauth + : isWorkflow ? input.messages : [ ...system.map(