From 4a160f95a9e0821a2c7d27c9dc2f1e2a32be7515 Mon Sep 17 00:00:00 2001
From: CrytsalTraveler
Date: Tue, 10 Mar 2026 15:32:56 +0800
Subject: [PATCH 1/5] feat: add systemMessage capability to support single vs
 multiple system messages

- Add optional systemMessage field to capability schema ('single' | 'multiple')
- Add getSystemMessageMode helper with hybrid fallback (Models.dev + defaults)
- Anthropic: multiple system messages (existing behavior)
- All others: single system message joined with newline

Fixes 'Chat template error: System message must be at the beginning' for
Qwen3.5, Llama, Ollama
---
 packages/opencode/src/provider/provider.ts |  2 ++
 packages/opencode/src/session/llm.ts       | 31 +++++++++++++++++-----
 2 files changed, 27 insertions(+), 6 deletions(-)

diff --git a/packages/opencode/src/provider/provider.ts b/packages/opencode/src/provider/provider.ts
index ccd3c55b4f1..10c61aca37a 100644
--- a/packages/opencode/src/provider/provider.ts
+++ b/packages/opencode/src/provider/provider.ts
@@ -707,6 +707,7 @@ export namespace Provider {
             field: z.enum(["reasoning_content", "reasoning_details"]),
           }),
         ]),
+        systemMessage: z.enum(["single", "multiple"]).optional(),
       }),
       cost: z.object({
         input: z.number(),
@@ -814,6 +815,7 @@ export namespace Provider {
           pdf: model.modalities?.output?.includes("pdf") ?? false,
         },
         interleaved: model.interleaved ?? false,
+        systemMessage: undefined,
       },
       release_date: model.release_date,
       variants: {},
diff --git a/packages/opencode/src/session/llm.ts b/packages/opencode/src/session/llm.ts
index 4e42fb0d2ec..761c9b52949 100644
--- a/packages/opencode/src/session/llm.ts
+++ b/packages/opencode/src/session/llm.ts
@@ -27,6 +27,23 @@ export namespace LLM {
   const log = Log.create({ service: "llm" })
   export const OUTPUT_TOKEN_MAX = ProviderTransform.OUTPUT_TOKEN_MAX
 
+  function getSystemMessageMode(model: Provider.Model): "single" | "multiple" {
+    if (model.capabilities.systemMessage) {
+      return model.capabilities.systemMessage
+    }
+    const providerDefaults: Record<string, "single" | "multiple"> = {
+      anthropic: "multiple",
+    }
+    const mode = providerDefaults[model.providerID] ?? "single"
+    if (!providerDefaults[model.providerID]) {
+      log.info("Using default 'single' systemMessage mode for provider", {
+        providerID: model.providerID,
+        modelID: model.id,
+      })
+    }
+    return mode
+  }
+
   export type StreamInput = {
     user: MessageV2.User
     sessionID: string
@@ -223,12 +240,14 @@ export namespace LLM {
       },
       maxRetries: input.retries ?? 0,
       messages: [
-        ...system.map(
-          (x): ModelMessage => ({
-            role: "system",
-            content: x,
-          }),
-        ),
+        ...(getSystemMessageMode(input.model) === "multiple"
+          ? system.map(
+              (x): ModelMessage => ({
+                role: "system",
+                content: x,
+              }),
+            )
+          : ([{ role: "system", content: system.join("\n") }] as ModelMessage[])),
         ...input.messages,
       ],
       model: wrapLanguageModel({
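
The behavioral difference the new capability encodes is easiest to see in isolation. Below is a minimal sketch of the branch added to llm.ts, assuming a simplified ModelMessage type in place of the `ai` SDK import; the prompts are illustrative, not from the patch:

```ts
// Minimal sketch of single vs. multiple system-message handling.
// ModelMessage is simplified here; only the branch logic mirrors the patch.
type ModelMessage = { role: "system" | "user"; content: string }

function buildSystemMessages(system: string[], mode: "single" | "multiple"): ModelMessage[] {
  return mode === "multiple"
    ? system.map((x): ModelMessage => ({ role: "system", content: x }))
    : [{ role: "system", content: system.join("\n") }]
}

const system = ["You are a helpful assistant.", "Always answer in English."]

// Anthropic-style providers accept several system messages:
console.log(buildSystemMessages(system, "multiple").length) // 2

// Everyone else gets one newline-joined system message at the front, which
// is what strict chat templates (Qwen, Llama, Ollama) expect:
console.log(buildSystemMessages(system, "single"))
// [{ role: "system", content: "You are a helpful assistant.\nAlways answer in English." }]
```
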
From 6279b4ec737fadeaf12dad753860721e37b1e40b Mon Sep 17 00:00:00 2001
From: CrytsalTraveler
Date: Wed, 11 Mar 2026 13:44:40 +0800
Subject: [PATCH 2/5] feat: add reasoning_content support for DeepSeek/oMLX
 models

- Add reasoning_content field to response schema (alongside Copilot's reasoning_text)
- Support both fields in non-streaming and streaming responses
- Enable reasoning support for local models using the reasoning_content format
---
 .../chat/openai-compatible-chat-language-model.ts | 10 ++++++----
 1 file changed, 6 insertions(+), 4 deletions(-)

diff --git a/packages/opencode/src/provider/sdk/copilot/chat/openai-compatible-chat-language-model.ts b/packages/opencode/src/provider/sdk/copilot/chat/openai-compatible-chat-language-model.ts
index c85d3f3d178..84b61d6fec6 100644
--- a/packages/opencode/src/provider/sdk/copilot/chat/openai-compatible-chat-language-model.ts
+++ b/packages/opencode/src/provider/sdk/copilot/chat/openai-compatible-chat-language-model.ts
@@ -228,8 +228,8 @@ export class OpenAICompatibleChatLanguageModel implements LanguageModelV2 {
       })
     }
 
-    // reasoning content (Copilot uses reasoning_text):
-    const reasoning = choice.message.reasoning_text
+    // reasoning content (Copilot uses reasoning_text, DeepSeek/oMLX uses reasoning_content):
+    const reasoning = choice.message.reasoning_text ?? choice.message.reasoning_content
     if (reasoning != null && reasoning.length > 0) {
       content.push({
         type: "reasoning",
@@ -456,8 +456,8 @@ export class OpenAICompatibleChatLanguageModel implements LanguageModelV2 {
           reasoningOpaque = delta.reasoning_opaque
         }
 
-        // enqueue reasoning before text deltas (Copilot uses reasoning_text):
-        const reasoningContent = delta.reasoning_text
+        // enqueue reasoning before text deltas (Copilot uses reasoning_text, DeepSeek/oMLX uses reasoning_content):
+        const reasoningContent = delta.reasoning_text ?? delta.reasoning_content
         if (reasoningContent) {
           if (!isActiveReasoning) {
             controller.enqueue({
@@ -722,6 +722,7 @@ const OpenAICompatibleChatResponseSchema = z.object({
       // Copilot-specific reasoning fields
       reasoning_text: z.string().nullish(),
       reasoning_opaque: z.string().nullish(),
+      reasoning_content: z.string().nullish(),
       tool_calls: z
         .array(
           z.object({
@@ -757,6 +758,7 @@ const createOpenAICompatibleChatChunkSchema =
           // Copilot-specific reasoning fields
           reasoning_text: z.string().nullish(),
           reasoning_opaque: z.string().nullish(),
+          reasoning_content: z.string().nullish(),
           tool_calls: z
             .array(
               z.object({
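
The `??` fallback is the whole patch in miniature: OpenAI-compatible servers disagree on where reasoning lives, with Copilot emitting reasoning_text while DeepSeek-style backends and oMLX emit reasoning_content. A minimal sketch of the extraction, assuming a simplified delta shape (only the two field names come from the patch):

```ts
// Sketch of the reasoning-field fallback; the delta shape is simplified.
interface DeltaLike {
  content?: string | null
  reasoning_text?: string | null // Copilot
  reasoning_content?: string | null // DeepSeek / oMLX
}

function extractReasoning(delta: DeltaLike): string | undefined {
  // `??` (not `||`) matches the patch: only null/undefined falls through,
  // so an explicit empty reasoning_text is not overridden by the other field.
  const reasoning = delta.reasoning_text ?? delta.reasoning_content
  return reasoning != null && reasoning.length > 0 ? reasoning : undefined
}

console.log(extractReasoning({ reasoning_text: "thinking via Copilot" }))
console.log(extractReasoning({ reasoning_content: "thinking via DeepSeek" }))
console.log(extractReasoning({ content: "no reasoning present" })) // undefined
```
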
Date: Wed, 11 Mar 2026 14:12:36 +0800
Subject: [PATCH 3/5] fix(convert): merge system messages and handle lone
 system case

- Merge multiple system messages into single message with \n\n separator
- Convert lone system message to user message to prevent API rejection
- Add tests for system message merging and lone system conversion

This ensures compatibility with APIs that require a single system message
or at least one user message (e.g., OpenAI, local models via oMLX)
---
 ...vert-to-openai-compatible-chat-messages.ts | 33 ++++++++++++++++---
 .../convert-to-copilot-messages.test.ts       | 17 ++++++++--
 2 files changed, 43 insertions(+), 7 deletions(-)

diff --git a/packages/opencode/src/provider/sdk/copilot/chat/convert-to-openai-compatible-chat-messages.ts b/packages/opencode/src/provider/sdk/copilot/chat/convert-to-openai-compatible-chat-messages.ts
index e1e3ed4c201..56b950c6170 100644
--- a/packages/opencode/src/provider/sdk/copilot/chat/convert-to-openai-compatible-chat-messages.ts
+++ b/packages/opencode/src/provider/sdk/copilot/chat/convert-to-openai-compatible-chat-messages.ts
@@ -12,15 +12,38 @@ function getOpenAIMetadata(message: { providerOptions?: SharedV2ProviderMetadata
 
 export function convertToOpenAICompatibleChatMessages(prompt: LanguageModelV2Prompt): OpenAICompatibleChatPrompt {
   const messages: OpenAICompatibleChatPrompt = []
+
+  const systemPrompt: string[] = []
+  for (const { role, content } of prompt) {
+    if (role === "system") {
+      systemPrompt.push(content)
+    }
+  }
+
+  const hasSystem = systemPrompt.length > 0
+  const hasOthers = prompt.some((m: LanguageModelV2Prompt[number]) => m.role !== "system")
+
+  if (hasSystem) {
+    if (hasOthers) {
+      messages.push({
+        role: "system",
+        content: systemPrompt.join("\n\n"),
+      })
+    } else {
+      // If there are only system messages, some APIs (like OpenAI) will fail.
+      // We convert them to a user message in this case.
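      })
    }
  }
```

(The hunk continues below with the user-message branch; as a forward reference, both conversion rules are demonstrated end to end in the sketch that follows the test diff.)
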
+      messages.push({
+        role: "user",
+        content: systemPrompt.join("\n\n"),
+      })
+    }
+  }
+
   for (const { role, content, ...message } of prompt) {
     const metadata = getOpenAIMetadata({ ...message })
     switch (role) {
       case "system": {
-        messages.push({
-          role: "system",
-          content: content,
-          ...metadata,
-        })
+        // Handled above
         break
       }
 
diff --git a/packages/opencode/test/provider/copilot/convert-to-copilot-messages.test.ts b/packages/opencode/test/provider/copilot/convert-to-copilot-messages.test.ts
index 6f874db6d2e..a2767d39871 100644
--- a/packages/opencode/test/provider/copilot/convert-to-copilot-messages.test.ts
+++ b/packages/opencode/test/provider/copilot/convert-to-copilot-messages.test.ts
@@ -2,7 +2,7 @@ import { convertToOpenAICompatibleChatMessages as convertToCopilotMessages } fro
 import { describe, test, expect } from "bun:test"
 
 describe("system messages", () => {
-  test("should convert system message content to string", () => {
+  test("should convert lone system message content to a user message", () => {
     const result = convertToCopilotMessages([
       {
         role: "system",
@@ -12,11 +12,24 @@ describe("system messages", () => {
 
     expect(result).toEqual([
       {
-        role: "system",
+        role: "user",
         content: "You are a helpful assistant with AGENTS.md instructions.",
       },
     ])
   })
+
+  test("should merge multiple system messages into one", () => {
+    const result = convertToCopilotMessages([
+      { role: "system", content: "System 1" },
+      { role: "system", content: "System 2" },
+      { role: "user", content: [{ type: "text", text: "Hello" }] },
+    ])
+
+    expect(result).toEqual([
+      { role: "system", content: "System 1\n\nSystem 2" },
+      { role: "user", content: "Hello" },
+    ])
+  })
 })
 
 describe("user messages", () => {

From f327584e0ad13cbd39bc47753fec68b4b58cdc46 Mon Sep 17 00:00:00 2001
From: Luke Parker <10430890+Hona@users.noreply.github.com>
Date: Fri, 10 Apr 2026 10:00:46 +1000
Subject: [PATCH 4/5] fix windows e2e backend not stopping on sigterm waiting
 10s for no reason (#21781)

---
 packages/app/e2e/backend.ts | 10 +++++++---
 1 file changed, 7 insertions(+), 3 deletions(-)

diff --git a/packages/app/e2e/backend.ts b/packages/app/e2e/backend.ts
index 9febc4b3ff4..a03d1d43750 100644
--- a/packages/app/e2e/backend.ts
+++ b/packages/app/e2e/backend.ts
@@ -44,8 +44,12 @@ async function waitForHealth(url: string, probe = "/global/health") {
   throw new Error(`Timed out waiting for backend health at ${url}${probe}${last ? ` (${last})` : ""}`)
 }
 
+function done(proc: ReturnType<typeof spawn>) {
+  return proc.exitCode !== null || proc.signalCode !== null
+}
+
 async function waitExit(proc: ReturnType<typeof spawn>, timeout = 10_000) {
-  if (proc.exitCode !== null) return
+  if (done(proc)) return
   await Promise.race([
     new Promise<void>((resolve) => proc.once("exit", () => resolve())),
     new Promise<void>((resolve) => setTimeout(resolve, timeout)),
   ])
@@ -123,11 +127,11 @@ export async function startBackend(label: string, input?: { llmUrl?: string }):
   return {
     url,
     async stop() {
-      if (proc.exitCode === null) {
+      if (!done(proc)) {
        proc.kill("SIGTERM")
         await waitExit(proc)
       }
-      if (proc.exitCode === null) {
+      if (!done(proc)) {
         proc.kill("SIGKILL")
         await waitExit(proc)
       }
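
The root cause here: a child terminated by a signal exits with `exitCode === null` and a non-null `signalCode`, so a check on `exitCode` alone treats the killed backend as still running and sits through the full 10s timeout. A small standalone sketch, assuming Node's `child_process` API as in the patch (`sleep 60` is just an illustrative long-running child on POSIX; the patch targets Windows, where the same null-`exitCode`/set-`signalCode` pattern appears after `kill`):

```ts
// Sketch of why signalCode must be checked: a child terminated by a
// signal reports exitCode === null and signalCode === "SIGTERM".
import { spawn } from "node:child_process"

const proc = spawn("sleep", ["60"])

proc.once("exit", () => {
  console.log({ exitCode: proc.exitCode, signalCode: proc.signalCode })
  // => { exitCode: null, signalCode: 'SIGTERM' }
  // The old `proc.exitCode !== null` check would still treat this as running.
  const done = proc.exitCode !== null || proc.signalCode !== null
  console.log("done:", done) // true
})

setTimeout(() => proc.kill("SIGTERM"), 100)
```

Earlier in this patch series, the system-message conversion rules from PATCH 3 can be checked the same way. A minimal sketch, assuming a simplified prompt type in place of `LanguageModelV2Prompt` (the `\n\n` separator and the lone-system fallback mirror the patch):

```ts
// Standalone sketch of the two system-message rules from PATCH 3;
// the message type is a simplified stand-in for the SDK types.
type PromptMessage = { role: "system" | "user" | "assistant"; content: string }

function convertSystemMessages(prompt: PromptMessage[]): PromptMessage[] {
  const system = prompt.filter((m) => m.role === "system").map((m) => m.content)
  const others = prompt.filter((m) => m.role !== "system")
  if (system.length === 0) return others
  // Merged into one message; demoted to "user" when nothing else follows,
  // since some APIs reject prompts containing only system messages.
  const role = others.length > 0 ? ("system" as const) : ("user" as const)
  return [{ role, content: system.join("\n\n") }, ...others]
}

console.log(
  convertSystemMessages([
    { role: "system", content: "System 1" },
    { role: "system", content: "System 2" },
    { role: "user", content: "Hello" },
  ]),
)
// [{ role: "system", content: "System 1\n\nSystem 2" }, { role: "user", content: "Hello" }]

console.log(convertSystemMessages([{ role: "system", content: "Ping" }]))
// [{ role: "user", content: "Ping" }]
```
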
From da2e91cf60ab4ae49774531e81876ce67eb601b8 Mon Sep 17 00:00:00 2001
From: Luke Parker <10430890+Hona@users.noreply.github.com>
Date: Fri, 10 Apr 2026 10:01:10 +1000
Subject: [PATCH 5/5] ci use node 24 in test workflow fixing random ECONNRESET
 (#21782)

---
 .github/workflows/test.yml | 13 +++++++++++++
 1 file changed, 13 insertions(+)

diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml
index 70a8477fb51..510f682549e 100644
--- a/.github/workflows/test.yml
+++ b/.github/workflows/test.yml
@@ -17,6 +17,9 @@ permissions:
   contents: read
   checks: write
 
+env:
+  FORCE_JAVASCRIPT_ACTIONS_TO_NODE24: true
+
 jobs:
   unit:
     name: unit (${{ matrix.settings.name }})
@@ -38,6 +41,11 @@ jobs:
         with:
           token: ${{ secrets.GITHUB_TOKEN }}
 
+      - name: Setup Node
+        uses: actions/setup-node@v4
+        with:
+          node-version: "24"
+
       - name: Setup Bun
         uses: ./.github/actions/setup-bun
 
@@ -102,6 +110,11 @@
         with:
           token: ${{ secrets.GITHUB_TOKEN }}
 
+      - name: Setup Node
+        uses: actions/setup-node@v4
+        with:
+          node-version: "24"
+
       - name: Setup Bun
         uses: ./.github/actions/setup-bun