13 changes: 13 additions & 0 deletions .github/workflows/test.yml
@@ -17,6 +17,9 @@ permissions:
contents: read
checks: write

+ env:
+ FORCE_JAVASCRIPT_ACTIONS_TO_NODE24: true
+
jobs:
unit:
name: unit (${{ matrix.settings.name }})
@@ -38,6 +41,11 @@ jobs:
with:
token: ${{ secrets.GITHUB_TOKEN }}

+ - name: Setup Node
+ uses: actions/setup-node@v4
+ with:
+ node-version: "24"
+
- name: Setup Bun
uses: ./.github/actions/setup-bun

@@ -102,6 +110,11 @@ jobs:
with:
token: ${{ secrets.GITHUB_TOKEN }}

+ - name: Setup Node
+ uses: actions/setup-node@v4
+ with:
+ node-version: "24"
+
- name: Setup Bun
uses: ./.github/actions/setup-bun

10 changes: 7 additions & 3 deletions packages/app/e2e/backend.ts
@@ -44,8 +44,12 @@ async function waitForHealth(url: string, probe = "/global/health") {
throw new Error(`Timed out waiting for backend health at ${url}${probe}${last ? ` (${last})` : ""}`)
}

+ function done(proc: ReturnType<typeof spawn>) {
+ return proc.exitCode !== null || proc.signalCode !== null
+ }
+
async function waitExit(proc: ReturnType<typeof spawn>, timeout = 10_000) {
- if (proc.exitCode !== null) return
+ if (done(proc)) return
await Promise.race([
new Promise<void>((resolve) => proc.once("exit", () => resolve())),
new Promise<void>((resolve) => setTimeout(resolve, timeout)),
@@ -123,11 +127,11 @@ export async function startBackend(label: string, input?: { llmUrl?: string }):
return {
url,
async stop() {
- if (proc.exitCode === null) {
+ if (!done(proc)) {
proc.kill("SIGTERM")
await waitExit(proc)
}
- if (proc.exitCode === null) {
+ if (!done(proc)) {
proc.kill("SIGKILL")
await waitExit(proc)
}
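
The motivation for done(): a child that is killed by a signal exits with exitCode === null and a non-null signalCode, so the previous exitCode-only check treated a cleanly SIGTERM'd backend as still running and always escalated to SIGKILL. A minimal sketch of the two exit shapes, assuming Node's child_process API (the e2e harness's spawn exposes the same two fields):

import { spawn } from "node:child_process"

// Hypothetical child used only to illustrate exit bookkeeping.
const child = spawn("sleep", ["60"])

child.once("exit", (code, signal) => {
  // Killed by a signal: code === null, signal === "SIGTERM".
  // Exited on its own:  code is a number, signal === null.
  // done() returns true in both cases by checking exitCode OR signalCode.
  console.log({ code, signal, exitCode: child.exitCode, signalCode: child.signalCode })
})

child.kill("SIGTERM")
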
2 changes: 2 additions & 0 deletions packages/opencode/src/provider/provider.ts
@@ -853,6 +853,7 @@ export namespace Provider {
field: z.enum(["reasoning_content", "reasoning_details"]),
}),
]),
+ systemMessage: z.enum(["single", "multiple"]).optional(),
}),
cost: z.object({
input: z.number(),
@@ -988,6 +989,7 @@
pdf: model.modalities?.output?.includes("pdf") ?? false,
},
interleaved: model.interleaved ?? false,
+ systemMessage: undefined,
},
release_date: model.release_date,
variants: {},
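
The new capability is deliberately optional: models sourced from the catalog get systemMessage: undefined (second hunk above), leaving the decision to the runtime default in llm.ts. A hedged sketch of what an explicit override might look like in a custom model definition (field names follow the schema above; the exact config surface is an assumption):

// Hypothetical model entry with the new capability set explicitly.
const model = {
  id: "my-model",
  providerID: "my-provider",
  capabilities: {
    // "multiple": keep each system prompt as its own system message.
    // "single":   join all system prompts into one leading message.
    systemMessage: "single" as const,
  },
}
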
@@ -12,15 +12,38 @@ function getOpenAIMetadata(message: { providerOptions?: SharedV3ProviderOptions

export function convertToOpenAICompatibleChatMessages(prompt: LanguageModelV3Prompt): OpenAICompatibleChatPrompt {
const messages: OpenAICompatibleChatPrompt = []

+ const systemPrompt: string[] = []
+ for (const { role, content } of prompt) {
+ if (role === "system") {
+ systemPrompt.push(content)
+ }
+ }
+
+ const hasSystem = systemPrompt.length > 0
+ const hasOthers = prompt.some((m: LanguageModelV3Prompt[number]) => m.role !== "system")
+
+ if (hasSystem) {
+ if (hasOthers) {
+ messages.push({
+ role: "system",
+ content: systemPrompt.join("\n\n"),
+ })
+ } else {
+ // If there are only system messages, some APIs (like OpenAI) will fail.
+ // We convert them to a user message in this case.
+ messages.push({
+ role: "user",
+ content: systemPrompt.join("\n\n"),
+ })
+ }
+ }
+
for (const { role, content, ...message } of prompt) {
const metadata = getOpenAIMetadata({ ...message })
switch (role) {
case "system": {
- messages.push({
- role: "system",
- content: content,
- ...metadata,
- })
+ // Handled above
break
}

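
Taken together, the rewritten converter has two observable behaviors, both pinned down by the tests at the end of this diff: system prompts are hoisted into one merged system message when any non-system message follows, and a system-only prompt is downgraded to a user message so strict endpoints do not reject it. A sketch of both cases:

// Mixed prompt: system prompts merge into a single leading system message.
convertToOpenAICompatibleChatMessages([
  { role: "system", content: "System 1" },
  { role: "system", content: "System 2" },
  { role: "user", content: [{ type: "text", text: "Hello" }] },
])
// => [{ role: "system", content: "System 1\n\nSystem 2" },
//     { role: "user", content: "Hello" }]

// System-only prompt: converted to a user message instead.
convertToOpenAICompatibleChatMessages([
  { role: "system", content: "You are a helpful assistant." },
])
// => [{ role: "user", content: "You are a helpful assistant." }]

One consequence visible in the diff: the ...metadata spread that the old "system" case attached is gone, so per-message provider options on system prompts are no longer forwarded.
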
@@ -226,8 +226,8 @@ export class OpenAICompatibleChatLanguageModel implements LanguageModelV3 {
})
}

- // reasoning content (Copilot uses reasoning_text):
- const reasoning = choice.message.reasoning_text
+ // reasoning content (Copilot uses reasoning_text, DeepSeek/oMLX uses reasoning_content):
+ const reasoning = choice.message.reasoning_text ?? choice.message.reasoning_content
if (reasoning != null && reasoning.length > 0) {
content.push({
type: "reasoning",
@@ -477,8 +477,8 @@ export class OpenAICompatibleChatLanguageModel implements LanguageModelV3 {
reasoningOpaque = delta.reasoning_opaque
}

- // enqueue reasoning before text deltas (Copilot uses reasoning_text):
- const reasoningContent = delta.reasoning_text
+ // enqueue reasoning before text deltas (Copilot uses reasoning_text, DeepSeek/oMLX uses reasoning_content):
+ const reasoningContent = delta.reasoning_text ?? delta.reasoning_content
if (reasoningContent) {
if (!isActiveReasoning) {
controller.enqueue({
@@ -757,6 +757,7 @@ const OpenAICompatibleChatResponseSchema = z.object({
// Copilot-specific reasoning fields
reasoning_text: z.string().nullish(),
reasoning_opaque: z.string().nullish(),
+ reasoning_content: z.string().nullish(),
tool_calls: z
.array(
z.object({
@@ -792,6 +793,7 @@ const createOpenAICompatibleChatChunkSchema = <ERROR_SCHEMA extends z.core.$ZodT
// Copilot-specific reasoning fields
reasoning_text: z.string().nullish(),
reasoning_opaque: z.string().nullish(),
+ reasoning_content: z.string().nullish(),
tool_calls: z
.array(
z.object({
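
The schema additions above make reasoning_content a first-class nullish string on both the response and chunk schemas. A sketch of the two payload shapes the fallback now handles (Copilot-style reasoning_text versus DeepSeek-style reasoning_content; the field values are illustrative):

// Non-streaming choices as they might arrive from each backend.
const copilotChoice = { message: { content: "Hi", reasoning_text: "thinking..." } } as any
const deepseekChoice = { message: { content: "Hi", reasoning_content: "thinking..." } } as any

for (const choice of [copilotChoice, deepseekChoice]) {
  // Same fallback as the diff: prefer reasoning_text, else reasoning_content.
  const reasoning = choice.message.reasoning_text ?? choice.message.reasoning_content
  console.log(reasoning) // "thinking..." in both cases
}
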
37 changes: 28 additions & 9 deletions packages/opencode/src/session/llm.ts
@@ -26,6 +26,23 @@ export namespace LLM {
const log = Log.create({ service: "llm" })
export const OUTPUT_TOKEN_MAX = ProviderTransform.OUTPUT_TOKEN_MAX

+ function getSystemMessageMode(model: Provider.Model): "single" | "multiple" {
+ if (model.capabilities.systemMessage) {
+ return model.capabilities.systemMessage
+ }
+ const providerDefaults: Record<string, "single" | "multiple"> = {
+ anthropic: "multiple",
+ }
+ const mode = providerDefaults[model.providerID] ?? "single"
+ if (!providerDefaults[model.providerID]) {
+ log.info("Using default 'single' systemMessage mode for provider", {
+ providerID: model.providerID,
+ modelID: model.id,
+ })
+ }
+ return mode
+ }
+
export type StreamInput = {
user: MessageV2.User
sessionID: string
@@ -156,15 +173,17 @@
? input.messages
: isWorkflow
? input.messages
- : [
- ...system.map(
- (x): ModelMessage => ({
- role: "system",
- content: x,
- }),
- ),
- ...input.messages,
- ]
+ : getSystemMessageMode(input.model) === "multiple"
+ ? [
+ ...system.map(
+ (x): ModelMessage => ({
+ role: "system",
+ content: x,
+ }),
+ ),
+ ...input.messages,
+ ]
+ : ([{ role: "system", content: system.join("\n") }, ...input.messages] as ModelMessage[])

const params = await Plugin.trigger(
"chat.params",
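
Resolution order in getSystemMessageMode: an explicit model capability wins; otherwise a per-provider default applies (only anthropic maps to "multiple" here); otherwise the mode falls back to "single" and logs once. A minimal re-statement of that logic for illustration, not the exported API:

type Mode = "single" | "multiple"

// Hypothetical standalone version of the lookup.
function resolveMode(providerID: string, explicit?: Mode): Mode {
  if (explicit) return explicit
  const providerDefaults: Record<string, Mode> = { anthropic: "multiple" }
  return providerDefaults[providerID] ?? "single"
}

resolveMode("anthropic")           // "multiple" (provider default)
resolveMode("openai", "multiple")  // "multiple" (explicit capability wins)
resolveMode("some-local-provider") // "single"   (fallback; logged upstream)

Note that this single-mode path joins the system prompts with "\n", while the OpenAI-compatible converter earlier in the diff joins with "\n\n".
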
@@ -2,7 +2,7 @@ import { convertToOpenAICompatibleChatMessages as convertToCopilotMessages } fro
import { describe, test, expect } from "bun:test"

describe("system messages", () => {
test("should convert system message content to string", () => {
test("should convert lone system message content to a user message", () => {
const result = convertToCopilotMessages([
{
role: "system",
@@ -12,11 +12,24 @@

expect(result).toEqual([
{
role: "system",
role: "user",
content: "You are a helpful assistant with AGENTS.md instructions.",
},
])
})

test("should merge multiple system messages into one", () => {
const result = convertToCopilotMessages([
{ role: "system", content: "System 1" },
{ role: "system", content: "System 2" },
{ role: "user", content: [{ type: "text", text: "Hello" }] },
])

expect(result).toEqual([
{ role: "system", content: "System 1\n\nSystem 2" },
{ role: "user", content: "Hello" },
])
})
})

describe("user messages", () => {