Skip to content

Commit c3b2911

Browse files
committed
fix: add tool call context for better conversation
1 parent 0bee52f commit c3b2911

File tree

2 files changed

+129
-59
lines changed

2 files changed

+129
-59
lines changed

src/main/presenter/llmProviderPresenter/providers/openAICompatibleProvider.ts

Lines changed: 110 additions & 57 deletions
Original file line numberDiff line numberDiff line change
@@ -250,14 +250,27 @@ export class OpenAICompatibleProvider extends BaseLLMProvider {
250250
/**
251251
* User messages: Upper layer will insert image_url based on whether vision exists
252252
* Assistant messages: Need to judge and convert images to correct context, as models can be switched
253-
* Tool calls and tool responses: Convert to plain text and merge into assistant messages to avoid API validation errors
254-
* @param messages
255-
* @returns
253+
* Tool calls and tool responses:
254+
* - If supportsFunctionCall=true: Use standard OpenAI format (tool_calls + role:tool)
255+
* - If supportsFunctionCall=false: Convert to mock user messages with function_call_record format
256+
* @param messages - Chat messages array
257+
* @param supportsFunctionCall - Whether the model supports native function calling
258+
* @returns Formatted messages for OpenAI API
256259
*/
257-
protected formatMessages(messages: ChatMessage[]): ChatCompletionMessageParam[] {
260+
protected formatMessages(
261+
messages: ChatMessage[],
262+
supportsFunctionCall: boolean = false
263+
): ChatCompletionMessageParam[] {
258264
const result: ChatCompletionMessageParam[] = []
265+
// Track pending tool calls for non-FC models (to pair with tool responses)
266+
const pendingToolCalls: Map<
267+
string,
268+
{ name: string; arguments: string; assistantContent?: string }
269+
> = new Map()
270+
271+
for (let i = 0; i < messages.length; i++) {
272+
const msg = messages[i]
259273

260-
for (const msg of messages) {
261274
// Handle basic message structure
262275
const baseMessage: Partial<ChatCompletionMessageParam> = {
263276
role: msg.role as 'system' | 'user' | 'assistant' | 'tool'
@@ -293,66 +306,99 @@ export class OpenAICompatibleProvider extends BaseLLMProvider {
293306
continue
294307
}
295308

296-
// Handle assistant messages with tool_calls - convert to plain text
309+
// Handle assistant messages with tool_calls
297310
if (msg.role === 'assistant' && msg.tool_calls && msg.tool_calls.length > 0) {
298-
const contentParts: string[] = []
299-
300-
// Add original assistant content if exists (ensure it's a string)
301-
if (baseMessage.content) {
302-
const contentStr =
303-
typeof baseMessage.content === 'string'
304-
? baseMessage.content
305-
: JSON.stringify(baseMessage.content)
306-
contentParts.push(contentStr)
307-
}
311+
if (supportsFunctionCall) {
312+
// Standard OpenAI format - preserve tool_calls structure
313+
result.push({
314+
role: 'assistant',
315+
content: baseMessage.content || null,
316+
tool_calls: msg.tool_calls
317+
} as ChatCompletionMessageParam)
318+
} else {
319+
// Mock format: Store tool calls and assistant content, wait for tool responses
320+
// First add the assistant message if it has content
321+
if (baseMessage.content) {
322+
result.push({
323+
role: 'assistant',
324+
content: baseMessage.content
325+
} as ChatCompletionMessageParam)
326+
}
308327

309-
// Convert tool_calls to text format
310-
for (const toolCall of msg.tool_calls) {
311-
const toolCallText = `[Tool Call: ${toolCall.function?.name || 'unknown'}]`
312-
let argsText = ''
313-
try {
314-
const args =
315-
typeof toolCall.function?.arguments === 'string'
316-
? JSON.parse(toolCall.function.arguments)
317-
: toolCall.function?.arguments
318-
argsText = JSON.stringify(args, null, 2)
319-
} catch {
320-
argsText = String(toolCall.function?.arguments || '{}')
328+
// Store tool calls for pairing with responses
329+
for (const toolCall of msg.tool_calls) {
330+
const toolCallId = toolCall.id || `tool-${Date.now()}-${Math.random()}`
331+
pendingToolCalls.set(toolCallId, {
332+
name: toolCall.function?.name || 'unknown',
333+
arguments:
334+
typeof toolCall.function?.arguments === 'string'
335+
? toolCall.function.arguments
336+
: JSON.stringify(toolCall.function?.arguments || {}),
337+
assistantContent: baseMessage.content as string | undefined
338+
})
321339
}
322-
contentParts.push(`${toolCallText}\nArguments:\n\`\`\`json\n${argsText}\n\`\`\``)
323340
}
324-
325-
// Create merged assistant message
326-
result.push({
327-
role: 'assistant',
328-
content: contentParts.join('\n\n')
329-
} as ChatCompletionMessageParam)
330341
continue
331342
}
332343

333-
// Handle tool messages - append to previous message (should be assistant)
344+
// Handle tool messages
334345
if (msg.role === 'tool') {
335-
const toolContent =
336-
typeof msg.content === 'string' ? msg.content : JSON.stringify(msg.content)
337-
const toolResultText = `[Tool Result]\n${toolContent}`
338-
339-
// Find the last message in result and append
340-
if (result.length > 0) {
341-
const lastMessage = result[result.length - 1]
342-
// Ensure lastMessage.content is a string before appending
343-
const currentContent =
344-
typeof lastMessage.content === 'string'
345-
? lastMessage.content
346-
: JSON.stringify(lastMessage.content || '')
347-
lastMessage.content = currentContent
348-
? `${currentContent}\n\n${toolResultText}`
349-
: toolResultText
350-
} else {
351-
// If no previous message, create a new assistant message
346+
if (supportsFunctionCall) {
347+
// Standard OpenAI format - preserve role:tool with tool_call_id
352348
result.push({
353-
role: 'assistant',
354-
content: toolResultText
349+
role: 'tool',
350+
content: typeof msg.content === 'string' ? msg.content : JSON.stringify(msg.content),
351+
tool_call_id: msg.tool_call_id || ''
355352
} as ChatCompletionMessageParam)
353+
} else {
354+
// Mock format: Create user message with function_call_record
355+
const toolCallId = msg.tool_call_id || ''
356+
const pendingCall = pendingToolCalls.get(toolCallId)
357+
358+
if (pendingCall) {
359+
// Parse arguments to JSON if it's a string
360+
let argsObj
361+
try {
362+
argsObj =
363+
typeof pendingCall.arguments === 'string'
364+
? JSON.parse(pendingCall.arguments)
365+
: pendingCall.arguments
366+
} catch {
367+
argsObj = {}
368+
}
369+
370+
// Format as function_call_record in user message
371+
const mockRecord = {
372+
function_call_record: {
373+
name: pendingCall.name,
374+
arguments: argsObj,
375+
response:
376+
typeof msg.content === 'string' ? msg.content : JSON.stringify(msg.content)
377+
}
378+
}
379+
380+
result.push({
381+
role: 'user',
382+
content: `<function_call>${JSON.stringify(mockRecord)}</function_call>`
383+
} as ChatCompletionMessageParam)
384+
385+
pendingToolCalls.delete(toolCallId)
386+
} else {
387+
// Fallback: tool response without matching call, still format as user message
388+
const mockRecord = {
389+
function_call_record: {
390+
name: 'unknown',
391+
arguments: {},
392+
response:
393+
typeof msg.content === 'string' ? msg.content : JSON.stringify(msg.content)
394+
}
395+
}
396+
397+
result.push({
398+
role: 'user',
399+
content: `<function_call>${JSON.stringify(mockRecord)}</function_call>`
400+
} as ChatCompletionMessageParam)
401+
}
356402
}
357403
continue
358404
}
@@ -378,8 +424,13 @@ export class OpenAICompatibleProvider extends BaseLLMProvider {
378424
if (!modelId) {
379425
throw new Error('Model ID is required')
380426
}
427+
428+
// Check if model supports function calling
429+
const modelConfig = this.configPresenter.getModelConfig(modelId, this.provider.id)
430+
const supportsFunctionCall = modelConfig?.functionCall || false
431+
381432
const requestParams: OpenAI.Chat.ChatCompletionCreateParams = {
382-
messages: this.formatMessages(messages),
433+
messages: this.formatMessages(messages, supportsFunctionCall),
383434
model: modelId,
384435
stream: false,
385436
temperature: temperature,
@@ -673,7 +724,9 @@ export class OpenAICompatibleProvider extends BaseLLMProvider {
673724
// 为 OpenAI 聊天补全准备消息和工具
674725
const tools = mcpTools || []
675726
const supportsFunctionCall = modelConfig?.functionCall || false // 判断是否支持原生函数调用
676-
let processedMessages = [...this.formatMessages(messages)] as ChatCompletionMessageParam[]
727+
let processedMessages = [
728+
...this.formatMessages(messages, supportsFunctionCall)
729+
] as ChatCompletionMessageParam[]
677730

678731
// 如果不支持原生函数调用但存在工具,则准备非原生函数调用提示
679732
if (tools.length > 0 && !supportsFunctionCall) {

src/main/presenter/threadPresenter/promptBuilder.ts

Lines changed: 19 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -19,6 +19,7 @@ import type { MCPToolDefinition } from '../../../shared/presenter'
1919
import { ContentEnricher } from './contentEnricher'
2020
import { buildUserMessageContext, getNormalizedUserMessageText } from './messageContent'
2121
import { generateSearchPrompt } from './searchManager'
22+
import { nanoid } from 'nanoid'
2223

2324
export type PendingToolCall = {
2425
id: string
@@ -466,31 +467,47 @@ function addContextMessages(
466467
const content = msg.content as AssistantMessageBlock[]
467468
const messageContent: ChatMessageContent[] = []
468469
const toolCalls: ChatMessage['tool_calls'] = []
470+
const toolResponses: { id: string; response: string }[] = []
469471

470472
content.forEach((block) => {
471473
if (block.type === 'tool_call' && block.tool_call) {
474+
let toolCallId = block.tool_call.id || nanoid(8)
472475
toolCalls.push({
473-
id: block.tool_call.id,
476+
id: toolCallId,
474477
type: 'function',
475478
function: {
476479
name: block.tool_call.name,
477480
arguments: block.tool_call.params || ''
478481
}
479482
})
483+
// Store tool response separately to create role:tool messages
480484
if (block.tool_call.response) {
481-
messageContent.push({ type: 'text', text: block.tool_call.response })
485+
toolResponses.push({
486+
id: toolCallId,
487+
response: block.tool_call.response
488+
})
482489
}
483490
} else if (block.type === 'content' && block.content) {
484491
messageContent.push({ type: 'text', text: block.content })
485492
}
486493
})
487494

495+
// Add assistant message with tool_calls (without responses in content)
488496
if (toolCalls.length > 0) {
489497
resultMessages.push({
490498
role: 'assistant',
491499
content: messageContent.length > 0 ? messageContent : undefined,
492500
tool_calls: toolCalls
493501
})
502+
503+
// Add separate role:tool messages for each tool response
504+
toolResponses.forEach((toolResp) => {
505+
resultMessages.push({
506+
role: 'tool',
507+
content: toolResp.response,
508+
tool_call_id: toolResp.id
509+
})
510+
})
494511
} else if (messageContent.length > 0) {
495512
resultMessages.push({
496513
role: 'assistant',

0 commit comments

Comments (0)