diff --git a/packages/types/src/providers/moonshot.ts b/packages/types/src/providers/moonshot.ts
index 7ddafab76b7..a3f34db6661 100644
--- a/packages/types/src/providers/moonshot.ts
+++ b/packages/types/src/providers/moonshot.ts
@@ -11,6 +11,7 @@ export const moonshotModels = {
 		contextWindow: 131_072,
 		supportsImages: false,
 		supportsPromptCache: true,
+		supportsNativeTools: true,
 		inputPrice: 0.6, // $0.60 per million tokens (cache miss)
 		outputPrice: 2.5, // $2.50 per million tokens
 		cacheWritesPrice: 0, // $0 per million tokens (cache miss)
@@ -22,6 +23,7 @@ export const moonshotModels = {
 		contextWindow: 262144,
 		supportsImages: false,
 		supportsPromptCache: true,
+		supportsNativeTools: true,
 		inputPrice: 0.6,
 		outputPrice: 2.5,
 		cacheReadsPrice: 0.15,
@@ -33,6 +35,7 @@ export const moonshotModels = {
 		contextWindow: 262_144,
 		supportsImages: false,
 		supportsPromptCache: true,
+		supportsNativeTools: true,
 		inputPrice: 2.4, // $2.40 per million tokens (cache miss)
 		outputPrice: 10, // $10.00 per million tokens
 		cacheWritesPrice: 0, // $0 per million tokens (cache miss)
@@ -44,6 +47,7 @@ export const moonshotModels = {
 		contextWindow: 262_144, // 262,144 tokens
 		supportsImages: false, // Text-only (no image/vision support)
 		supportsPromptCache: true,
+		supportsNativeTools: true,
 		inputPrice: 0.6, // $0.60 per million tokens (cache miss)
 		outputPrice: 2.5, // $2.50 per million tokens
 		cacheWritesPrice: 0, // $0 per million tokens (cache miss)
diff --git a/src/api/providers/base-openai-compatible-provider.ts b/src/api/providers/base-openai-compatible-provider.ts
index 2c183b7fc8d..6678f21d68c 100644
--- a/src/api/providers/base-openai-compatible-provider.ts
+++ b/src/api/providers/base-openai-compatible-provider.ts
@@ -93,6 +93,9 @@ export abstract class BaseOpenAiCompatibleProvider
 			stream_options: { include_usage: true },
 			...(metadata?.tools && { tools: this.convertToolsForOpenAI(metadata.tools) }),
 			...(metadata?.tool_choice && { tool_choice: metadata.tool_choice }),
+			...(metadata?.toolProtocol === "native" && {
+				parallel_tool_calls: metadata.parallelToolCalls ?? false,
+			}),
 		}
 
 		// Add thinking parameter if reasoning is enabled and model supports it
diff --git a/src/api/providers/openai.ts b/src/api/providers/openai.ts
index e9109b0d7f2..2a2065edd6e 100644
--- a/src/api/providers/openai.ts
+++ b/src/api/providers/openai.ts
@@ -166,6 +166,9 @@ export class OpenAiHandler extends BaseProvider implements SingleCompletionHandl
 			...(reasoning && reasoning),
 			...(metadata?.tools && { tools: this.convertToolsForOpenAI(metadata.tools) }),
 			...(metadata?.tool_choice && { tool_choice: metadata.tool_choice }),
+			...(metadata?.toolProtocol === "native" && {
+				parallel_tool_calls: metadata.parallelToolCalls ?? false,
+			}),
 		}
 
 		// Add max_tokens if needed
@@ -242,6 +245,9 @@ export class OpenAiHandler extends BaseProvider implements SingleCompletionHandl
 				: [systemMessage, ...convertToOpenAiMessages(messages)],
 			...(metadata?.tools && { tools: this.convertToolsForOpenAI(metadata.tools) }),
 			...(metadata?.tool_choice && { tool_choice: metadata.tool_choice }),
+			...(metadata?.toolProtocol === "native" && {
+				parallel_tool_calls: metadata.parallelToolCalls ?? false,
+			}),
 		}
 
 		// Add max_tokens if needed
@@ -359,6 +365,9 @@ export class OpenAiHandler extends BaseProvider implements SingleCompletionHandl
 			temperature: undefined,
 			...(metadata?.tools && { tools: this.convertToolsForOpenAI(metadata.tools) }),
 			...(metadata?.tool_choice && { tool_choice: metadata.tool_choice }),
+			...(metadata?.toolProtocol === "native" && {
+				parallel_tool_calls: metadata.parallelToolCalls ?? false,
+			}),
 		}
 
 		// O3 family models do not support the deprecated max_tokens parameter
@@ -391,6 +400,9 @@ export class OpenAiHandler extends BaseProvider implements SingleCompletionHandl
 			temperature: undefined,
 			...(metadata?.tools && { tools: this.convertToolsForOpenAI(metadata.tools) }),
 			...(metadata?.tool_choice && { tool_choice: metadata.tool_choice }),
+			...(metadata?.toolProtocol === "native" && {
+				parallel_tool_calls: metadata.parallelToolCalls ?? false,
+			}),
 		}
 
 		// O3 family models do not support the deprecated max_tokens parameter
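The three-line addition repeated in each request builder above uses the same conditional-spread pattern: `parallel_tool_calls` is attached only when the native tool protocol is selected, and defaults to false unless the caller opts in. A minimal sketch of that pattern follows; it is illustrative only, not code from this PR, and the `RequestMetadata` shape and `buildRequestOptions` helper are assumed names for the example.

// Illustrative sketch only (not the provider code in this diff).
// `RequestMetadata` is a simplified, assumed shape for the example.
interface RequestMetadata {
	toolProtocol?: string
	parallelToolCalls?: boolean
}

function buildRequestOptions(metadata?: RequestMetadata) {
	return {
		stream: true,
		stream_options: { include_usage: true },
		// Spread only applies when native tool calling is in use; otherwise the
		// parameter is omitted from the request body entirely.
		...(metadata?.toolProtocol === "native" && {
			parallel_tool_calls: metadata.parallelToolCalls ?? false,
		}),
	}
}

// buildRequestOptions({ toolProtocol: "native" })
//   => { stream: true, stream_options: { include_usage: true }, parallel_tool_calls: false }
// buildRequestOptions()
//   => { stream: true, stream_options: { include_usage: true } }  (flag omitted)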