diff --git a/packages/types/src/providers/fireworks.ts b/packages/types/src/providers/fireworks.ts
index 4e4f90dc722..35803884741 100644
--- a/packages/types/src/providers/fireworks.ts
+++ b/packages/types/src/providers/fireworks.ts
@@ -3,6 +3,7 @@ import type { ModelInfo } from "../model.js"
 export type FireworksModelId =
 	| "accounts/fireworks/models/kimi-k2-instruct"
 	| "accounts/fireworks/models/kimi-k2-instruct-0905"
+	| "accounts/fireworks/models/kimi-k2-thinking"
 	| "accounts/fireworks/models/minimax-m2"
 	| "accounts/fireworks/models/qwen3-235b-a22b-instruct-2507"
 	| "accounts/fireworks/models/qwen3-coder-480b-a35b-instruct"
@@ -41,6 +42,21 @@ export const fireworksModels = {
 		description:
 			"Kimi K2 is a state-of-the-art mixture-of-experts (MoE) language model with 32 billion activated parameters and 1 trillion total parameters. Trained with the Muon optimizer, Kimi K2 achieves exceptional performance across frontier knowledge, reasoning, and coding tasks while being meticulously optimized for agentic capabilities.",
 	},
+	"accounts/fireworks/models/kimi-k2-thinking": {
+		maxTokens: 16000,
+		contextWindow: 256000,
+		supportsImages: false,
+		supportsPromptCache: true,
+		supportsNativeTools: true,
+		supportsTemperature: true,
+		preserveReasoning: true,
+		defaultTemperature: 1.0,
+		inputPrice: 0.6,
+		outputPrice: 2.5,
+		cacheReadsPrice: 0.15,
+		description:
+			"The kimi-k2-thinking model is a general-purpose agentic reasoning model developed by Moonshot AI. Thanks to its strength in deep reasoning and multi-turn tool use, it can solve even the hardest problems.",
+	},
 	"accounts/fireworks/models/minimax-m2": {
 		maxTokens: 4096,
 		contextWindow: 204800,
diff --git a/src/api/providers/__tests__/fireworks.spec.ts b/src/api/providers/__tests__/fireworks.spec.ts
index 9b837fef609..ac5c4396f10 100644
--- a/src/api/providers/__tests__/fireworks.spec.ts
+++ b/src/api/providers/__tests__/fireworks.spec.ts
@@ -115,6 +115,31 @@ describe("FireworksHandler", () => {
 		)
 	})
 
+	it("should return Kimi K2 Thinking model with correct configuration", () => {
+		const testModelId: FireworksModelId = "accounts/fireworks/models/kimi-k2-thinking"
+		const handlerWithModel = new FireworksHandler({
+			apiModelId: testModelId,
+			fireworksApiKey: "test-fireworks-api-key",
+		})
+		const model = handlerWithModel.getModel()
+		expect(model.id).toBe(testModelId)
+		expect(model.info).toEqual(
+			expect.objectContaining({
+				maxTokens: 16000,
+				contextWindow: 256000,
+				supportsImages: false,
+				supportsPromptCache: true,
+				supportsNativeTools: true,
+				supportsTemperature: true,
+				preserveReasoning: true,
+				defaultTemperature: 1.0,
+				inputPrice: 0.6,
+				outputPrice: 2.5,
+				cacheReadsPrice: 0.15,
+			}),
+		)
+	})
+
 	it("should return MiniMax M2 model with correct configuration", () => {
 		const testModelId: FireworksModelId = "accounts/fireworks/models/minimax-m2"
 		const handlerWithModel = new FireworksHandler({
@@ -424,16 +449,85 @@ describe("FireworksHandler", () => {
 		)
 	})
 
-	it("should use default temperature of 0.5", () => {
-		const testModelId: FireworksModelId = "accounts/fireworks/models/kimi-k2-instruct"
+	it("should use provider default temperature of 0.5 for models without defaultTemperature", async () => {
+		const modelId: FireworksModelId = "accounts/fireworks/models/kimi-k2-instruct"
 		const handlerWithModel = new FireworksHandler({
-			apiModelId: testModelId,
+			apiModelId: modelId,
 			fireworksApiKey: "test-fireworks-api-key",
 		})
-		const model = handlerWithModel.getModel()
-		// The temperature is set in the constructor as defaultTemperature: 0.5
-		// This test verifies the handler is configured with the correct default temperature
-		expect(handlerWithModel).toBeDefined()
+
+		mockCreate.mockImplementationOnce(() => ({
+			[Symbol.asyncIterator]: () => ({
+				async next() {
+					return { done: true }
+				},
+			}),
+		}))
+
+		const messageGenerator = handlerWithModel.createMessage("system", [])
+		await messageGenerator.next()
+
+		expect(mockCreate).toHaveBeenCalledWith(
+			expect.objectContaining({
+				temperature: 0.5,
+			}),
+			undefined,
+		)
+	})
+
+	it("should use model defaultTemperature (1.0) over provider default (0.5) for kimi-k2-thinking", async () => {
+		const modelId: FireworksModelId = "accounts/fireworks/models/kimi-k2-thinking"
+		const handlerWithModel = new FireworksHandler({
+			apiModelId: modelId,
+			fireworksApiKey: "test-fireworks-api-key",
+		})
+
+		mockCreate.mockImplementationOnce(() => ({
+			[Symbol.asyncIterator]: () => ({
+				async next() {
+					return { done: true }
+				},
+			}),
+		}))
+
+		const messageGenerator = handlerWithModel.createMessage("system", [])
+		await messageGenerator.next()
+
+		// Model's defaultTemperature (1.0) should take precedence over provider's default (0.5)
+		expect(mockCreate).toHaveBeenCalledWith(
+			expect.objectContaining({
+				temperature: 1.0,
+			}),
+			undefined,
+		)
+	})
+
+	it("should use user-specified temperature over model and provider defaults", async () => {
+		const modelId: FireworksModelId = "accounts/fireworks/models/kimi-k2-thinking"
+		const handlerWithModel = new FireworksHandler({
+			apiModelId: modelId,
+			fireworksApiKey: "test-fireworks-api-key",
+			modelTemperature: 0.7,
+		})
+
+		mockCreate.mockImplementationOnce(() => ({
+			[Symbol.asyncIterator]: () => ({
+				async next() {
+					return { done: true }
+				},
+			}),
+		}))
+
+		const messageGenerator = handlerWithModel.createMessage("system", [])
+		await messageGenerator.next()
+
+		// User-specified temperature should take precedence over everything
+		expect(mockCreate).toHaveBeenCalledWith(
+			expect.objectContaining({
+				temperature: 0.7,
+			}),
+			undefined,
+		)
 	})
 
 	it("should handle empty response in completePrompt", async () => {
diff --git a/src/api/providers/base-openai-compatible-provider.ts b/src/api/providers/base-openai-compatible-provider.ts
index 92b9558c451..5d46ce83c0a 100644
--- a/src/api/providers/base-openai-compatible-provider.ts
+++ b/src/api/providers/base-openai-compatible-provider.ts
@@ -84,7 +84,7 @@ export abstract class BaseOpenAiCompatibleProvider
 				format: "openai",
 			}) ?? undefined
 
-		const temperature = this.options.modelTemperature ?? this.defaultTemperature
+		const temperature = this.options.modelTemperature ?? info.defaultTemperature ?? this.defaultTemperature
 
 		const params: OpenAI.Chat.Completions.ChatCompletionCreateParamsStreaming = {
 			model,
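
Note on the core change: the one-line edit in base-openai-compatible-provider.ts is what the new model entry and all three new tests exercise. It slots the per-model `info.defaultTemperature` between the user's explicit setting and the provider-wide fallback. Below is a minimal standalone sketch of that precedence chain; `resolveTemperature` and the pared-down interfaces are hypothetical names for this note rather than the repo's actual API, and only the `??` chain itself mirrors the patched line.

```typescript
// Hypothetical sketch of the temperature precedence introduced by this PR.
interface SketchModelInfo {
	defaultTemperature?: number // per-model default, e.g. 1.0 for kimi-k2-thinking
}

interface SketchOptions {
	modelTemperature?: number // explicit user setting, if any
}

function resolveTemperature(options: SketchOptions, info: SketchModelInfo, providerDefault: number): number {
	// Nullish coalescing (??) falls through only on null/undefined, so an
	// explicit user temperature of 0 is honored rather than replaced by a default.
	return options.modelTemperature ?? info.defaultTemperature ?? providerDefault
}

// Mirrors the three test cases added in fireworks.spec.ts:
resolveTemperature({}, {}, 0.5) // 0.5, provider default (kimi-k2-instruct)
resolveTemperature({}, { defaultTemperature: 1.0 }, 0.5) // 1.0, model default (kimi-k2-thinking)
resolveTemperature({ modelTemperature: 0.7 }, { defaultTemperature: 1.0 }, 0.5) // 0.7, user setting wins
```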