From 70bb1bb43dad460476b8d0854e83bbd7c4b9f876 Mon Sep 17 00:00:00 2001
From: zerob13
Date: Fri, 19 Sep 2025 20:07:00 +0800
Subject: [PATCH] feat: add support for zero temperature

---
 .../providers/anthropicProvider.ts               | 16 ++++++++--------
 .../providers/awsBedrockProvider.ts              |  2 +-
 .../providers/geminiProvider.ts                  |  2 +-
 .../providers/githubCopilotProvider.ts           |  4 ++--
 .../providers/ollamaProvider.ts                  | 10 +++++-----
 .../mcpPresenter/inMemoryServers/imageServer.ts  |  4 ++--
 .../sqlitePresenter/tables/conversations.ts      |  2 +-
 src/main/presenter/threadPresenter/index.ts      |  2 +-
 src/renderer/src/components/ChatConfig.vue       |  2 +-
 src/renderer/src/components/TitleView.vue        |  6 ++++--
 10 files changed, 26 insertions(+), 24 deletions(-)

diff --git a/src/main/presenter/llmProviderPresenter/providers/anthropicProvider.ts b/src/main/presenter/llmProviderPresenter/providers/anthropicProvider.ts
index f93654778..0d09f495f 100644
--- a/src/main/presenter/llmProviderPresenter/providers/anthropicProvider.ts
+++ b/src/main/presenter/llmProviderPresenter/providers/anthropicProvider.ts
@@ -781,7 +781,7 @@ export class AnthropicProvider extends BaseLLMProvider {
       requestParams = {
         model: modelId,
         max_tokens: maxTokens || 1024,
-        temperature: temperature || 0.7,
+        temperature: temperature ?? 0.7,
         system: "You are Claude Code, Anthropic's official CLI for Claude.",
         messages: oauthMessages
       }
@@ -794,7 +794,7 @@
       requestParams = {
         model: modelId,
         max_tokens: maxTokens || 1024,
-        temperature: temperature || 0.7,
+        temperature: temperature ?? 0.7,
         messages: formattedMessages.messages
       }

@@ -898,7 +898,7 @@ ${text}
       requestParams = {
         model: modelId,
         max_tokens: maxTokens || 1024,
-        temperature: temperature || 0.7,
+        temperature: temperature ?? 0.7,
         system: "You are Claude Code, Anthropic's official CLI for Claude.",
         messages: [{ role: 'user', content: finalPrompt }]
       }
@@ -909,7 +909,7 @@ ${text}
       requestParams = {
         model: modelId,
         max_tokens: maxTokens || 1024,
-        temperature: temperature || 0.7,
+        temperature: temperature ?? 0.7,
         messages: [{ role: 'user' as const, content: [{ type: 'text' as const, text: prompt }] }]
       }

@@ -967,7 +967,7 @@ ${context}
       requestParams = {
         model: modelId,
         max_tokens: maxTokens || 1024,
-        temperature: temperature || 0.7,
+        temperature: temperature ?? 0.7,
         system: "You are Claude Code, Anthropic's official CLI for Claude.",
         messages: [{ role: 'user', content: finalPrompt }]
       }
@@ -978,7 +978,7 @@ ${context}
       requestParams = {
         model: modelId,
         max_tokens: maxTokens || 1024,
-        temperature: temperature || 0.7,
+        temperature: temperature ?? 0.7,
         messages: [{ role: 'user' as const, content: [{ type: 'text' as const, text: prompt }] }]
       }

@@ -1042,7 +1042,7 @@ ${context}
     const streamParams = {
       model: modelId,
       max_tokens: maxTokens || 1024,
-      temperature: temperature || 0.7,
+      temperature: temperature ?? 0.7,
       messages: formattedMessagesObject.messages,
       stream: true
     } as Anthropic.Messages.MessageCreateParamsStreaming
@@ -1266,7 +1266,7 @@ ${context}
       const streamParams: any = {
         model: modelId,
         max_tokens: maxTokens || 1024,
-        temperature: temperature || 0.7,
+        temperature: temperature ?? 0.7,
         system: "You are Claude Code, Anthropic's official CLI for Claude.",
         messages: oauthMessages,
         stream: true
diff --git a/src/main/presenter/llmProviderPresenter/providers/awsBedrockProvider.ts b/src/main/presenter/llmProviderPresenter/providers/awsBedrockProvider.ts
index fecd91d0c..0ee052b26 100644
--- a/src/main/presenter/llmProviderPresenter/providers/awsBedrockProvider.ts
+++ b/src/main/presenter/llmProviderPresenter/providers/awsBedrockProvider.ts
@@ -658,7 +658,7 @@ ${text}
     const payload = {
       anthropic_version: 'bedrock-2023-05-31',
       max_tokens: maxTokens || 1024,
-      temperature: temperature || 0.7,
+      temperature: temperature ?? 0.7,
       // system: formattedMessagesObject.system,
       messages: formattedMessagesObject.messages,
       thinking: undefined as any,
diff --git a/src/main/presenter/llmProviderPresenter/providers/geminiProvider.ts b/src/main/presenter/llmProviderPresenter/providers/geminiProvider.ts
index e50623045..5eb6df409 100644
--- a/src/main/presenter/llmProviderPresenter/providers/geminiProvider.ts
+++ b/src/main/presenter/llmProviderPresenter/providers/geminiProvider.ts
@@ -649,7 +649,7 @@ export class GeminiProvider extends BaseLLMProvider {

     // 创建 GenerateContentConfig
     const generateContentConfig: GenerateContentConfig = this.getGenerateContentConfig(
-      temperature || 0.7,
+      temperature ?? 0.7,
       maxTokens,
       modelId,
       false // completions 方法中不处理 reasoning
diff --git a/src/main/presenter/llmProviderPresenter/providers/githubCopilotProvider.ts b/src/main/presenter/llmProviderPresenter/providers/githubCopilotProvider.ts
index 84de69896..a96899413 100644
--- a/src/main/presenter/llmProviderPresenter/providers/githubCopilotProvider.ts
+++ b/src/main/presenter/llmProviderPresenter/providers/githubCopilotProvider.ts
@@ -350,7 +350,7 @@ export class GithubCopilotProvider extends BaseLLMProvider {
       messages: formattedMessages,
       max_tokens: maxTokens || 4096,
       stream: true,
-      temperature: temperature || 0.7
+      temperature: temperature ?? 0.7
     }

     // Add tools when available
@@ -506,7 +506,7 @@ export class GithubCopilotProvider extends BaseLLMProvider {
       messages: formattedMessages,
       max_tokens: maxTokens || 4096,
       stream: false,
-      temperature: temperature || 0.7
+      temperature: temperature ?? 0.7
     }

     const headers: Record<string, string> = {
diff --git a/src/main/presenter/llmProviderPresenter/providers/ollamaProvider.ts b/src/main/presenter/llmProviderPresenter/providers/ollamaProvider.ts
index f70091eb6..a9e5e1733 100644
--- a/src/main/presenter/llmProviderPresenter/providers/ollamaProvider.ts
+++ b/src/main/presenter/llmProviderPresenter/providers/ollamaProvider.ts
@@ -158,7 +158,7 @@ export class OllamaProvider extends BaseLLMProvider {
       model: modelId,
       messages: this.formatMessages(messages),
       options: {
-        temperature: temperature || 0.7,
+        temperature: temperature ?? 0.7,
         num_predict: maxTokens
       }
     })
@@ -226,7 +226,7 @@ export class OllamaProvider extends BaseLLMProvider {
       model: modelId,
       prompt: prompt,
       options: {
-        temperature: temperature || 0.5,
+        temperature: temperature ?? 0.5,
         num_predict: maxTokens
       }
     })
@@ -252,7 +252,7 @@ export class OllamaProvider extends BaseLLMProvider {
       model: modelId,
       prompt: prompt,
       options: {
-        temperature: temperature || 0.7,
+        temperature: temperature ?? 0.7,
         num_predict: maxTokens
       }
     })
@@ -280,7 +280,7 @@ export class OllamaProvider extends BaseLLMProvider {
       model: modelId,
       prompt: prompt,
       options: {
-        temperature: temperature || 0.8,
+        temperature: temperature ?? 0.8,
         num_predict: maxTokens || 200
       }
     })
@@ -499,7 +499,7 @@ export class OllamaProvider extends BaseLLMProvider {
       model: modelId,
       messages: processedMessages,
       options: {
-        temperature: temperature || 0.7,
+        temperature: temperature ?? 0.7,
         num_predict: maxTokens
       },
       stream: true as const,
diff --git a/src/main/presenter/mcpPresenter/inMemoryServers/imageServer.ts b/src/main/presenter/mcpPresenter/inMemoryServers/imageServer.ts
index 5922fea1b..0be93e63e 100644
--- a/src/main/presenter/mcpPresenter/inMemoryServers/imageServer.ts
+++ b/src/main/presenter/mcpPresenter/inMemoryServers/imageServer.ts
@@ -124,7 +124,7 @@ export class ImageServer {
         this.provider,
         messages,
         this.model,
-        modelConfig?.temperature || 0.6,
+        modelConfig?.temperature ?? 0.6,
         modelConfig?.maxTokens || 1000
       )
       console.log(`Model response received: ${response}`)
@@ -171,7 +171,7 @@ export class ImageServer {
         this.provider,
         messages,
         this.model,
-        modelConfig?.temperature || 0.6,
+        modelConfig?.temperature ?? 0.6,
         modelConfig?.maxTokens || 1000
       )
       console.log(`OCR text received: ${ocrText}`)
diff --git a/src/main/presenter/sqlitePresenter/tables/conversations.ts b/src/main/presenter/sqlitePresenter/tables/conversations.ts
index 5e88009b2..1143d15ed 100644
--- a/src/main/presenter/sqlitePresenter/tables/conversations.ts
+++ b/src/main/presenter/sqlitePresenter/tables/conversations.ts
@@ -159,7 +159,7 @@ export class ConversationsTable extends BaseTable {
       now,
       now,
       settings.systemPrompt || '',
-      settings.temperature || 0.7,
+      settings.temperature ?? 0.7,
       settings.contextLength || 4000,
       settings.maxTokens || 2000,
       settings.providerId || 'openai',
diff --git a/src/main/presenter/threadPresenter/index.ts b/src/main/presenter/threadPresenter/index.ts
index 73280f865..7aec8945e 100644
--- a/src/main/presenter/threadPresenter/index.ts
+++ b/src/main/presenter/threadPresenter/index.ts
@@ -1046,7 +1046,7 @@ export class ThreadPresenter implements IThreadPresenter {
     if (settings.maxTokens) {
       mergedSettings.maxTokens = settings.maxTokens
     }
-    if (settings.temperature) {
+    if (settings.temperature !== undefined && settings.temperature !== null) {
       mergedSettings.temperature = settings.temperature
     }
     if (settings.contextLength) {
diff --git a/src/renderer/src/components/ChatConfig.vue b/src/renderer/src/components/ChatConfig.vue
index d0c388c09..552731056 100644
--- a/src/renderer/src/components/ChatConfig.vue
+++ b/src/renderer/src/components/ChatConfig.vue
@@ -436,7 +436,7 @@ const qwen3ThinkingBudgetError = computed(() => {
           {{ temperatureValue[0] }}
-
+
diff --git a/src/renderer/src/components/TitleView.vue b/src/renderer/src/components/TitleView.vue
index 9a25ed6bc..86c255b92 100644
--- a/src/renderer/src/components/TitleView.vue
+++ b/src/renderer/src/components/TitleView.vue
@@ -163,8 +163,10 @@ const loadModelConfig = async () => {
   } else if (maxTokens.value < 1024) {
     maxTokens.value = 1024
   }
-  // reset to default temperature
-  temperature.value = config.temperature ?? 0.6
+  // Do not override user-set temperature; only set if unset
+  if (temperature.value === undefined || temperature.value === null) {
+    temperature.value = config.temperature ?? 0.6
+  }

   if (config.thinkingBudget !== undefined) {
     if (thinkingBudget.value === undefined) {
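
A minimal TypeScript sketch of the behavior this patch addresses (illustrative only; the resolveTemperature helper below is not from the patched codebase): with `||`, an explicit temperature of 0 is falsy and silently falls back to the provider default, whereas `??` falls back only on null or undefined. The same reasoning motivates ThreadPresenter checking `settings.temperature !== undefined && settings.temperature !== null` instead of a bare truthiness test.

// Illustrative only — not part of the patch. Shows why `??` replaces `||`.
function resolveTemperature(temperature: number | null | undefined, fallback = 0.7): number {
  // `temperature || fallback` treats 0 as "missing" because 0 is falsy,
  // so a deliberately deterministic request (temperature = 0) would be
  // silently bumped back up to the default.
  // `temperature ?? fallback` only falls back when the value is null or undefined.
  return temperature ?? fallback
}

console.log(0 || 0.7)                      // 0.7 -> zero temperature lost with `||`
console.log(0 ?? 0.7)                      // 0   -> zero temperature preserved with `??`
console.log(resolveTemperature(0))         // 0
console.log(resolveTemperature(undefined)) // 0.7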