112 changes: 84 additions & 28 deletions src/renderer/settings/components/OllamaProviderSettingsDetail.vue
@@ -129,18 +129,18 @@
<template v-if="!model.pulling">
<ModelConfigItem
:model-name="model.name"
:model-id="model.name"
:model-id="model.meta?.id ?? model.name"
:provider-id="provider.id"
:is-custom-model="true"
:type="
model.capabilities.indexOf('embedding') > -1
? ModelType.Embedding
: ModelType.Chat
"
:enabled="true"
:changeable="false"
@configChanged="refreshModels"
@deleteModel="showDeleteModelConfirm(model.name)"
:type="model.type"
:enabled="model.enabled"
:vision="model.vision"
:function-call="model.functionCall"
:reasoning="model.reasoning"
:enable-search="model.enableSearch"
@enabled-change="handleModelEnabledChange(model.name, $event)"
@config-changed="refreshModels"
@delete-model="showDeleteModelConfirm(model.name)"
/>
</template>
<template v-else>
@@ -279,7 +279,7 @@ import {
} from '@shadcn/components/ui/dialog'
import { useSettingsStore } from '@/stores/settings'
import { useModelCheckStore } from '@/stores/modelCheck'
import type { LLM_PROVIDER } from '@shared/presenter'
import type { LLM_PROVIDER, RENDERER_MODEL_META } from '@shared/presenter'
import ModelConfigItem from '@/components/settings/ModelConfigItem.vue'
import { useToast } from '@/components/use-toast'
import { ModelType } from '@shared/model'
@@ -306,6 +306,12 @@ const checkResult = ref<boolean>(false)
const runningModels = computed(() => settingsStore.ollamaRunningModels)
const localModels = computed(() => settingsStore.ollamaLocalModels)
const pullingModels = computed(() => settingsStore.ollamaPullingModels)
const providerModelMetas = computed<RENDERER_MODEL_META[]>(() => {
const providerEntry = settingsStore.allProviderModels.find(
(item) => item.providerId === props.provider.id
)
return providerEntry?.models ?? []
})

// Preset list of models available to pull
const presetModels = [
@@ -740,43 +746,85 @@ const availableModels = computed(() => {

// Local models to display (including models currently being pulled)
const displayLocalModels = computed(() => {
// Build the model list with pulling state and progress
const models = localModels.value.map((model) => ({
...model,
pulling: pullingModels.value.has(model.name),
progress: pullingModels.value.get(model.name) || 0
}))

// Add models that are being pulled but not yet in the local list
const metaMap = new Map<string, RENDERER_MODEL_META & { ollamaModel?: any }>(
providerModelMetas.value.map((meta) => [
meta.id,
meta as RENDERER_MODEL_META & { ollamaModel?: any }
])
)

const models = localModels.value.map((model: any) => {
const meta = metaMap.get(model.name)
const capabilitySources: string[] = []
if (Array.isArray(model?.capabilities)) {
capabilitySources.push(...model.capabilities)
}
if (meta?.ollamaModel && Array.isArray(meta.ollamaModel?.capabilities)) {
capabilitySources.push(...(meta.ollamaModel.capabilities as string[]))
}
const capabilitySet = new Set(capabilitySources)

const resolvedType =
meta?.type ?? (capabilitySet.has('embedding') ? ModelType.Embedding : ModelType.Chat)

return {
...model,
meta,
pulling: pullingModels.value.has(model.name),
progress: pullingModels.value.get(model.name) || 0,
enabled: meta?.enabled ?? true,
vision: meta?.vision ?? capabilitySet.has('vision'),
functionCall: meta?.functionCall ?? capabilitySet.has('tools'),
reasoning: meta?.reasoning ?? capabilitySet.has('thinking'),
enableSearch: meta?.enableSearch ?? false,
type: resolvedType
}
})

for (const [modelName, progress] of pullingModels.value.entries()) {
if (!models.some((m) => m.name === modelName)) {
if (!models.some((m: any) => m.name === modelName)) {
const meta = metaMap.get(modelName)
const capabilitySources: string[] = []
if (meta?.ollamaModel && Array.isArray(meta.ollamaModel?.capabilities)) {
capabilitySources.push(...(meta.ollamaModel.capabilities as string[]))
}
const capabilitySet = new Set(capabilitySources)

const resolvedType =
meta?.type ?? (capabilitySet.has('embedding') ? ModelType.Embedding : ModelType.Chat)

models.unshift({
name: modelName,
model: modelName, // required field
modified_at: new Date(), // required field
model: modelName,
modified_at: new Date(),
size: 0,
digest: '', // required field
digest: '',
details: {
// required fields
format: '',
family: '',
families: [],
parameter_size: '',
quantization_level: ''
},
model_info: {
context_length: 0,
context_length: meta?.contextLength ?? 0,
embedding_length: 0
},
capabilities: [],
pulling: true,
progress
progress,
meta,
enabled: meta?.enabled ?? true,
vision: meta?.vision ?? capabilitySet.has('vision'),
functionCall: meta?.functionCall ?? capabilitySet.has('tools'),
reasoning: meta?.reasoning ?? capabilitySet.has('thinking'),
enableSearch: meta?.enableSearch ?? false,
type: resolvedType
})
}
}

// Sort: models being pulled first, the rest by name
return models.sort((a, b) => {
return models.sort((a: any, b: any) => {
if (a.pulling && !b.pulling) return -1
if (!a.pulling && b.pulling) return 1
return a.name.localeCompare(b.name)
@@ -823,6 +871,14 @@ const showDeleteModelConfirm = (modelName: string) => {
showDeleteModelDialog.value = true
}

const handleModelEnabledChange = async (modelName: string, enabled: boolean) => {
try {
await settingsStore.updateModelStatus(props.provider.id, modelName, enabled)
} catch (error) {
console.error(`Failed to update model status for ${modelName}:`, error)
}
}

// Confirm model deletion via the settings store
const confirmDeleteModel = async () => {
if (!modelToDelete.value) return
103 changes: 81 additions & 22 deletions src/renderer/src/stores/settings.ts
@@ -1197,30 +1197,89 @@ export const useSettingsStore = defineStore('settings', () => {
const existingOllamaModels =
allProviderModels.value.find((item) => item.providerId === 'ollama')?.models || []

const existingModelMap = new Map<string, RENDERER_MODEL_META & { ollamaModel?: OllamaModel }>(
existingOllamaModels.map((model) => [
model.id,
model as RENDERER_MODEL_META & { ollamaModel?: OllamaModel }
])
)

const modelNames = ollamaLocalModels.value.map((model) => model.name)
const modelStatusMap =
modelNames.length > 0 ? await configP.getBatchModelStatus('ollama', modelNames) : {}

// Convert local Ollama models to the global model format
const ollamaModelsAsGlobal = ollamaLocalModels.value.map((model) => {
// If a model with the same ID already exists, keep its existing configuration
const existingModel = existingOllamaModels.find((m) => m.id === model.name)
const ollamaModelsAsGlobal = await Promise.all(
ollamaLocalModels.value.map(async (model) => {
const existingModel = existingModelMap.get(model.name)
const existingModelExtra = existingModel as
| (RENDERER_MODEL_META & {
temperature?: number
reasoningEffort?: string
verbosity?: string
thinkingBudget?: number
forcedSearch?: boolean
searchStrategy?: string
})
| undefined
const modelConfig = await configP.getModelConfig(model.name, 'ollama')

return {
id: model.name,
name: model.name,
contextLength: model.model_info.context_length || 4096, // model-defined value or default
maxTokens: existingModel?.maxTokens || 2048, // existing value or default
provider: 'ollama',
group: existingModel?.group || 'local',
enabled: true,
isCustom: existingModel?.isCustom || false,
providerId: 'ollama',
vision: model.capabilities.indexOf('vision') > -1,
functionCall: model.capabilities.indexOf('tools') > -1,
reasoning: model.capabilities.indexOf('thinking') > -1,
type: model.capabilities.indexOf('embedding') > -1 ? ModelType.Embedding : ModelType.Chat,
// Keep the rest of the existing config, but make sure Ollama-specific data is refreshed
...(existingModel ? { ...existingModel } : {}),
ollamaModel: model
} as RENDERER_MODEL_META & { ollamaModel: OllamaModel }
})
const capabilitySources: string[] = []
if (Array.isArray((model as any)?.capabilities)) {
capabilitySources.push(...((model as any).capabilities as string[]))
}
if (
existingModel?.ollamaModel &&
Array.isArray((existingModel.ollamaModel as any)?.capabilities)
) {
capabilitySources.push(...((existingModel.ollamaModel as any).capabilities as string[]))
}
const capabilitySet = new Set(capabilitySources)

const contextLength =
modelConfig?.contextLength ??
existingModel?.contextLength ??
(model as any)?.model_info?.context_length ??
4096

const maxTokens = modelConfig?.maxTokens ?? existingModel?.maxTokens ?? 2048

const statusFromStore = modelStatusMap[model.name]
const enabled = statusFromStore ?? existingModel?.enabled ?? true

const type =
modelConfig?.type ??
existingModel?.type ??
(capabilitySet.has('embedding') ? ModelType.Embedding : ModelType.Chat)

return {
...existingModel,
id: model.name,
name: model.name,
contextLength,
maxTokens,
provider: 'ollama',
group: existingModel?.group || 'local',
enabled,
isCustom: existingModel?.isCustom || false,
providerId: 'ollama',
vision: modelConfig?.vision ?? existingModel?.vision ?? capabilitySet.has('vision'),
functionCall:
modelConfig?.functionCall ?? existingModel?.functionCall ?? capabilitySet.has('tools'),
reasoning:
modelConfig?.reasoning ?? existingModel?.reasoning ?? capabilitySet.has('thinking'),
enableSearch: modelConfig?.enableSearch ?? existingModel?.enableSearch ?? false,
temperature: modelConfig?.temperature ?? existingModelExtra?.temperature,
reasoningEffort: modelConfig?.reasoningEffort ?? existingModelExtra?.reasoningEffort,
verbosity: modelConfig?.verbosity ?? existingModelExtra?.verbosity,
thinkingBudget: modelConfig?.thinkingBudget ?? existingModelExtra?.thinkingBudget,
forcedSearch: modelConfig?.forcedSearch ?? existingModelExtra?.forcedSearch,
searchStrategy: modelConfig?.searchStrategy ?? existingModelExtra?.searchStrategy,
type,
ollamaModel: model
} as RENDERER_MODEL_META & { ollamaModel: OllamaModel }
})
)
Comment on lines +1200 to +1282

⚠️ Potential issue | 🟠 Major

Prevent disabled Ollama models from surfacing; drop non-typed field; reduce IPC overhead

  • Do not add disabled provider models to enabledModels or search assistant candidates. Gate by provider.enable.
  • Remove the extraneous provider field (not in RENDERER_MODEL_META); keep providerId.
  • Consider batching config retrieval to avoid per-model IPC calls.

Apply these diffs:

  1. Remove non-typed provider and gate enabledModels/search assistant by provider status:
         return {
           ...existingModel,
           id: model.name,
           name: model.name,
           contextLength,
           maxTokens,
-          provider: 'ollama',
           group: existingModel?.group || 'local',
           enabled,
           isCustom: existingModel?.isCustom || false,
           providerId: 'ollama',
           ...
         } as RENDERER_MODEL_META & { ollamaModel: OllamaModel }
-    const enabledIndex = enabledModels.value.findIndex((item) => item.providerId === 'ollama')
-    const enabledOllamaModels = ollamaModelsAsGlobal.filter((model) => model.enabled)
-    if (enabledIndex !== -1) {
-      enabledModels.value[enabledIndex].models = enabledOllamaModels
-    } else if (enabledOllamaModels.length > 0) {
-      enabledModels.value.push({
-        providerId: 'ollama',
-        models: enabledOllamaModels
-      })
-    }
+    if (ollamaProvider.enable) {
+      const enabledIndex = enabledModels.value.findIndex((item) => item.providerId === 'ollama')
+      const enabledOllamaModels = ollamaModelsAsGlobal.filter((model) => model.enabled)
+      if (enabledIndex !== -1) {
+        enabledModels.value[enabledIndex].models = enabledOllamaModels
+      } else if (enabledOllamaModels.length > 0) {
+        enabledModels.value.push({
+          providerId: 'ollama',
+          models: enabledOllamaModels
+        })
+      }
+    }
-    await initOrUpdateSearchAssistantModel()
+    if (ollamaProvider.enable) {
+      await initOrUpdateSearchAssistantModel()
+    }
  2. Optional: batch-fetch model configs to reduce IPC calls:
-    const ollamaModelsAsGlobal = await Promise.all(
-      ollamaLocalModels.value.map(async (model) => {
-        const modelConfig = await configP.getModelConfig(model.name, 'ollama')
+    // const configs = await configP.getBatchModelConfig?.('ollama', modelNames) ?? {}
+    const ollamaModelsAsGlobal = await Promise.all(
+      ollamaLocalModels.value.map(async (model) => {
+        const modelConfig = await configP.getModelConfig(model.name, 'ollama')
         ...

If getBatchModelConfig doesn’t exist, consider adding it to the presenter for better performance.
As per coding guidelines

Also applies to: 1298-1312, 1310-1312
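
For reference, here is a minimal sketch of what such a batch method could look like on the config presenter. The method name getBatchModelConfig comes from the review suggestion itself; the per-model getModelConfig(modelId, providerId) signature matches the calls shown in the diffs above, while the ModelConfig return type and class placement are assumptions, not the project's actual API. The point is to collapse N renderer-to-main IPC round trips into one, mirroring how getBatchModelStatus is already fetched as a single name-keyed map:

// Hypothetical sketch; getBatchModelConfig does not exist in the presenter yet.
// Added inside the config presenter class, alongside getBatchModelStatus.
// One IPC round trip returns a name-keyed map, and the per-model lookups run
// in the main process instead of crossing the bridge once per model.
async getBatchModelConfig(
  providerId: string,
  modelIds: string[]
): Promise<Record<string, ModelConfig | undefined>> {
  const entries = await Promise.all(
    modelIds.map(async (id) => [id, await this.getModelConfig(id, providerId)] as const)
  )
  return Object.fromEntries(entries)
}

The store code above could then fetch all configs next to modelStatusMap with a single await configP.getBatchModelConfig('ollama', modelNames) and index into the result inside the map callback, instead of awaiting configP.getModelConfig once per model.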


// Update the global model list
const existingIndex = allProviderModels.value.findIndex((item) => item.providerId === 'ollama')