diff --git a/src/renderer/settings/components/OllamaProviderSettingsDetail.vue b/src/renderer/settings/components/OllamaProviderSettingsDetail.vue
index 91d7dfdf5..019d5ddad 100644
--- a/src/renderer/settings/components/OllamaProviderSettingsDetail.vue
+++ b/src/renderer/settings/components/OllamaProviderSettingsDetail.vue
@@ -279,7 +279,7 @@ import {
} from '@shadcn/components/ui/dialog'
import { useSettingsStore } from '@/stores/settings'
import { useModelCheckStore } from '@/stores/modelCheck'
-import type { LLM_PROVIDER } from '@shared/presenter'
+import type { LLM_PROVIDER, RENDERER_MODEL_META } from '@shared/presenter'
import ModelConfigItem from '@/components/settings/ModelConfigItem.vue'
import { useToast } from '@/components/use-toast'
import { ModelType } from '@shared/model'
@@ -306,6 +306,12 @@ const checkResult = ref(false)
const runningModels = computed(() => settingsStore.ollamaRunningModels)
const localModels = computed(() => settingsStore.ollamaLocalModels)
const pullingModels = computed(() => settingsStore.ollamaPullingModels)
+const providerModelMetas = computed(() => {
+ const providerEntry = settingsStore.allProviderModels.find(
+ (item) => item.providerId === props.provider.id
+ )
+ return providerEntry?.models ?? []
+})
 
// 预设可拉取的模型列表
const presetModels = [
@@ -740,24 +746,60 @@ const availableModels = computed(() => {
// 显示的本地模型(包括正在拉取的)
const displayLocalModels = computed(() => {
- // 创建带有pulling状态和进度的模型列表
- const models = localModels.value.map((model) => ({
- ...model,
- pulling: pullingModels.value.has(model.name),
- progress: pullingModels.value.get(model.name) || 0
- }))
-
- // 添加正在拉取但尚未出现在本地列表中的模型
+ const metaMap = new Map(
+ providerModelMetas.value.map((meta) => [
+ meta.id,
+ meta as RENDERER_MODEL_META & { ollamaModel?: any }
+ ])
+ )
+
+ const models = localModels.value.map((model: any) => {
+ const meta = metaMap.get(model.name)
+ const capabilitySources: string[] = []
+ if (Array.isArray(model?.capabilities)) {
+ capabilitySources.push(...model.capabilities)
+ }
+ if (meta?.ollamaModel && Array.isArray(meta.ollamaModel?.capabilities)) {
+ capabilitySources.push(...(meta.ollamaModel.capabilities as string[]))
+ }
+ const capabilitySet = new Set(capabilitySources)
+
+ const resolvedType =
+ meta?.type ?? (capabilitySet.has('embedding') ? ModelType.Embedding : ModelType.Chat)
+
+ return {
+ ...model,
+ meta,
+ pulling: pullingModels.value.has(model.name),
+ progress: pullingModels.value.get(model.name) || 0,
+ enabled: meta?.enabled ?? true,
+ vision: meta?.vision ?? capabilitySet.has('vision'),
+ functionCall: meta?.functionCall ?? capabilitySet.has('tools'),
+ reasoning: meta?.reasoning ?? capabilitySet.has('thinking'),
+ enableSearch: meta?.enableSearch ?? false,
+ type: resolvedType
+ }
+ })
+
for (const [modelName, progress] of pullingModels.value.entries()) {
- if (!models.some((m) => m.name === modelName)) {
+ if (!models.some((m: any) => m.name === modelName)) {
+ const meta = metaMap.get(modelName)
+ const capabilitySources: string[] = []
+ if (meta?.ollamaModel && Array.isArray(meta.ollamaModel?.capabilities)) {
+ capabilitySources.push(...(meta.ollamaModel.capabilities as string[]))
+ }
+ const capabilitySet = new Set(capabilitySources)
+
+ const resolvedType =
+ meta?.type ?? (capabilitySet.has('embedding') ? ModelType.Embedding : ModelType.Chat)
+
models.unshift({
name: modelName,
- model: modelName, // 添加必需的字段
- modified_at: new Date(), // 添加必需的字段
+ model: modelName,
+ modified_at: new Date(),
size: 0,
- digest: '', // 添加必需的字段
+ digest: '',
details: {
- // 添加必需的字段
format: '',
family: '',
families: [],
@@ -765,18 +807,24 @@ const displayLocalModels = computed(() => {
quantization_level: ''
},
model_info: {
- context_length: 0,
+ context_length: meta?.contextLength ?? 0,
embedding_length: 0
},
capabilities: [],
pulling: true,
- progress
+ progress,
+ meta,
+ enabled: meta?.enabled ?? true,
+ vision: meta?.vision ?? capabilitySet.has('vision'),
+ functionCall: meta?.functionCall ?? capabilitySet.has('tools'),
+ reasoning: meta?.reasoning ?? capabilitySet.has('thinking'),
+ enableSearch: meta?.enableSearch ?? false,
+ type: resolvedType
})
}
}
- // 排序: 正在拉取的放前面,其余按名称排序
- return models.sort((a, b) => {
+ return models.sort((a: any, b: any) => {
if (a.pulling && !b.pulling) return -1
if (!a.pulling && b.pulling) return 1
return a.name.localeCompare(b.name)
@@ -823,6 +871,14 @@ const showDeleteModelConfirm = (modelName: string) => {
showDeleteModelDialog.value = true
}
+const handleModelEnabledChange = async (modelName: string, enabled: boolean) => {
+ try {
+ await settingsStore.updateModelStatus(props.provider.id, modelName, enabled)
+ } catch (error) {
+ console.error(`Failed to update model status for ${modelName}:`, error)
+ }
+}
+
// 确认删除模型 - 使用 settings store
const confirmDeleteModel = async () => {
if (!modelToDelete.value) return
diff --git a/src/renderer/src/stores/settings.ts b/src/renderer/src/stores/settings.ts
index 8beac3473..96570c8b2 100644
--- a/src/renderer/src/stores/settings.ts
+++ b/src/renderer/src/stores/settings.ts
@@ -1197,30 +1197,89 @@ export const useSettingsStore = defineStore('settings', () => {
const existingOllamaModels =
allProviderModels.value.find((item) => item.providerId === 'ollama')?.models || []
+ const existingModelMap = new Map(
+ existingOllamaModels.map((model) => [
+ model.id,
+ model as RENDERER_MODEL_META & { ollamaModel?: OllamaModel }
+ ])
+ )
+
+ const modelNames = ollamaLocalModels.value.map((model) => model.name)
+ const modelStatusMap =
+ modelNames.length > 0 ? await configP.getBatchModelStatus('ollama', modelNames) : {}
+
// 将 Ollama 本地模型转换为全局模型格式
- const ollamaModelsAsGlobal = ollamaLocalModels.value.map((model) => {
- // 检查是否已存在相同ID的模型,如果存在,保留其现有的配置
- const existingModel = existingOllamaModels.find((m) => m.id === model.name)
+ const ollamaModelsAsGlobal = await Promise.all(
+ ollamaLocalModels.value.map(async (model) => {
+ const existingModel = existingModelMap.get(model.name)
+ const existingModelExtra = existingModel as
+ | (RENDERER_MODEL_META & {
+ temperature?: number
+ reasoningEffort?: string
+ verbosity?: string
+ thinkingBudget?: number
+ forcedSearch?: boolean
+ searchStrategy?: string
+ })
+ | undefined
+ const modelConfig = await configP.getModelConfig(model.name, 'ollama')
- return {
- id: model.name,
- name: model.name,
- contextLength: model.model_info.context_length || 4096, // 使用模型定义值或默认值
- maxTokens: existingModel?.maxTokens || 2048, // 使用现有值或默认值
- provider: 'ollama',
- group: existingModel?.group || 'local',
- enabled: true,
- isCustom: existingModel?.isCustom || false,
- providerId: 'ollama',
- vision: model.capabilities.indexOf('vision') > -1,
- functionCall: model.capabilities.indexOf('tools') > -1,
- reasoning: model.capabilities.indexOf('thinking') > -1,
- type: model.capabilities.indexOf('embedding') > -1 ? ModelType.Embedding : ModelType.Chat,
- // 保留现有的其他配置,但确保更新 Ollama 特有数据
- ...(existingModel ? { ...existingModel } : {}),
- ollamaModel: model
- } as RENDERER_MODEL_META & { ollamaModel: OllamaModel }
- })
+ const capabilitySources: string[] = []
+ if (Array.isArray((model as any)?.capabilities)) {
+ capabilitySources.push(...((model as any).capabilities as string[]))
+ }
+ if (
+ existingModel?.ollamaModel &&
+ Array.isArray((existingModel.ollamaModel as any)?.capabilities)
+ ) {
+ capabilitySources.push(...((existingModel.ollamaModel as any).capabilities as string[]))
+ }
+ const capabilitySet = new Set(capabilitySources)
+
+ const contextLength =
+ modelConfig?.contextLength ??
+ existingModel?.contextLength ??
+ (model as any)?.model_info?.context_length ??
+ 4096
+
+ const maxTokens = modelConfig?.maxTokens ?? existingModel?.maxTokens ?? 2048
+
+ const statusFromStore = modelStatusMap[model.name]
+ const enabled = statusFromStore ?? existingModel?.enabled ?? true
+
+ const type =
+ modelConfig?.type ??
+ existingModel?.type ??
+ (capabilitySet.has('embedding') ? ModelType.Embedding : ModelType.Chat)
+
+ return {
+ ...existingModel,
+ id: model.name,
+ name: model.name,
+ contextLength,
+ maxTokens,
+ provider: 'ollama',
+ group: existingModel?.group || 'local',
+ enabled,
+ isCustom: existingModel?.isCustom || false,
+ providerId: 'ollama',
+ vision: modelConfig?.vision ?? existingModel?.vision ?? capabilitySet.has('vision'),
+ functionCall:
+ modelConfig?.functionCall ?? existingModel?.functionCall ?? capabilitySet.has('tools'),
+ reasoning:
+ modelConfig?.reasoning ?? existingModel?.reasoning ?? capabilitySet.has('thinking'),
+ enableSearch: modelConfig?.enableSearch ?? existingModel?.enableSearch ?? false,
+ temperature: modelConfig?.temperature ?? existingModelExtra?.temperature,
+ reasoningEffort: modelConfig?.reasoningEffort ?? existingModelExtra?.reasoningEffort,
+ verbosity: modelConfig?.verbosity ?? existingModelExtra?.verbosity,
+ thinkingBudget: modelConfig?.thinkingBudget ?? existingModelExtra?.thinkingBudget,
+ forcedSearch: modelConfig?.forcedSearch ?? existingModelExtra?.forcedSearch,
+ searchStrategy: modelConfig?.searchStrategy ?? existingModelExtra?.searchStrategy,
+ type,
+ ollamaModel: model
+ } as RENDERER_MODEL_META & { ollamaModel: OllamaModel }
+ })
+ )
// 更新全局模型列表
const existingIndex = allProviderModels.value.findIndex((item) => item.providerId === 'ollama')