diff --git a/src/main/presenter/configPresenter/providers.ts b/src/main/presenter/configPresenter/providers.ts
index 4dccafd56..69320a80c 100644
--- a/src/main/presenter/configPresenter/providers.ts
+++ b/src/main/presenter/configPresenter/providers.ts
@@ -395,21 +395,21 @@ export const DEFAULT_PROVIDERS: LLM_PROVIDER_BASE[] = [
   //   }
   // }
-  // {
-  //   id: 'groq',
-  //   name: 'Groq',
-  //   apiType: 'groq',
-  //   apiKey: '',
-  //   baseUrl: 'https://api.groq.com/openai',
-  //   enable: false,
-  //   websites: {
-  //     official: 'https://groq.com/',
-  //     apiKey: 'https://console.groq.com/keys',
-  //     docs: 'https://console.groq.com/docs/quickstart',
-  //     models: 'https://console.groq.com/docs/models',
-  //     defaultBaseUrl: 'https://api.groq.com/openai'
-  //   }
-  // },
+  {
+    id: 'groq',
+    name: 'Groq',
+    apiType: 'groq',
+    apiKey: '',
+    baseUrl: 'https://api.groq.com/openai/v1',
+    enable: false,
+    websites: {
+      official: 'https://groq.com/',
+      apiKey: 'https://console.groq.com/keys',
+      docs: 'https://console.groq.com/docs/quickstart',
+      models: 'https://console.groq.com/docs/models',
+      defaultBaseUrl: 'https://api.groq.com/openai/v1'
+    }
+  },
   {
     id: 'grok',
diff --git a/src/main/presenter/llmProviderPresenter/index.ts b/src/main/presenter/llmProviderPresenter/index.ts
index a427a0e27..440da0bdc 100644
--- a/src/main/presenter/llmProviderPresenter/index.ts
+++ b/src/main/presenter/llmProviderPresenter/index.ts
@@ -28,6 +28,7 @@ import { ShowResponse } from 'ollama'
 import { CONFIG_EVENTS } from '@/events'
 import { TogetherProvider } from './providers/togetherProvider'
 import { GrokProvider } from './providers/grokProvider'
+import { GroqProvider } from './providers/groqProvider'
 import { presenter } from '@/presenter'
 import { ZhipuProvider } from './providers/zhipuProvider'
 import { LMStudioProvider } from './providers/lmstudioProvider'
@@ -153,6 +154,8 @@ export class LLMProviderPresenter implements ILlmProviderPresenter {
         return new LMStudioProvider(provider, this.configPresenter)
       case 'together':
         return new TogetherProvider(provider, this.configPresenter)
+      case 'groq':
+        return new GroqProvider(provider, this.configPresenter)
       default:
         console.warn(`Unknown provider type: ${provider.apiType}`)
         return undefined
diff --git a/src/main/presenter/llmProviderPresenter/providers/groqProvider.ts b/src/main/presenter/llmProviderPresenter/providers/groqProvider.ts
new file mode 100644
index 000000000..777a8b405
--- /dev/null
+++ b/src/main/presenter/llmProviderPresenter/providers/groqProvider.ts
@@ -0,0 +1,180 @@
+import { LLM_PROVIDER, LLMResponse, ChatMessage, MODEL_META } from '@shared/presenter'
+import { OpenAICompatibleProvider } from './openAICompatibleProvider'
+import { ConfigPresenter } from '../../configPresenter'
+
+// Interface for the Groq model response (following the PPIO naming convention)
+interface GroqModelResponse {
+  id: string
+  object: string
+  owned_by: string
+  created: number
+  display_name?: string
+  description?: string
+  context_size: number // Groq uses context_window, but we map it to context_size
+  max_output_tokens: number // Groq may use max_tokens, but we map it to max_output_tokens
+  features?: string[]
+  status?: number // Groq uses an active boolean; we map it to a status number
+  model_type?: string
+  // Groq-specific fields that we need to handle
+  active?: boolean
+  context_window?: number
+  max_tokens?: number
+  public_apps?: boolean
+}
+
+export class GroqProvider extends OpenAICompatibleProvider {
+  constructor(provider: LLM_PROVIDER, configPresenter: ConfigPresenter) {
+    super(provider, configPresenter)
+  }
+
+  async completions(
+    messages: ChatMessage[],
+    modelId: string,
+    temperature?: number,
+    maxTokens?: number
+  ): Promise<LLMResponse> {
+    return this.openAICompletion(messages, modelId, temperature, maxTokens)
+  }
+
+  async summaries(
+    text: string,
+    modelId: string,
+    temperature?: number,
+    maxTokens?: number
+  ): Promise<LLMResponse> {
+    return this.openAICompletion(
+      [
+        {
+          role: 'user',
+          content: `Please summarize the following content using concise language and highlighting key points:\n${text}`
+        }
+      ],
+      modelId,
+      temperature,
+      maxTokens
+    )
+  }
+
+  async generateText(
+    prompt: string,
+    modelId: string,
+    temperature?: number,
+    maxTokens?: number
+  ): Promise<LLMResponse> {
+    return this.openAICompletion(
+      [
+        {
+          role: 'user',
+          content: prompt
+        }
+      ],
+      modelId,
+      temperature,
+      maxTokens
+    )
+  }
+
+  /**
+   * Override fetchOpenAIModels to parse Groq-specific model data and update model configurations
+   * @param options - Request options
+   * @returns Promise<MODEL_META[]> - Array of model metadata
+   */
+  protected async fetchOpenAIModels(options?: { timeout: number }): Promise<MODEL_META[]> {
+    try {
+      const response = await this.openai.models.list(options)
+      // console.log('Groq models response:', JSON.stringify(response, null, 2))
+
+      const models: MODEL_META[] = []
+
+      for (const model of response.data) {
+        // Type the model as a Groq-specific response
+        const groqModel = model as unknown as GroqModelResponse
+
+        // Skip inactive models (map Groq's active field to status)
+        const modelStatus = groqModel.status ?? (groqModel.active ? 1 : 0)
+        if (modelStatus === 0 || groqModel.active === false) {
+          continue
+        }
+
+        // Extract model information
+        const modelId = groqModel.id
+        const features = groqModel.features || []
+
+        // Map Groq fields to PPIO-style naming
+        const contextSize = groqModel.context_size || groqModel.context_window || 4096
+        const maxOutputTokens = groqModel.max_output_tokens || groqModel.max_tokens || 2048
+
+        // Check features for capabilities, or infer them from the model name
+        const hasFunctionCalling =
+          features.includes('function-calling') ||
+          (!modelId.toLowerCase().includes('distil') && !modelId.toLowerCase().includes('gemma'))
+        const hasVision =
+          features.includes('vision') ||
+          modelId.toLowerCase().includes('vision') ||
+          modelId.toLowerCase().includes('llava')
+
+        // Get the existing model configuration first
+        const existingConfig = this.configPresenter.getModelConfig(modelId, this.provider.id)
+
+        // Extract configuration values with fallback priority: API -> existing config -> default
+        const contextLength = contextSize || existingConfig.contextLength || 4096
+        const maxTokens = maxOutputTokens || existingConfig.maxTokens || 2048
+
+        // Build the new configuration based on the API response
+        const newConfig = {
+          contextLength: contextLength,
+          maxTokens: maxTokens,
+          functionCall: hasFunctionCalling,
+          vision: hasVision,
+          reasoning: existingConfig.reasoning, // Keep existing reasoning setting
+          temperature: existingConfig.temperature, // Keep existing temperature
+          type: existingConfig.type // Keep existing type
+        }
+
+        // Check whether the configuration has changed
+        const configChanged =
+          existingConfig.contextLength !== newConfig.contextLength ||
+          existingConfig.maxTokens !== newConfig.maxTokens ||
+          existingConfig.functionCall !== newConfig.functionCall ||
+          existingConfig.vision !== newConfig.vision
+
+        // Update the configuration if it changed
+        if (configChanged) {
+          // console.log(`Updating configuration for model ${modelId}:`, {
+          //   old: {
+          //     contextLength: existingConfig.contextLength,
+          //     maxTokens: existingConfig.maxTokens,
+          //     functionCall: existingConfig.functionCall,
+          //     vision: existingConfig.vision
+          //   },
+          //   new: newConfig
+          // })
+
+          this.configPresenter.setModelConfig(modelId, this.provider.id, newConfig)
+        }
+
+        // Create the MODEL_META object
+        const modelMeta: MODEL_META = {
+          id: modelId,
+          name: groqModel.display_name || modelId,
+          group: 'default',
+          providerId: this.provider.id,
+          isCustom: false,
+          contextLength: contextLength,
+          maxTokens: maxTokens,
+          description: groqModel.description || `Groq model ${modelId}`,
+          vision: hasVision,
+          functionCall: hasFunctionCalling,
+          reasoning: existingConfig.reasoning || false
+        }
+
+        models.push(modelMeta)
+      }
+
+      console.log(`Processed ${models.length} Groq models with dynamic configuration updates`)
+      return models
+    } catch (error) {
+      console.error('Error fetching Groq models:', error)
+      // Fall back to the parent implementation
+      return super.fetchOpenAIModels(options)
+    }
+  }
+}
\ No newline at end of file
diff --git a/src/renderer/src/assets/llm-icons/groq.svg b/src/renderer/src/assets/llm-icons/groq.svg
index 7294646b9..d3480d0ea 100644
--- a/src/renderer/src/assets/llm-icons/groq.svg
+++ b/src/renderer/src/assets/llm-icons/groq.svg
@@ -1 +1,25 @@
-Groq
\ No newline at end of file
+
+
+
+
+
+
+
+
+
diff --git a/src/renderer/src/components/icons/ModelIcon.vue b/src/renderer/src/components/icons/ModelIcon.vue
index 0e7621888..2d00f3c58 100644
--- a/src/renderer/src/components/icons/ModelIcon.vue
+++ b/src/renderer/src/components/icons/ModelIcon.vue
@@ -49,6 +49,7 @@ import claudeColorIcon from '@/assets/llm-icons/claude-color.svg?url'
 import googleColorIcon from '@/assets/llm-icons/google-color.svg?url'
 import qiniuIcon from '@/assets/llm-icons/qiniu.svg?url'
 import grokColorIcon from '@/assets/llm-icons/grok.svg?url'
+import groqColorIcon from '@/assets/llm-icons/groq.svg?url'
 import hunyuanColorIcon from '@/assets/llm-icons/hunyuan-color.svg?url'
 import dashscopeColorIcon from '@/assets/llm-icons/alibabacloud-color.svg?url'
 import aihubmixColorIcon from '@/assets/llm-icons/aihubmix.png?url'
@@ -63,6 +64,7 @@ const icons = {
   dashscope: dashscopeColorIcon,
   hunyuan: hunyuanColorIcon,
   grok: grokColorIcon,
+  groq: groqColorIcon,
   qiniu: qiniuIcon,
   gemma: googleColorIcon,
   claude: claudeColorIcon,
@@ -155,6 +157,7 @@ const invert = computed(() => {
     props.modelId.toLowerCase().includes('openrouter') ||
     props.modelId.toLowerCase().includes('ollama') ||
     props.modelId.toLowerCase().includes('grok') ||
+    props.modelId.toLowerCase().includes('groq') ||
     props.modelId.toLowerCase().includes('github') ||
     props.modelId.toLowerCase().includes('moonshot') ||
     props.modelId.toLowerCase().includes('lmstudio')
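
For reference, the per-model capability mapping that fetchOpenAIModels performs boils down to the standalone TypeScript sketch below. The SampleGroqModel shape and the example call are illustrative assumptions distilled from the diff above, not real Groq API output.

// A standalone restatement of the capability-mapping heuristic in
// fetchOpenAIModels above. SampleGroqModel and the example values are
// illustrative assumptions, not real Groq API output.
interface SampleGroqModel {
  id: string
  active?: boolean
  context_window?: number
  max_tokens?: number
  features?: string[]
}

function mapCapabilities(model: SampleGroqModel) {
  const features = model.features ?? []
  const id = model.id.toLowerCase()
  return {
    // API value first, then the same defaults the provider uses
    contextLength: model.context_window || 4096,
    maxTokens: model.max_tokens || 2048,
    // explicit feature flag, else assume support unless the name suggests otherwise
    functionCall:
      features.includes('function-calling') ||
      (!id.includes('distil') && !id.includes('gemma')),
    // explicit feature flag, else infer from the model name
    vision: features.includes('vision') || id.includes('vision') || id.includes('llava')
  }
}

// Example (hypothetical model id):
// mapCapabilities({ id: 'example-vision-model', active: true, context_window: 8192 })
// -> { contextLength: 8192, maxTokens: 2048, functionCall: true, vision: true }

Note that the explicit feature flags and the name-based guesses are OR-ed together, so a model can be marked as supporting function calling purely from its name even when a features array is present.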