feat: add GPT-OSS 120b and 20b models to Groq provider #6732
Changes from all commits
```diff
@@ -11,6 +11,8 @@ export type GroqModelId =
 	| "qwen/qwen3-32b"
 	| "deepseek-r1-distill-llama-70b"
 	| "moonshotai/kimi-k2-instruct"
+	| "openai/gpt-oss-120b"
+	| "openai/gpt-oss-20b"

 export const groqDefaultModelId: GroqModelId = "llama-3.3-70b-versatile" // Defaulting to Llama3 70B Versatile

@@ -97,4 +99,24 @@ export const groqModels = {
 		outputPrice: 3.0,
 		description: "Moonshot AI Kimi K2 Instruct 1T model, 128K context.",
 	},
| "openai/gpt-oss-120b": { | ||
| maxTokens: 32766, | ||
| contextWindow: 131072, | ||
| supportsImages: false, | ||
| supportsPromptCache: false, | ||
| inputPrice: 0.15, | ||
| outputPrice: 0.75, | ||
| description: | ||
| "GPT-OSS 120B is OpenAI's flagship open source model, built on a Mixture-of-Experts (MoE) architecture with 20 billion parameters and 128 experts.", | ||
| }, | ||
| "openai/gpt-oss-20b": { | ||
| maxTokens: 32768, | ||
| contextWindow: 131072, | ||
| supportsImages: false, | ||
| supportsPromptCache: false, | ||
| inputPrice: 0.1, | ||
| outputPrice: 0.5, | ||
| description: | ||
| "GPT-OSS 20B is OpenAI's flagship open source model, built on a Mixture-of-Experts (MoE) architecture with 20 billion parameters and 32 experts.", | ||
|
||
| }, | ||
| } as const satisfies Record<string, ModelInfo> | ||
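
For reference, a minimal sketch of how the new entries could be consumed from this module. The `./groq` import path is an assumption for illustration and may not match the repository layout.

```typescript
// Sketch only: the "./groq" import path is assumed, not confirmed by this diff.
import { groqModels, groqDefaultModelId } from "./groq"

// Read the metadata added for one of the new GPT-OSS models.
const info = groqModels["openai/gpt-oss-120b"]

console.log(`default model: ${groqDefaultModelId}`)
console.log(
	`openai/gpt-oss-120b → context: ${info.contextWindow} tokens, ` +
		`input $${info.inputPrice}/M, output $${info.outputPrice}/M`,
)
```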
The PR description mentions that these models support tool use, browser search, code execution, and JSON object mode. Should we add these as boolean flags in the model info? For example:
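
A hedged sketch of what that could look like, assuming hypothetical flag names; the real `ModelInfo` type may spell these differently or not support them at all.

```typescript
// Hypothetical capability flags — the names below are illustrative only and
// are not confirmed fields of the actual ModelInfo type in this repository.
interface ModelInfoWithCapabilities {
	maxTokens: number
	contextWindow: number
	supportsImages: boolean
	supportsPromptCache: boolean
	supportsToolUse?: boolean // hypothetical
	supportsBrowserSearch?: boolean // hypothetical
	supportsCodeExecution?: boolean // hypothetical
	supportsJsonMode?: boolean // hypothetical
	inputPrice: number
	outputPrice: number
	description: string
}

// Example entry mirroring the values from this PR, with the extra flags
// reflecting the capabilities claimed in the PR description.
const gptOss120b: ModelInfoWithCapabilities = {
	maxTokens: 32766,
	contextWindow: 131072,
	supportsImages: false,
	supportsPromptCache: false,
	supportsToolUse: true,
	supportsBrowserSearch: true,
	supportsCodeExecution: true,
	supportsJsonMode: true,
	inputPrice: 0.15,
	outputPrice: 0.75,
	description: "GPT-OSS 120B is OpenAI's flagship open source model.",
}
```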