Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
2 changes: 1 addition & 1 deletion docs/conversations-api-implementation.md
Original file line number Diff line number Diff line change
Expand Up @@ -519,7 +519,7 @@ const sendMessage = async (userInput: string, conversationId: string) => {
try {
// Create streaming response
const stream = await openai.responses.create({
model: "ibnzterrell/Meta-Llama-3.3-70B-Instruct-AWQ-INT4", // Or user's selected model
model: "llama3-3-70b", // Or user's selected model
conversation: conversationId,
input: [{ role: "user", content: userInput }],
stream: true, // Enable streaming
Expand Down
2 changes: 1 addition & 1 deletion frontend/public/llms-full.txt
Original file line number Diff line number Diff line change
Expand Up @@ -161,7 +161,7 @@ All pricing is pay-as-you-go. Purchase credits in $10 increments.

| Model | Price per million tokens |
|---|---|
| llama-3.3-70b | $4 input / $4 output |
| llama3-3-70b | $4 input / $4 output |
| gpt-oss-120b | $4 input / $4 output |
| kimi-k2-5 | $4 input / $4 output |
| qwen3-vl-30b | $4 input / $4 output |
Expand Down
23 changes: 7 additions & 16 deletions frontend/src/components/ModelSelector.tsx
Original file line number Diff line number Diff line change
Expand Up @@ -12,6 +12,7 @@ import { useOpenSecret } from "@opensecret/react";
import { useEffect, useRef, useState } from "react";
import type { Model } from "openai/resources/models.js";
import { UpgradePromptDialog } from "@/components/UpgradePromptDialog";
import { aliasModelName, LLAMA_MODEL_ID } from "@/utils/utils";

// Model configuration for display names, badges, and token limits
type ModelCfg = {
Expand All @@ -26,12 +27,7 @@ type ModelCfg = {
};

export const MODEL_CONFIG: Record<string, ModelCfg> = {
"ibnzterrell/Meta-Llama-3.3-70B-Instruct-AWQ-INT4": {
displayName: "Llama 3.3 70B",
shortName: "Llama 3.3",
tokenLimit: 70000
},
"llama-3.3-70b": {
[LLAMA_MODEL_ID]: {
displayName: "Llama 3.3 70B",
shortName: "Llama 3.3",
tokenLimit: 70000
Expand Down Expand Up @@ -79,7 +75,7 @@ export const DEFAULT_TOKEN_LIMIT = 64000;

// Get token limit for a specific model
export function getModelTokenLimit(modelId: string): number {
return MODEL_CONFIG[modelId]?.tokenLimit || DEFAULT_TOKEN_LIMIT;
return MODEL_CONFIG[aliasModelName(modelId)]?.tokenLimit || DEFAULT_TOKEN_LIMIT;
}

// Primary model options
Expand Down Expand Up @@ -176,8 +172,10 @@ export function ModelSelector({ hasImages = false }: { hasImages?: boolean }) {

// Get current models for merging from ref
const currentModels = availableModelsRef.current || [];
const existingModelIds = new Set(currentModels.map((m) => m.id));
const newModels = filteredModels.filter((m) => !existingModelIds.has(m.id));
const existingModelIds = new Set(currentModels.map((m) => aliasModelName(m.id)));
const newModels = filteredModels.filter(
(m) => !existingModelIds.has(aliasModelName(m.id))
);

// Merge with existing models (keeping the hardcoded one)
setAvailableModels([...currentModels, ...newModels]);
Expand Down Expand Up @@ -448,13 +446,6 @@ export function ModelSelector({ hasImages = false }: { hasImages?: boolean }) {
Array.isArray(availableModels) &&
[...availableModels]
.filter((m) => MODEL_CONFIG[m.id] !== undefined)
// Deduplicate: prefer short names over long names
.filter((m) => {
if (m.id === "ibnzterrell/Meta-Llama-3.3-70B-Instruct-AWQ-INT4") {
return !availableModels.some((model) => model.id === "llama-3.3-70b");
}
return true;
})
// Remove duplicates by id
.filter(
(m, index, self) =>
Expand Down
5 changes: 3 additions & 2 deletions frontend/src/components/apikeys/ProxyConfigSection.tsx
Original file line number Diff line number Diff line change
Expand Up @@ -7,6 +7,7 @@ import { Alert, AlertDescription } from "@/components/ui/alert";
import { Play, Square, Loader2, AlertCircle, CheckCircle, Server, Copy, Check } from "lucide-react";
import { proxyService, ProxyConfig, ProxyStatus } from "@/services/proxyService";
import { isTauriDesktop } from "@/utils/platform";
import { LLAMA_MODEL_ID } from "@/utils/utils";

interface ProxyConfigSectionProps {
apiKeys: Array<{ name: string; created_at: string }>;
Expand Down Expand Up @@ -339,7 +340,7 @@ client = OpenAI(
)

response = client.chat.completions.create(
model="llama-3.3-70b",
model="${LLAMA_MODEL_ID}",
messages=[{"role": "user", "content": "Hello!"}],
stream=True
)
Expand All @@ -357,7 +358,7 @@ for chunk in response:
<code>{`curl -N http://${config.host}:${config.port}/v1/chat/completions \\
-H "Content-Type: application/json" \\
-d '{
"model": "llama-3.3-70b",
"model": "${LLAMA_MODEL_ID}",
"messages": [{"role": "user", "content": "Hello!"}],
"stream": true
}'`}</code>
Expand Down
24 changes: 21 additions & 3 deletions frontend/src/state/LocalStateContext.tsx
Original file line number Diff line number Diff line change
Expand Up @@ -46,6 +46,21 @@ function getInitialModel(): string {
return DEFAULT_MODEL_ID;
}

/**
 * Deduplicate a model list by canonical (aliased) id.
 * When both an aliased entry and a canonical entry (id already normalized)
 * are present, the canonical one wins; aliased entries are rewritten so
 * their `id` carries the canonical name.
 */
function normalizeAvailableModels(models: OpenSecretModel[]): OpenSecretModel[] {
  const byCanonicalId = new Map<string, OpenSecretModel>();

  for (const model of models) {
    const canonicalId = aliasModelName(model.id);
    const isAlreadyCanonical = model.id === canonicalId;

    // Skip an aliased duplicate once any entry for this canonical id exists.
    if (byCanonicalId.has(canonicalId) && !isAlreadyCanonical) continue;

    byCanonicalId.set(
      canonicalId,
      isAlreadyCanonical ? model : { ...model, id: canonicalId }
    );
  }

  return [...byCanonicalId.values()];
}

export const LocalStateProvider = ({ children }: { children: React.ReactNode }) => {
/** The model that should be assumed when a chat doesn't yet have one */
const defaultModel: OpenSecretModel = {
Expand All @@ -62,7 +77,7 @@ export const LocalStateProvider = ({ children }: { children: React.ReactNode })
userImages: [] as File[],
sentViaVoice: false,
model: getInitialModel(),
availableModels: [defaultModel] as OpenSecretModel[],
availableModels: normalizeAvailableModels([defaultModel]),
hasWhisperModel: true, // Default to true to avoid hiding button during loading
billingStatus: null as BillingStatus | null,
searchQuery: "",
Expand All @@ -77,7 +92,7 @@ export const LocalStateProvider = ({ children }: { children: React.ReactNode })
const chatToSave = {
...chat,

/** If a model is missing, assume the default Llama and write it now */
/** If a model is missing, assume the default model and write it now */
model: aliasModelName(chat.model) || DEFAULT_MODEL_ID
};

Expand Down Expand Up @@ -377,7 +392,10 @@ export const LocalStateProvider = ({ children }: { children: React.ReactNode })
}

function setAvailableModels(models: OpenSecretModel[]) {
setLocalState((prev) => ({ ...prev, availableModels: models }));
setLocalState((prev) => ({
...prev,
availableModels: normalizeAvailableModels(models)
}));
}

function setHasWhisperModel(hasWhisper: boolean) {
Expand Down
41 changes: 11 additions & 30 deletions frontend/src/utils/utils.ts
Original file line number Diff line number Diff line change
Expand Up @@ -74,37 +74,18 @@ export function useClickOutside(
}, [ref, callback]);
}

/**
* Alias old model names to new simplified names
* This ensures backward compatibility when the backend changes model names
*/
export function aliasModelName(modelName: string | undefined): string {
if (!modelName) return "";

// Map old model names to new simplified name
if (
modelName === "ibnzterrell/Meta-Llama-3.3-70B-Instruct-AWQ-INT4" ||
modelName === "llama3-3-70b"
) {
return "llama-3.3-70b";
}
export const LLAMA_MODEL_ID = "llama3-3-70b";

if (modelName === "qwen3-coder-480b") {
return "kimi-k2-5";
}
const MODEL_NAME_ALIASES: Record<string, string> = {
"llama-3.3-70b": LLAMA_MODEL_ID,
"gemma-3-27b": "gemma4-31b",
"deepseek-r1-0528": "kimi-k2-5",
"kimi-k2": "kimi-k2-5",
"kimi-k2-thinking": "kimi-k2-5"
};
Comment on lines +79 to +85
Copy link
Copy Markdown
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

🔴 Dropped backward-compatibility aliases for three old model names

The refactoring from if-else to a lookup table removed three previously-existing aliases:

  • "ibnzterrell/Meta-Llama-3.3-70B-Instruct-AWQ-INT4" (was → "llama-3.3-70b", which chains to "llama3-3-70b")
  • "qwen3-coder-480b" (was → "kimi-k2-5")
  • "leon-se/gemma-3-27b-it-fp8-dynamic" (was → "gemma4-31b")

These aliases exist for backward compatibility with persisted user data. When getChatById loads a chat at LocalStateContext.tsx:241, it calls aliasModelName(parsedChat.model). If a user has an old chat stored with one of these model names, it will now pass through un-aliased. The unrecognized model name then won't match any MODEL_CONFIG entry in ModelSelector.tsx, causing incorrect token limit fallbacks (via getModelTokenLimit at frontend/src/components/ModelSelector.tsx:78) and broken display names. Similarly, persistChat at frontend/src/state/LocalStateContext.tsx:96 will re-persist the unrecognized name rather than migrating it.

Suggested change
const MODEL_NAME_ALIASES: Record<string, string> = {
"llama-3.3-70b": LLAMA_MODEL_ID,
"gemma-3-27b": "gemma4-31b",
"deepseek-r1-0528": "kimi-k2-5",
"kimi-k2": "kimi-k2-5",
"kimi-k2-thinking": "kimi-k2-5"
};
const MODEL_NAME_ALIASES: Record<string, string> = {
"ibnzterrell/Meta-Llama-3.3-70B-Instruct-AWQ-INT4": LLAMA_MODEL_ID,
"llama-3.3-70b": LLAMA_MODEL_ID,
"leon-se/gemma-3-27b-it-fp8-dynamic": "gemma4-31b",
"gemma-3-27b": "gemma4-31b",
"qwen3-coder-480b": "kimi-k2-5",
"deepseek-r1-0528": "kimi-k2-5",
"kimi-k2": "kimi-k2-5",
"kimi-k2-thinking": "kimi-k2-5"
};
Open in Devin Review

Was this helpful? React with 👍 or 👎 to provide feedback.


if (modelName === "gemma-3-27b" || modelName === "leon-se/gemma-3-27b-it-fp8-dynamic") {
return "gemma4-31b";
}

if (modelName === "deepseek-r1-0528") {
return "kimi-k2-5";
}

// Alias kimi-k2 (old thinking model) to kimi-k2-5
if (modelName === "kimi-k2" || modelName === "kimi-k2-thinking") {
return "kimi-k2-5";
}
export function aliasModelName(modelName: string | undefined): string {
if (!modelName) return "";

return modelName;
return MODEL_NAME_ALIASES[modelName] ?? modelName;
}
Loading