fix: proper context window loading for LMStudio (fixes #5075) #5814
```diff
@@ -1,8 +1,7 @@
 import { Anthropic } from "@anthropic-ai/sdk"
 import OpenAI from "openai"
-import axios from "axios"

-import { type ModelInfo, openAiModelInfoSaneDefaults, LMSTUDIO_DEFAULT_TEMPERATURE } from "@roo-code/types"
+import { LMSTUDIO_DEFAULT_TEMPERATURE, type ModelInfo, openAiModelInfoSaneDefaults } from "@roo-code/types"

 import type { ApiHandlerOptions } from "../../shared/api"

```
```diff
@@ -11,22 +10,41 @@ import { XmlMatcher } from "../../utils/xml-matcher"
 import { convertToOpenAiMessages } from "../transform/openai-format"
 import { ApiStream } from "../transform/stream"

-import type { SingleCompletionHandler, ApiHandlerCreateMessageMetadata } from "../index"
+import type { ApiHandlerCreateMessageMetadata, SingleCompletionHandler } from "../index"
 import { BaseProvider } from "./base-provider"
+import { flushModels, getModels } from "./fetchers/modelCache"

+type ModelInfoCaching = {
+	modelInfo: ModelInfo
+	cached: boolean
+}
+
 export class LmStudioHandler extends BaseProvider implements SingleCompletionHandler {
 	protected options: ApiHandlerOptions
 	private client: OpenAI
+	private cachedModelInfo: ModelInfoCaching = {
+		modelInfo: openAiModelInfoSaneDefaults,
+		cached: false,
+	}
+	private lastRecacheTime: number = -1

 	constructor(options: ApiHandlerOptions) {
 		super()
 		this.options = options
 		this.client = new OpenAI({
-			baseURL: (this.options.lmStudioBaseUrl || "http://localhost:1234") + "/v1",
+			baseURL: this.getBaseUrl() + "/v1",
 			apiKey: "noop",
 		})
 	}

+	private getBaseUrl(): string {
+		if (this.options.lmStudioBaseUrl && this.options.lmStudioBaseUrl.trim() !== "") {
+			return this.options.lmStudioBaseUrl.trim()
+		} else {
+			return "http://localhost:1234"
+		}
+	}
+
 	override async *createMessage(
 		systemPrompt: string,
 		messages: Anthropic.Messages.MessageParam[],
```
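The new `getBaseUrl()` helper centralizes the base-URL fallback that was previously inlined in the constructor, and additionally trims whitespace and treats a blank setting as unset. A minimal standalone sketch of the same logic (the `resolveBaseUrl` name is hypothetical, for illustration only):

```typescript
// Sketch of the fallback behavior added by getBaseUrl(); resolveBaseUrl
// is an illustrative name, not part of the handler.
function resolveBaseUrl(lmStudioBaseUrl?: string): string {
	const trimmed = lmStudioBaseUrl?.trim()
	// Fall back to LM Studio's default local server address when unset or blank.
	return trimmed ? trimmed : "http://localhost:1234"
}

resolveBaseUrl("  http://192.168.1.10:1234 ") // "http://192.168.1.10:1234"
resolveBaseUrl("")                            // "http://localhost:1234"
resolveBaseUrl(undefined)                     // "http://localhost:1234"
```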
```diff
@@ -118,6 +136,29 @@ export class LmStudioHandler extends BaseProvider implements SingleCompletionHan
 			outputTokens = 0
 		}

+		if (
+			!this.cachedModelInfo.cached &&
+			(this.lastRecacheTime < 0 || Date.now() - this.lastRecacheTime > 30 * 1000)
+		) {
+			// Assume that if we didn't get a response in 30 seconds, the fetch failed and it is safe to retry.
+			this.lastRecacheTime = Date.now() // Update last recache time to avoid a race condition.
+
+			// We need to fetch the model info every time we open a new session
+			// to ensure we have the latest context window and other details,
+			// since LM Studio models can change their context windows on reload.
+			await flushModels("lmstudio")
+			const models = await getModels({ provider: "lmstudio", baseUrl: this.getBaseUrl() })
+			if (models && models[this.getModel().id]) {
+				this.cachedModelInfo = {
+					modelInfo: models[this.getModel().id],
+					cached: true,
+				}
+			} else {
+				// If model info is not found, still mark the result as cached to avoid retries on every chunk.
+				this.cachedModelInfo.cached = true
+			}
+		}
+
 		yield {
 			type: "usage",
 			inputTokens,
```
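The block above refetches model metadata only while `cached` is still false, and at most once every 30 seconds, so a slow or unreachable LM Studio server is not re-queried on every streamed chunk. A sketch of the throttle condition in isolation (the function and parameter names here are illustrative; the real handler stores these as private fields):

```typescript
// Isolated sketch of the recache gate used in createMessage().
const RECACHE_INTERVAL_MS = 30 * 1000

function shouldRecache(cached: boolean, lastRecacheTime: number, now = Date.now()): boolean {
	// Retry only while model info is uncached, and no more often than once
	// per interval; lastRecacheTime < 0 means no fetch has been attempted yet.
	return !cached && (lastRecacheTime < 0 || now - lastRecacheTime > RECACHE_INTERVAL_MS)
}
```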
```diff
@@ -133,7 +174,7 @@ export class LmStudioHandler extends BaseProvider implements SingleCompletionHan
 	override getModel(): { id: string; info: ModelInfo } {
 		return {
 			id: this.options.lmStudioModelId || "",
-			info: openAiModelInfoSaneDefaults,
+			info: this.cachedModelInfo.modelInfo,
 		}
 	}

```
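With this change, `getModel()` returns the metadata actually fetched from LM Studio (falling back to `openAiModelInfoSaneDefaults` until the fetch succeeds) instead of always returning the static defaults. A hypothetical consumer, assuming `ModelInfo` exposes `contextWindow` and `maxTokens` fields as the defaults object does:

```typescript
// Hypothetical caller (`handler` is an LmStudioHandler instance): budget a
// prompt against the model's real limits rather than the OpenAI defaults.
const { id, info } = handler.getModel()
const inputBudget = (info.contextWindow ?? 0) - (info.maxTokens ?? 0)
console.log(`${id}: ${inputBudget} tokens available for input`)
```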
```diff
@@ -161,17 +202,3 @@ export class LmStudioHandler extends BaseProvider implements SingleCompletionHan
 		}
 	}
 }
-
-export async function getLmStudioModels(baseUrl = "http://localhost:1234") {
-	try {
-		if (!URL.canParse(baseUrl)) {
-			return []
-		}
-
-		const response = await axios.get(`${baseUrl}/v1/models`)
-		const modelsArray = response.data?.data?.map((model: any) => model.id) || []
-		return [...new Set<string>(modelsArray)]
-	} catch (error) {
-		return []
-	}
-}
```
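The deleted `getLmStudioModels()` helper (and the `axios` import it required) is superseded by the shared model cache used in `createMessage`. A rough equivalent through that cache, based on the call shape shown in the diff above (the wrapper function itself is illustrative, not part of this PR):

```typescript
import { flushModels, getModels } from "./fetchers/modelCache"

// Roughly equivalent model-ID listing through the shared cache.
async function listLmStudioModelIds(baseUrl: string): Promise<string[]> {
	await flushModels("lmstudio") // drop stale entries so reloaded models are re-read
	const models = await getModels({ provider: "lmstudio", baseUrl })
	return Object.keys(models ?? {})
}
```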