8 changes: 4 additions & 4 deletions README.md
@@ -5,20 +5,20 @@
<a href="https://discord.gg/roocode" target="_blank"><img src="https://img.shields.io/badge/Join%20Discord-5865F2?style=for-the-badge&logo=discord&logoColor=white" alt="Join Discord" height="60"></a>
<a href="https://www.reddit.com/r/RooCode/" target="_blank"><img src="https://img.shields.io/badge/Join%20Reddit-FF4500?style=for-the-badge&logo=reddit&logoColor=white" alt="Join Reddit" height="60"></a>

<a href="https://github.com/RooVetGit/Roo-Code"><img src="https://img.shields.io/github/stars/RooVetGit/Roo-Code?style=social" alt="GitHub Stars"></a>
<a href="https://marketplace.visualstudio.com/items?itemName=RooVeterinaryInc.roo-cline" target="_blank"><img src="https://img.shields.io/visual-studio-marketplace/i/RooVeterinaryInc.roo-cline?label=VS%20Marketplace%20Installs&style=social" alt="VS Marketplace Installs"></a>
<a href="https://github.com/RooVetGit/Roo-Code"><img src="https://img.shields.io/github/stars/RooVetGit/Roo-Code?style=social" alt="GitHub Stars"></a>
<a href="https://marketplace.visualstudio.com/items?itemName=RooVeterinaryInc.roo-cline" target="_blank"><img src="https://img.shields.io/visual-studio-marketplace/i/RooVeterinaryInc.roo-cline?label=VS%20Marketplace%20Installs&style=social" alt="VS Marketplace Installs"></a>

</div>
<br>
<br>



<div align="center">
<h1>Roo Code (prev. Roo Cline)</h1>

<a href="https://marketplace.visualstudio.com/items?itemName=RooVeterinaryInc.roo-cline" target="_blank"><img src="https://img.shields.io/badge/Download%20on%20VS%20Marketplace-blue?style=for-the-badge&logo=visualstudiocode&logoColor=white" alt="Download on VS Marketplace"></a>
<a href="https://github.com/RooVetGit/Roo-Code/discussions/categories/feature-requests?discussions_q=is%3Aopen+category%3A%22Feature+Requests%22+sort%3Atop" target="_blank"><img src="https://img.shields.io/badge/Feature%20Requests-yellow?style=for-the-badge" alt="Feature Requests"></a>
<a href="https://marketplace.visualstudio.com/items?itemName=RooVeterinaryInc.roo-cline&ssr=false#review-details" target="_blank"><img src="https://img.shields.io/badge/Rate%20%26%20Review-green?style=for-the-badge" alt="Rate & Review"></a>

</div>

**Roo Code** is an AI-powered **autonomous coding agent** that lives in your editor. It can:
116 changes: 116 additions & 0 deletions src/api/providers/__tests__/openai.test.ts
@@ -143,6 +143,122 @@ describe("OpenAiHandler", () => {
expect(textChunks).toHaveLength(1)
expect(textChunks[0].text).toBe("Test response")
})

it("should handle thinking tags in a stream", async () => {
const openaiOptions = {
...mockOptions,
openAiCustomModelInfo: {
thinkTokensInResponse: true,
contextWindow: 128_000,
supportsImages: false,
supportsPromptCache: false,
},
}
const handler = new OpenAiHandler(openaiOptions)
mockCreate.mockImplementationOnce(async (options) => {
return {
[Symbol.asyncIterator]: async function* () {
yield {
choices: [
{
delta: { content: "<think" },
index: 0,
},
],
usage: null,
}
yield {
choices: [
{
delta: { content: ">thoughts<" },
index: 1,
},
],
usage: null,
}
yield {
choices: [
{
delta: { content: "/think>" },
index: 2,
},
],
usage: null,
}
yield {
choices: [
{
delta: { content: "result<th" },
index: 3,
},
],
usage: null,
}
},
}
})

const stream = handler.createMessage(systemPrompt, messages)
const chunks: any[] = []
for await (const chunk of stream) {
chunks.push(chunk)
}

expect(chunks.length).toBeGreaterThan(0)
const textChunks = chunks.filter((chunk) => chunk.type === "text")
expect(textChunks).toHaveLength(1)
expect(textChunks[0].text).toBe("result<th")

const reasoningChunks = chunks.filter((chunk) => chunk.type === "reasoning")
expect(reasoningChunks).toHaveLength(1)
expect(reasoningChunks[0].text).toBe("thoughts")
})

it("should handle thinking tags when not streaming", async () => {
const openaiOptions = {
...mockOptions,
openAiCustomModelInfo: {
thinkTokensInResponse: true,
contextWindow: 128_000,
supportsImages: false,
supportsPromptCache: false,
},
openAiStreamingEnabled: false,
}
const handler = new OpenAiHandler(openaiOptions)
mockCreate.mockImplementationOnce(async (options) => {
return {
id: "custom-test-completion",
choices: [
{
message: { role: "assistant", content: "<think>thoughts</think>result<th" },
finish_reason: "stop",
index: 0,
},
],
usage: {
prompt_tokens: 5,
completion_tokens: 7,
total_tokens: 12,
},
}
})

const stream = handler.createMessage(systemPrompt, messages)
const chunks: any[] = []
for await (const chunk of stream) {
chunks.push(chunk)
}

expect(chunks.length).toBeGreaterThan(0)
const textChunks = chunks.filter((chunk) => chunk.type === "text")
expect(textChunks).toHaveLength(1)
expect(textChunks[0].text).toBe("result<th")

const reasoningChunks = chunks.filter((chunk) => chunk.type === "reasoning")
expect(reasoningChunks).toHaveLength(1)
expect(reasoningChunks[0].text).toBe("thoughts")
})
})

describe("error handling", () => {
8 changes: 5 additions & 3 deletions src/api/providers/ollama.ts
@@ -20,7 +20,7 @@ export class OllamaHandler implements ApiHandler, SingleCompletionHandler {

async *createMessage(systemPrompt: string, messages: Anthropic.Messages.MessageParam[]): ApiStream {
const modelId = this.getModel().id
const useR1Format = modelId.toLowerCase().includes('deepseek-r1')
const useR1Format = modelId.toLowerCase().includes("deepseek-r1")
const openAiMessages: OpenAI.Chat.ChatCompletionMessageParam[] = [
{ role: "system", content: systemPrompt },
...(useR1Format ? convertToR1Format(messages) : convertToOpenAiMessages(messages)),
@@ -53,10 +53,12 @@ export class OllamaHandler implements ApiHandler, SingleCompletionHandler {
async completePrompt(prompt: string): Promise<string> {
try {
const modelId = this.getModel().id
const useR1Format = modelId.toLowerCase().includes('deepseek-r1')
const useR1Format = modelId.toLowerCase().includes("deepseek-r1")
const response = await this.client.chat.completions.create({
model: this.getModel().id,
messages: useR1Format ? convertToR1Format([{ role: "user", content: prompt }]) : [{ role: "user", content: prompt }],
messages: useR1Format
? convertToR1Format([{ role: "user", content: prompt }])
: [{ role: "user", content: prompt }],
temperature: 0,
stream: false,
})
20 changes: 8 additions & 12 deletions src/api/providers/openai-native.ts
@@ -41,7 +41,7 @@ export class OpenAiNativeHandler implements ApiHandler, SingleCompletionHandler
private async *handleO1FamilyMessage(
modelId: string,
systemPrompt: string,
messages: Anthropic.Messages.MessageParam[]
messages: Anthropic.Messages.MessageParam[],
): ApiStream {
// o1 supports developer prompt with formatting
// o1-preview and o1-mini only support user messages
@@ -63,7 +63,7 @@ export class OpenAiNativeHandler implements ApiHandler, SingleCompletionHandler
private async *handleO3FamilyMessage(
modelId: string,
systemPrompt: string,
messages: Anthropic.Messages.MessageParam[]
messages: Anthropic.Messages.MessageParam[],
): ApiStream {
const stream = await this.client.chat.completions.create({
model: "o3-mini",
@@ -85,7 +85,7 @@ export class OpenAiNativeHandler implements ApiHandler, SingleCompletionHandler
private async *handleDefaultModelMessage(
modelId: string,
systemPrompt: string,
messages: Anthropic.Messages.MessageParam[]
messages: Anthropic.Messages.MessageParam[],
): ApiStream {
const stream = await this.client.chat.completions.create({
model: modelId,
@@ -98,9 +98,7 @@ export class OpenAiNativeHandler implements ApiHandler, SingleCompletionHandler
yield* this.handleStreamResponse(stream)
}

private async *yieldResponseData(
response: OpenAI.Chat.Completions.ChatCompletion
): ApiStream {
private async *yieldResponseData(response: OpenAI.Chat.Completions.ChatCompletion): ApiStream {
yield {
type: "text",
text: response.choices[0]?.message.content || "",
@@ -112,9 +110,7 @@ export class OpenAiNativeHandler implements ApiHandler, SingleCompletionHandler
}
}

private async *handleStreamResponse(
stream: AsyncIterable<OpenAI.Chat.Completions.ChatCompletionChunk>
): ApiStream {
private async *handleStreamResponse(stream: AsyncIterable<OpenAI.Chat.Completions.ChatCompletionChunk>): ApiStream {
for await (const chunk of stream) {
const delta = chunk.choices[0]?.delta
if (delta?.content) {
@@ -168,7 +164,7 @@ export class OpenAiNativeHandler implements ApiHandler, SingleCompletionHandler

private getO1CompletionOptions(
modelId: string,
prompt: string
prompt: string,
): OpenAI.Chat.Completions.ChatCompletionCreateParamsNonStreaming {
return {
model: modelId,
@@ -178,7 +174,7 @@ export class OpenAiNativeHandler implements ApiHandler, SingleCompletionHandler

private getO3CompletionOptions(
modelId: string,
prompt: string
prompt: string,
): OpenAI.Chat.Completions.ChatCompletionCreateParamsNonStreaming {
return {
model: "o3-mini",
@@ -189,7 +185,7 @@ export class OpenAiNativeHandler implements ApiHandler, SingleCompletionHandler

private getDefaultCompletionOptions(
modelId: string,
prompt: string
prompt: string,
): OpenAI.Chat.Completions.ChatCompletionCreateParamsNonStreaming {
return {
model: modelId,
101 changes: 81 additions & 20 deletions src/api/providers/openai.ts
@@ -10,28 +10,18 @@ import {
import { ApiHandler, SingleCompletionHandler } from "../index"
import { convertToOpenAiMessages } from "../transform/openai-format"
import { convertToR1Format } from "../transform/r1-format"
import { ApiStream } from "../transform/stream"
import { ApiStream, ApiStreamChunk } from "../transform/stream"

export class OpenAiHandler implements ApiHandler, SingleCompletionHandler {
protected options: ApiHandlerOptions
private client: OpenAI

constructor(options: ApiHandlerOptions) {
this.options = options

let urlHost: string

try {
urlHost = new URL(this.options.openAiBaseUrl ?? "").host
} catch (error) {
// Likely an invalid `openAiBaseUrl`; we're still working on
// proper settings validation.
urlHost = ""
}

// Azure API shape slightly differs from the core API shape:
// https://github.com/openai/openai-node?tab=readme-ov-file#microsoft-azure-openai
const urlHost = new URL(this.options.openAiBaseUrl ?? "").host
if (urlHost === "azure.com" || urlHost.endsWith(".azure.com") || options.openAiUseAzure) {
// Azure API shape slightly differs from the core API shape:
// https://github.com/openai/openai-node?tab=readme-ov-file#microsoft-azure-openai
this.client = new AzureOpenAI({
baseURL: this.options.openAiBaseUrl,
apiKey: this.options.openAiApiKey,
@@ -50,6 +40,9 @@ export class OpenAiHandler implements ApiHandler, SingleCompletionHandler {
const modelId = this.options.openAiModelId ?? ""

const deepseekReasoner = modelId.includes("deepseek-reasoner")
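// Models that emit inline <think>…</think> tokens get a separator that splits them out as reasoning chunks; otherwise content passes through unchanged.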
const thinkingParser = modelInfo.thinkTokensInResponse
? new ThinkingTokenSeparator()
: new PassThroughTokenSeparator()

if (this.options.openAiStreamingEnabled ?? true) {
const systemMessage: OpenAI.Chat.ChatCompletionSystemMessageParam = {
@@ -75,9 +68,8 @@ export class OpenAiHandler implements ApiHandler, SingleCompletionHandler {
const delta = chunk.choices[0]?.delta ?? {}

if (delta.content) {
yield {
type: "text",
text: delta.content,
for (const parsedChunk of thinkingParser.parseChunk(delta.content)) {
yield parsedChunk
}
}

@@ -95,6 +87,10 @@ export class OpenAiHandler implements ApiHandler, SingleCompletionHandler {
}
}
}

for (const parsedChunk of thinkingParser.flush()) {
yield parsedChunk
}
} else {
// o1 for instance doesn't support streaming, non-1 temp, or system prompt
const systemMessage: OpenAI.Chat.ChatCompletionUserMessageParam = {
@@ -111,9 +107,8 @@ export class OpenAiHandler implements ApiHandler, SingleCompletionHandler {

const response = await this.client.chat.completions.create(requestOptions)

yield {
type: "text",
text: response.choices[0]?.message.content || "",
for (const parsedChunk of thinkingParser.parseChunk(response.choices[0]?.message.content || "", true)) {
yield parsedChunk
}
yield {
type: "usage",
@@ -147,3 +142,69 @@ export class OpenAiHandler implements ApiHandler, SingleCompletionHandler {
}
}
}

class PassThroughTokenSeparator {
public parseChunk(chunk: string): ApiStreamChunk[] {
return [{ type: "text", text: chunk }]
}

public flush(): ApiStreamChunk[] {
return []
}
}
class ThinkingTokenSeparator {
private insideThinking = false
private buffer = ""

public parseChunk(chunk: string, flush: boolean = false): ApiStreamChunk[] {
let parsed: ApiStreamChunk[] = []
chunk = this.buffer + chunk
this.buffer = ""

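// `thinking` is the state entered after consuming `tag`; any text preceding the tag still belongs to the previous state (plain text before <think>, reasoning before </think>).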
const parseTag = (tag: string, thinking: boolean) => {
if (chunk.indexOf(tag) !== -1) {
const [before, after] = chunk.split(tag)
if (before.length > 0) {
parsed.push({ type: thinking ? "text" : "reasoning", text: before })
}
chunk = after
this.insideThinking = thinking
} else if (this.endsWithIncompleteString(chunk, tag)) {
this.buffer = chunk
chunk = ""
}
}

if (!this.insideThinking) {
parseTag("<think>", true)
}
if (this.insideThinking) {
parseTag("</think>", false)
}

if (flush) {
chunk = this.buffer + chunk
this.buffer = ""
}

if (chunk.length > 0) {
parsed.push({ type: this.insideThinking ? "reasoning" : "text", text: chunk })
}

return parsed
}

private endsWithIncompleteString(chunk: string, str: string): boolean {
// check whether the chunk ends with a proper prefix of `str`, i.e. a tag that may have been split across chunks
for (let i = str.length - 1; i >= 1; i--) {
if (chunk.endsWith(str.slice(0, i))) {
return true
}
}
return false
}

public flush(): ApiStreamChunk[] {
return this.parseChunk("", true)
}
}
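
A minimal usage sketch of the separator on a fragmented stream, assuming the `ThinkingTokenSeparator` class and `ApiStreamChunk` type above; the chunk boundaries are illustrative and mirror the streaming test:

```typescript
// Illustrative only: feed the separator fragments in which the <think> tags
// are split across chunk boundaries, then collect the emitted chunks.
const separator = new ThinkingTokenSeparator()

const fragments = ["<think", ">thoughts<", "/think>", "result<th"]
const collected: ApiStreamChunk[] = []

for (const fragment of fragments) {
	// Partial tags are buffered internally until enough text arrives to classify them.
	collected.push(...separator.parseChunk(fragment))
}

// flush() releases whatever is still buffered once the stream ends.
collected.push(...separator.flush())

// Expected output, matching the unit test:
//   { type: "reasoning", text: "thoughts" }
//   { type: "text", text: "result<th" }
console.log(collected)
```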