From 7ea61c2f48d485db8bf52c3c57b68115fc8974de Mon Sep 17 00:00:00 2001 From: Nicolas Hrubec Date: Wed, 4 Mar 2026 11:51:16 +0100 Subject: [PATCH 1/2] Add reproduction for sentry-javascript#18962 Reproduces OpenAI streaming breakage when Sentry tracing is enabled with LangGraph agent.stream() and streamMode: ["messages"]. Based on community feedback from rad20c's comment suggesting tracesSampleRate is the key config that causes the issue. Co-Authored-By: Claude Opus 4.6 --- sentry-javascript/18962/.gitignore | 3 + sentry-javascript/18962/README.md | 71 +++++++++ sentry-javascript/18962/nest-cli.json | 5 + sentry-javascript/18962/package.json | 27 ++++ sentry-javascript/18962/src/app.module.ts | 9 ++ sentry-javascript/18962/src/instrument.ts | 17 +++ sentry-javascript/18962/src/main.ts | 13 ++ .../18962/src/stream.controller.ts | 144 ++++++++++++++++++ sentry-javascript/18962/tsconfig.json | 21 +++ 9 files changed, 310 insertions(+) create mode 100644 sentry-javascript/18962/.gitignore create mode 100644 sentry-javascript/18962/README.md create mode 100644 sentry-javascript/18962/nest-cli.json create mode 100644 sentry-javascript/18962/package.json create mode 100644 sentry-javascript/18962/src/app.module.ts create mode 100644 sentry-javascript/18962/src/instrument.ts create mode 100644 sentry-javascript/18962/src/main.ts create mode 100644 sentry-javascript/18962/src/stream.controller.ts create mode 100644 sentry-javascript/18962/tsconfig.json diff --git a/sentry-javascript/18962/.gitignore b/sentry-javascript/18962/.gitignore new file mode 100644 index 0000000..deed335 --- /dev/null +++ b/sentry-javascript/18962/.gitignore @@ -0,0 +1,3 @@ +node_modules/ +dist/ +.env diff --git a/sentry-javascript/18962/README.md b/sentry-javascript/18962/README.md new file mode 100644 index 0000000..2ac6a4c --- /dev/null +++ b/sentry-javascript/18962/README.md @@ -0,0 +1,71 @@ +# Reproduction for sentry-javascript#18962 + +**Issue:** https://github.com/getsentry/sentry-javascript/issues/18962 + +## Description + +Sentry's tracing instrumentation (`tracesSampleRate: 1.0`) breaks OpenAI streaming when using LangGraph's `agent.stream()` with `streamMode: ["messages"]`. Instead of receiving incremental chunks, the entire response arrives in one or very few chunks. + +The bug is specific to **agent-level streaming** (LangGraph `createReactAgent` + `agent.stream()`). Direct `model.stream()` calls appear to work correctly. + +## Steps to Reproduce + +1. Install dependencies: + ```bash + npm install + ``` + +2. Export your OpenAI API key: + ```bash + export OPENAI_API_KEY=sk-your-key-here + ``` + +3. Optionally set a Sentry DSN (not required to see the streaming behavior): + ```bash + export SENTRY_DSN= + ``` + +4. Run **without** Sentry (baseline - streaming works): + ```bash + npm run start:without-sentry + ``` + Then in another terminal: + ```bash + curl http://localhost:3000/stream-agent + ``` + You should see chunks arriving incrementally. + +5. Stop the server, then run **with** Sentry (streaming breaks): + ```bash + npm run start:with-sentry + ``` + Then: + ```bash + curl http://localhost:3000/stream-agent + ``` + The response arrives all at once instead of streaming. + +6. For comparison, the direct model streaming endpoint works in both modes: + ```bash + curl http://localhost:3000/stream-model + ``` + +## Expected Behavior + +`agent.stream()` should deliver chunks incrementally regardless of whether Sentry tracing is enabled. 
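+
+To make incremental arrival easy to see from the client side, you can pass curl's standard `-N`/`--no-buffer` flag, which turns off curl's own output buffering (a client-side convenience only; the chunk counts logged by the server are the authoritative signal):
+
+```bash
+curl -N http://localhost:3000/stream-agent
+```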
+ +## Actual Behavior + +With `tracesSampleRate: 1.0`, the agent stream delivers the full response in one chunk (or very few chunks) instead of streaming incrementally. Removing `tracesSampleRate` or setting it to `0` restores correct streaming behavior. + +## Workarounds + +- Remove `tracesSampleRate` from Sentry config +- Set `defaultIntegrations: false` (requires manually adding needed integrations) + +## Environment + +- `@sentry/nestjs`: ^10.41.0 +- `@langchain/openai`: ^0.5.0 +- `@langchain/langgraph`: ^0.2.0 +- Node.js: 18+ diff --git a/sentry-javascript/18962/nest-cli.json b/sentry-javascript/18962/nest-cli.json new file mode 100644 index 0000000..2566481 --- /dev/null +++ b/sentry-javascript/18962/nest-cli.json @@ -0,0 +1,5 @@ +{ + "$schema": "https://json.schemastore.org/nest-cli", + "collection": "@nestjs/schematics", + "sourceRoot": "src" +} diff --git a/sentry-javascript/18962/package.json b/sentry-javascript/18962/package.json new file mode 100644 index 0000000..280859b --- /dev/null +++ b/sentry-javascript/18962/package.json @@ -0,0 +1,27 @@ +{ + "name": "sentry-nestjs-openai-streaming-repro", + "version": "1.0.0", + "description": "Reproduction for sentry-javascript#18962 - Sentry breaking OpenAI streaming with LangGraph agent", + "main": "dist/main.js", + "scripts": { + "build": "nest build", + "start:with-sentry": "ENABLE_SENTRY=true nest start", + "start:without-sentry": "ENABLE_SENTRY=false nest start" + }, + "dependencies": { + "@langchain/core": "^0.3.0", + "@langchain/langgraph": "^0.2.0", + "@langchain/openai": "^0.5.0", + "@nestjs/common": "^10.0.0", + "@nestjs/core": "^10.0.0", + "@nestjs/platform-express": "^10.0.0", + "@sentry/nestjs": "^10.41.0", + "reflect-metadata": "^0.2.0", + "rxjs": "^7.8.1" + }, + "devDependencies": { + "@nestjs/cli": "^10.0.0", + "@types/node": "^22.0.0", + "typescript": "^5.0.0" + } +} diff --git a/sentry-javascript/18962/src/app.module.ts b/sentry-javascript/18962/src/app.module.ts new file mode 100644 index 0000000..0c80b22 --- /dev/null +++ b/sentry-javascript/18962/src/app.module.ts @@ -0,0 +1,9 @@ +import { Module } from "@nestjs/common"; +import { StreamController } from "./stream.controller"; + +@Module({ + imports: [], + controllers: [StreamController], + providers: [], +}) +export class AppModule {} diff --git a/sentry-javascript/18962/src/instrument.ts b/sentry-javascript/18962/src/instrument.ts new file mode 100644 index 0000000..8a78f7b --- /dev/null +++ b/sentry-javascript/18962/src/instrument.ts @@ -0,0 +1,17 @@ +import * as Sentry from "@sentry/nestjs"; + +if (process.env.ENABLE_SENTRY === "true") { + console.log("Initializing Sentry..."); + + Sentry.init({ + dsn: process.env.SENTRY_DSN || "", + environment: "local", + // This is the key setting that breaks streaming. + // Removing tracesSampleRate (or setting it to 0) fixes the issue. 
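+    // Note: any configuration that enables tracing is expected to trigger
+    // the same behavior (e.g. a tracesSampler that returns 1); the plain
+    // sample rate is used here to mirror the original report.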
+ tracesSampleRate: 1.0, + }); + + console.log("Sentry initialized with tracesSampleRate: 1.0"); +} else { + console.log("Sentry disabled (ENABLE_SENTRY != true)"); +} diff --git a/sentry-javascript/18962/src/main.ts b/sentry-javascript/18962/src/main.ts new file mode 100644 index 0000000..3b50d33 --- /dev/null +++ b/sentry-javascript/18962/src/main.ts @@ -0,0 +1,13 @@ +import "./instrument"; + +import { NestFactory } from "@nestjs/core"; +import { AppModule } from "./app.module"; + +async function bootstrap() { + const app = await NestFactory.create(AppModule); + const port = process.env.PORT || 3000; + await app.listen(port); + console.log(`Server running at http://localhost:${port}`); + console.log(`Test streaming: curl http://localhost:${port}/stream-agent`); +} +bootstrap(); diff --git a/sentry-javascript/18962/src/stream.controller.ts b/sentry-javascript/18962/src/stream.controller.ts new file mode 100644 index 0000000..79adcc2 --- /dev/null +++ b/sentry-javascript/18962/src/stream.controller.ts @@ -0,0 +1,144 @@ +import { Controller, Get, Res } from "@nestjs/common"; +import { Response } from "express"; +import { ChatOpenAI } from "@langchain/openai"; +import { createReactAgent } from "@langchain/langgraph/prebuilt"; + +@Controller() +export class StreamController { + @Get("/") + index() { + return { + message: "Reproduction for sentry-javascript#18962", + issue: "https://github.com/getsentry/sentry-javascript/issues/18962", + endpoints: { + "/stream-agent": + "Test LangGraph agent streaming (reproduces the bug)", + "/stream-model": + "Test direct ChatOpenAI model streaming (works fine)", + }, + sentryEnabled: process.env.ENABLE_SENTRY === "true", + }; + } + + // This endpoint reproduces the bug. + // With Sentry tracing enabled, the stream delivers the full response + // in one chunk instead of streaming incrementally. + @Get("/stream-agent") + async streamAgent(@Res() res: Response) { + try { + const model = new ChatOpenAI({ + model: "gpt-4o-mini", + temperature: 0, + streaming: true, + }); + + const agent = createReactAgent({ llm: model, tools: [] }); + + res.setHeader("Content-Type", "text/plain; charset=utf-8"); + res.setHeader("Transfer-Encoding", "chunked"); + + console.log("\n[agent.stream] Starting streaming request..."); + const startTime = Date.now(); + let chunkCount = 0; + + const stream = await agent.stream( + { + messages: [ + { + role: "user", + content: + "Count from 1 to 20, each number on a new line. Be slow and deliberate.", + }, + ], + }, + { streamMode: ["messages"] }, + ); + + for await (const chunk of stream) { + chunkCount++; + // chunk is [streamMode, data] tuple when using streamMode array + const data = chunk[1]; + const content = data?.content ?? ""; + if (content) { + console.log( + ` Chunk ${chunkCount}: "${String(content).replace(/\n/g, "\\n")}"`, + ); + res.write(String(content)); + } + } + + const elapsed = Date.now() - startTime; + console.log( + `\n[agent.stream] Done in ${elapsed}ms, chunks: ${chunkCount}`, + ); + + if (chunkCount < 10) { + console.log( + "WARNING: Very few chunks received - streaming may be broken!", + ); + } + + res.end( + `\n\n--- Stats ---\nChunks: ${chunkCount}\nTime: ${elapsed}ms\nSentry: ${process.env.ENABLE_SENTRY === "true" ? 
"ENABLED" : "disabled"}\n`, + ); + } catch (error: any) { + console.error("Error:", error.message); + if (!res.headersSent) { + res.status(500).json({ error: error.message }); + } else { + res.end(`\nError: ${error.message}`); + } + } + } + + // This endpoint uses direct model streaming for comparison. + // It typically works fine even with Sentry enabled. + @Get("/stream-model") + async streamModel(@Res() res: Response) { + try { + const model = new ChatOpenAI({ + model: "gpt-4o-mini", + temperature: 0, + streaming: true, + }); + + res.setHeader("Content-Type", "text/plain; charset=utf-8"); + res.setHeader("Transfer-Encoding", "chunked"); + + console.log("\n[model.stream] Starting streaming request..."); + const startTime = Date.now(); + let chunkCount = 0; + + const stream = await model.stream( + "Count from 1 to 20, each number on a new line. Be slow and deliberate.", + ); + + for await (const chunk of stream) { + chunkCount++; + const content = chunk.content.toString() || ""; + if (content) { + console.log( + ` Chunk ${chunkCount}: "${content.replace(/\n/g, "\\n")}"`, + ); + res.write(content); + } + } + + const elapsed = Date.now() - startTime; + console.log( + `\n[model.stream] Done in ${elapsed}ms, chunks: ${chunkCount}`, + ); + + res.end( + `\n\n--- Stats ---\nChunks: ${chunkCount}\nTime: ${elapsed}ms\nSentry: ${process.env.ENABLE_SENTRY === "true" ? "ENABLED" : "disabled"}\nMethod: direct model.stream()\n`, + ); + } catch (error: any) { + console.error("Error:", error.message); + if (!res.headersSent) { + res.status(500).json({ error: error.message }); + } else { + res.end(`\nError: ${error.message}`); + } + } + } +} diff --git a/sentry-javascript/18962/tsconfig.json b/sentry-javascript/18962/tsconfig.json new file mode 100644 index 0000000..95f5641 --- /dev/null +++ b/sentry-javascript/18962/tsconfig.json @@ -0,0 +1,21 @@ +{ + "compilerOptions": { + "module": "commonjs", + "declaration": true, + "removeComments": true, + "emitDecoratorMetadata": true, + "experimentalDecorators": true, + "allowSyntheticDefaultImports": true, + "target": "ES2021", + "sourceMap": true, + "outDir": "./dist", + "baseUrl": "./", + "incremental": true, + "skipLibCheck": true, + "strictNullChecks": false, + "noImplicitAny": false, + "strictBindCallApply": false, + "forceConsistentCasingInFileNames": false, + "noFallthroughCasesInSwitch": false + } +} From b6d20f3117a70fe366665d4bf24edead30c6902a Mon Sep 17 00:00:00 2001 From: Nicolas Hrubec Date: Wed, 4 Mar 2026 12:25:58 +0100 Subject: [PATCH 2/2] Update repro: use langchain v1 createAgent, add test results - Switch from @langchain/langgraph createReactAgent to langchain v1 createAgent - Load .env via dotenv for OPENAI_API_KEY - Add chunk counting and timing to both endpoints - Update README with test results: bug does not reproduce on latest @sentry/nestjs@10.42.0 + langchain@1.2.29 Co-Authored-By: Claude Opus 4.6 --- sentry-javascript/18962/README.md | 55 ++++++++----------- sentry-javascript/18962/package.json | 8 ++- sentry-javascript/18962/src/instrument.ts | 1 + .../18962/src/stream.controller.ts | 44 ++++++++------- 4 files changed, 52 insertions(+), 56 deletions(-) diff --git a/sentry-javascript/18962/README.md b/sentry-javascript/18962/README.md index 2ac6a4c..6c480f1 100644 --- a/sentry-javascript/18962/README.md +++ b/sentry-javascript/18962/README.md @@ -4,9 +4,21 @@ ## Description -Sentry's tracing instrumentation (`tracesSampleRate: 1.0`) breaks OpenAI streaming when using LangGraph's `agent.stream()` with `streamMode: ["messages"]`. 
Instead of receiving incremental chunks, the entire response arrives in one or very few chunks.
 
-The bug is specific to **agent-level streaming** (LangGraph `createReactAgent` + `agent.stream()`). Direct `model.stream()` calls appear to work correctly.
+## Test Results
+
+**On latest versions (`@sentry/nestjs@10.42.0`, `langchain@1.2.29`), the bug does not reproduce.**
+
+Both endpoints stream correctly with Sentry tracing enabled:
+
+| Endpoint | Sentry | Content Chunks | First Byte | Total |
+|---|---|---|---|---|
+| `/stream-agent` | OFF | ~40 | ~0.7s | ~1.2s |
+| `/stream-agent` | ON | ~46 | ~0.5s | ~1.0s |
+| `/stream-model` | ON | ~41 | ~0.7s | ~1.1s |
+
+The fix was likely included in a recent `@sentry/nestjs` release (possibly PR #19122); the exact fix release is unclear, since the issue was reported by @rad20c against 10.41.0. Pinning `@sentry/nestjs` to that version may reproduce the bug.
 
 ## Steps to Reproduce
 
@@ -15,17 +27,12 @@ The bug is specific to **agent-level streaming** (LangGraph `createReactAgent` +
    npm install
    ```
 
-2. Export your OpenAI API key:
-   ```bash
-   export OPENAI_API_KEY=sk-your-key-here
+2. Add your OpenAI API key to `.env`:
    ```
-
-3. Optionally set a Sentry DSN (not required to see the streaming behavior):
-   ```bash
-   export SENTRY_DSN=
+   OPENAI_API_KEY=sk-your-key-here
    ```
 
-4. Run **without** Sentry (baseline - streaming works):
+3. Run **without** Sentry (baseline):
    ```bash
    npm run start:without-sentry
    ```
    Then in another terminal:
    ```bash
    curl http://localhost:3000/stream-agent
    ```
-   You should see chunks arriving incrementally.
 
-5. Stop the server, then run **with** Sentry (streaming breaks):
+4. Stop the server, then run **with** Sentry:
    ```bash
    npm run start:with-sentry
    ```
    Then:
    ```bash
    curl http://localhost:3000/stream-agent
    ```
-   The response arrives all at once instead of streaming.
-
-6. For comparison, the direct model streaming endpoint works in both modes:
-   ```bash
-   curl http://localhost:3000/stream-model
-   ```
-
-## Expected Behavior
-
-`agent.stream()` should deliver chunks incrementally regardless of whether Sentry tracing is enabled.
-
-## Actual Behavior
-
-With `tracesSampleRate: 1.0`, the agent stream delivers the full response in one chunk (or very few chunks) instead of streaming incrementally. Removing `tracesSampleRate` or setting it to `0` restores correct streaming behavior.
-
-## Workarounds
-
-- Remove `tracesSampleRate` from Sentry config
-- Set `defaultIntegrations: false` (requires manually adding needed integrations)
+
## Environment -- `@sentry/nestjs`: ^10.41.0 -- `@langchain/openai`: ^0.5.0 -- `@langchain/langgraph`: ^0.2.0 +- `@sentry/nestjs`: 10.42.0 +- `langchain`: 1.2.29 +- `@langchain/openai`: 1.2.12 - Node.js: 18+ diff --git a/sentry-javascript/18962/package.json b/sentry-javascript/18962/package.json index 280859b..6299a3b 100644 --- a/sentry-javascript/18962/package.json +++ b/sentry-javascript/18962/package.json @@ -9,13 +9,15 @@ "start:without-sentry": "ENABLE_SENTRY=false nest start" }, "dependencies": { - "@langchain/core": "^0.3.0", - "@langchain/langgraph": "^0.2.0", - "@langchain/openai": "^0.5.0", + "@langchain/core": "^1.1.30", + "@langchain/langgraph": "^1.2.0", + "@langchain/openai": "^1.2.12", "@nestjs/common": "^10.0.0", "@nestjs/core": "^10.0.0", "@nestjs/platform-express": "^10.0.0", "@sentry/nestjs": "^10.41.0", + "dotenv": "^16.6.1", + "langchain": "^1.2.29", "reflect-metadata": "^0.2.0", "rxjs": "^7.8.1" }, diff --git a/sentry-javascript/18962/src/instrument.ts b/sentry-javascript/18962/src/instrument.ts index 8a78f7b..5b4fdbf 100644 --- a/sentry-javascript/18962/src/instrument.ts +++ b/sentry-javascript/18962/src/instrument.ts @@ -1,3 +1,4 @@ +import "dotenv/config"; import * as Sentry from "@sentry/nestjs"; if (process.env.ENABLE_SENTRY === "true") { diff --git a/sentry-javascript/18962/src/stream.controller.ts b/sentry-javascript/18962/src/stream.controller.ts index 79adcc2..ceb9a08 100644 --- a/sentry-javascript/18962/src/stream.controller.ts +++ b/sentry-javascript/18962/src/stream.controller.ts @@ -1,7 +1,8 @@ +// @ts-nocheck import { Controller, Get, Res } from "@nestjs/common"; import { Response } from "express"; import { ChatOpenAI } from "@langchain/openai"; -import { createReactAgent } from "@langchain/langgraph/prebuilt"; +import { createAgent } from "langchain"; @Controller() export class StreamController { @@ -26,23 +27,21 @@ export class StreamController { @Get("/stream-agent") async streamAgent(@Res() res: Response) { try { - const model = new ChatOpenAI({ - model: "gpt-4o-mini", - temperature: 0, - streaming: true, + const agent = createAgent({ + model: "openai:gpt-4o-mini", + tools: [], }); - const agent = createReactAgent({ llm: model, tools: [] }); - res.setHeader("Content-Type", "text/plain; charset=utf-8"); res.setHeader("Transfer-Encoding", "chunked"); console.log("\n[agent.stream] Starting streaming request..."); const startTime = Date.now(); let chunkCount = 0; + let contentChunkCount = 0; - const stream = await agent.stream( - { + // @ts-ignore - type instantiation issues with langchain generics + const stream = await agent.stream({ messages: [ { role: "user", @@ -56,12 +55,14 @@ export class StreamController { for await (const chunk of stream) { chunkCount++; - // chunk is [streamMode, data] tuple when using streamMode array - const data = chunk[1]; - const content = data?.content ?? ""; + const elapsed = Date.now() - startTime; + // chunk is ["messages", AIMessageChunk] or similar tuple + const msg = Array.isArray(chunk[1]) ? chunk[1][0] : chunk[1]; + const content = msg?.kwargs?.content ?? msg?.content ?? 
""; if (content) { + contentChunkCount++; console.log( - ` Chunk ${chunkCount}: "${String(content).replace(/\n/g, "\\n")}"`, + ` [${elapsed}ms] Chunk ${contentChunkCount}: "${String(content).replace(/\n/g, "\\n")}"`, ); res.write(String(content)); } @@ -69,17 +70,17 @@ export class StreamController { const elapsed = Date.now() - startTime; console.log( - `\n[agent.stream] Done in ${elapsed}ms, chunks: ${chunkCount}`, + `\n[agent.stream] Done in ${elapsed}ms, total chunks: ${chunkCount}, content chunks: ${contentChunkCount}`, ); - if (chunkCount < 10) { + if (contentChunkCount < 10) { console.log( - "WARNING: Very few chunks received - streaming may be broken!", + "WARNING: Very few content chunks received - streaming may be broken!", ); } res.end( - `\n\n--- Stats ---\nChunks: ${chunkCount}\nTime: ${elapsed}ms\nSentry: ${process.env.ENABLE_SENTRY === "true" ? "ENABLED" : "disabled"}\n`, + `\n\n--- Stats ---\nTotal chunks: ${chunkCount}\nContent chunks: ${contentChunkCount}\nTime: ${elapsed}ms\nSentry: ${process.env.ENABLE_SENTRY === "true" ? "ENABLED" : "disabled"}\n`, ); } catch (error: any) { console.error("Error:", error.message); @@ -108,6 +109,7 @@ export class StreamController { console.log("\n[model.stream] Starting streaming request..."); const startTime = Date.now(); let chunkCount = 0; + let contentChunkCount = 0; const stream = await model.stream( "Count from 1 to 20, each number on a new line. Be slow and deliberate.", @@ -115,10 +117,12 @@ export class StreamController { for await (const chunk of stream) { chunkCount++; + const elapsed = Date.now() - startTime; const content = chunk.content.toString() || ""; if (content) { + contentChunkCount++; console.log( - ` Chunk ${chunkCount}: "${content.replace(/\n/g, "\\n")}"`, + ` [${elapsed}ms] Chunk ${contentChunkCount}: "${content.replace(/\n/g, "\\n")}"`, ); res.write(content); } @@ -126,11 +130,11 @@ export class StreamController { const elapsed = Date.now() - startTime; console.log( - `\n[model.stream] Done in ${elapsed}ms, chunks: ${chunkCount}`, + `\n[model.stream] Done in ${elapsed}ms, total chunks: ${chunkCount}, content chunks: ${contentChunkCount}`, ); res.end( - `\n\n--- Stats ---\nChunks: ${chunkCount}\nTime: ${elapsed}ms\nSentry: ${process.env.ENABLE_SENTRY === "true" ? "ENABLED" : "disabled"}\nMethod: direct model.stream()\n`, + `\n\n--- Stats ---\nTotal chunks: ${chunkCount}\nContent chunks: ${contentChunkCount}\nTime: ${elapsed}ms\nSentry: ${process.env.ENABLE_SENTRY === "true" ? "ENABLED" : "disabled"}\nMethod: direct model.stream()\n`, ); } catch (error: any) { console.error("Error:", error.message);