diff --git a/sentry-javascript/18962/.gitignore b/sentry-javascript/18962/.gitignore
new file mode 100644
index 0000000..deed335
--- /dev/null
+++ b/sentry-javascript/18962/.gitignore
@@ -0,0 +1,3 @@
+node_modules/
+dist/
+.env
diff --git a/sentry-javascript/18962/README.md b/sentry-javascript/18962/README.md
new file mode 100644
index 0000000..6c480f1
--- /dev/null
+++ b/sentry-javascript/18962/README.md
@@ -0,0 +1,60 @@
+# Reproduction for sentry-javascript#18962
+
+**Issue:** https://github.com/getsentry/sentry-javascript/issues/18962
+
+## Description
+
+Sentry's tracing instrumentation (`tracesSampleRate: 1.0`) reportedly breaks OpenAI streaming when using LangChain/LangGraph's `agent.stream()` with `streamMode: ["messages"]`. Instead of receiving incremental chunks, the entire response arrives in one or very few chunks.
+
+## Test Results
+
+**On latest versions (`@sentry/nestjs@10.42.0`, `langchain@1.2.29`), the bug does not reproduce.**
+
+Both endpoints stream correctly with Sentry tracing enabled:
+
+| Endpoint | Sentry | Content Chunks | First Byte | Total |
+|---|---|---|---|---|
+| `/stream-agent` | OFF | ~40 | ~0.7s | ~1.2s |
+| `/stream-agent` | ON | ~46 | ~0.5s | ~1.0s |
+| `/stream-model` | ON | ~41 | ~0.7s | ~1.1s |
+
+The fix likely landed in a recent `@sentry/nestjs` release (possibly PR #19122). The issue was originally reported against 10.41.0 by @rad20c — pinning `@sentry/nestjs` to that version may reproduce it.
+
+## Steps to Reproduce
+
+1. Install dependencies:
+   ```bash
+   npm install
+   ```
+
+2. Add your OpenAI API key to `.env`:
+   ```
+   OPENAI_API_KEY=sk-your-key-here
+   ```
+
+3. Run **without** Sentry (baseline):
+   ```bash
+   npm run start:without-sentry
+   ```
+   Then in another terminal:
+   ```bash
+   curl http://localhost:3000/stream-agent
+   ```
+
+4. Stop the server, then run **with** Sentry:
+   ```bash
+   npm run start:with-sentry
+   ```
+   Then:
+   ```bash
+   curl http://localhost:3000/stream-agent
+   ```
+
+5. Compare chunk counts and timing between the two modes. The `/stream-model` endpoint is available for comparison with direct `ChatOpenAI` streaming.
+
+## Environment
+
+- `@sentry/nestjs`: 10.42.0
+- `langchain`: 1.2.29
+- `@langchain/openai`: 1.2.12
+- Node.js: 18+
diff --git a/sentry-javascript/18962/nest-cli.json b/sentry-javascript/18962/nest-cli.json
new file mode 100644
index 0000000..2566481
--- /dev/null
+++ b/sentry-javascript/18962/nest-cli.json
@@ -0,0 +1,5 @@
+{
+  "$schema": "https://json.schemastore.org/nest-cli",
+  "collection": "@nestjs/schematics",
+  "sourceRoot": "src"
+}
diff --git a/sentry-javascript/18962/package.json b/sentry-javascript/18962/package.json
new file mode 100644
index 0000000..6299a3b
--- /dev/null
+++ b/sentry-javascript/18962/package.json
@@ -0,0 +1,29 @@
+{
+  "name": "sentry-nestjs-openai-streaming-repro",
+  "version": "1.0.0",
+  "description": "Reproduction for sentry-javascript#18962 - Sentry breaking OpenAI streaming with LangGraph agent",
+  "main": "dist/main.js",
+  "scripts": {
+    "build": "nest build",
+    "start:with-sentry": "ENABLE_SENTRY=true nest start",
+    "start:without-sentry": "ENABLE_SENTRY=false nest start"
+  },
+  "dependencies": {
+    "@langchain/core": "^1.1.30",
+    "@langchain/langgraph": "^1.2.0",
+    "@langchain/openai": "^1.2.12",
+    "@nestjs/common": "^10.0.0",
+    "@nestjs/core": "^10.0.0",
+    "@nestjs/platform-express": "^10.0.0",
+    "@sentry/nestjs": "^10.41.0",
+    "dotenv": "^16.6.1",
+    "langchain": "^1.2.29",
+    "reflect-metadata": "^0.2.0",
+    "rxjs": "^7.8.1"
+  },
+  "devDependencies": {
+    "@nestjs/cli": "^10.0.0",
+    "@types/node": "^22.0.0",
+    "typescript": "^5.0.0"
+  }
+}
diff --git a/sentry-javascript/18962/src/app.module.ts b/sentry-javascript/18962/src/app.module.ts
new file mode 100644
index 0000000..0c80b22
--- /dev/null
+++ b/sentry-javascript/18962/src/app.module.ts
@@ -0,0 +1,9 @@
+import { Module } from "@nestjs/common";
+import { StreamController } from "./stream.controller";
+
+@Module({
+  imports: [],
+  controllers: [StreamController],
+  providers: [],
+})
+export class AppModule {}
diff --git a/sentry-javascript/18962/src/instrument.ts b/sentry-javascript/18962/src/instrument.ts
new file mode 100644
index 0000000..5b4fdbf
--- /dev/null
+++ b/sentry-javascript/18962/src/instrument.ts
@@ -0,0 +1,18 @@
+import "dotenv/config";
+import * as Sentry from "@sentry/nestjs";
+
+if (process.env.ENABLE_SENTRY === "true") {
+  console.log("Initializing Sentry...");
+
+  Sentry.init({
+    dsn: process.env.SENTRY_DSN || "",
+    environment: "local",
+    // The setting reported to break streaming in issue #18962.
+    // Removing tracesSampleRate (or setting it to 0) reportedly avoids it.
+    tracesSampleRate: 1.0,
+  });
+
+  console.log("Sentry initialized with tracesSampleRate: 1.0");
+} else {
+  console.log("Sentry disabled (ENABLE_SENTRY != true)");
+}
diff --git a/sentry-javascript/18962/src/main.ts b/sentry-javascript/18962/src/main.ts
new file mode 100644
index 0000000..3b50d33
--- /dev/null
+++ b/sentry-javascript/18962/src/main.ts
@@ -0,0 +1,13 @@
+import "./instrument";
+
+import { NestFactory } from "@nestjs/core";
+import { AppModule } from "./app.module";
+
+async function bootstrap() {
+  const app = await NestFactory.create(AppModule);
+  const port = process.env.PORT || 3000;
+  await app.listen(port);
+  console.log(`Server running at http://localhost:${port}`);
+  console.log(`Test streaming: curl http://localhost:${port}/stream-agent`);
+}
+bootstrap();
diff --git a/sentry-javascript/18962/src/stream.controller.ts b/sentry-javascript/18962/src/stream.controller.ts
new file mode 100644
index 0000000..ceb9a08
--- /dev/null
+++ b/sentry-javascript/18962/src/stream.controller.ts
@@ -0,0 +1,148 @@
+// @ts-nocheck
+import { Controller, Get, Res } from "@nestjs/common";
+import { Response } from "express";
+import { ChatOpenAI } from "@langchain/openai";
+import { createAgent } from "langchain";
+
+@Controller()
+export class StreamController {
+  @Get("/")
+  index() {
+    return {
+      message: "Reproduction for sentry-javascript#18962",
+      issue: "https://github.com/getsentry/sentry-javascript/issues/18962",
+      endpoints: {
+        "/stream-agent":
+          "Test LangGraph agent streaming (reproduces the bug)",
+        "/stream-model":
+          "Test direct ChatOpenAI model streaming (works fine)",
+      },
+      sentryEnabled: process.env.ENABLE_SENTRY === "true",
+    };
+  }
+
+  // This endpoint attempts to reproduce the bug.
+  // When affected, the stream delivers the full response
+  // in one chunk instead of streaming incrementally.
+  @Get("/stream-agent")
+  async streamAgent(@Res() res: Response) {
+    try {
+      const agent = createAgent({
+        model: "openai:gpt-4o-mini",
+        tools: [],
+      });
+
+      res.setHeader("Content-Type", "text/plain; charset=utf-8");
+      res.setHeader("Transfer-Encoding", "chunked");
+
+      console.log("\n[agent.stream] Starting streaming request...");
+      const startTime = Date.now();
+      let chunkCount = 0;
+      let contentChunkCount = 0;
+
+      // @ts-ignore - type instantiation issues with langchain generics
+      const stream = await agent.stream({
+        messages: [
+          {
+            role: "user",
+            content:
+              "Count from 1 to 20, each number on a new line. Be slow and deliberate.",
+          },
+        ],
+      },
+      { streamMode: ["messages"] },
+      );
+
+      for await (const chunk of stream) {
+        chunkCount++;
+        const elapsed = Date.now() - startTime;
+        // chunk is ["messages", AIMessageChunk] or similar tuple
+        const msg = Array.isArray(chunk[1]) ? chunk[1][0] : chunk[1];
+        const content = msg?.kwargs?.content ?? msg?.content ?? "";
+        if (content) {
+          contentChunkCount++;
+          console.log(
+            ` [${elapsed}ms] Chunk ${contentChunkCount}: "${String(content).replace(/\n/g, "\\n")}"`,
+          );
+          res.write(String(content));
+        }
+      }
+
+      const elapsed = Date.now() - startTime;
+      console.log(
+        `\n[agent.stream] Done in ${elapsed}ms, total chunks: ${chunkCount}, content chunks: ${contentChunkCount}`,
+      );
+
+      if (contentChunkCount < 10) {
+        console.log(
+          "WARNING: Very few content chunks received - streaming may be broken!",
+        );
+      }
+
+      res.end(
+        `\n\n--- Stats ---\nTotal chunks: ${chunkCount}\nContent chunks: ${contentChunkCount}\nTime: ${elapsed}ms\nSentry: ${process.env.ENABLE_SENTRY === "true" ? "ENABLED" : "disabled"}\n`,
+      );
+    } catch (error: any) {
+      console.error("Error:", error.message);
+      if (!res.headersSent) {
+        res.status(500).json({ error: error.message });
+      } else {
+        res.end(`\nError: ${error.message}`);
+      }
+    }
+  }
+
+  // This endpoint uses direct model streaming for comparison.
+  // It typically works fine even with Sentry enabled.
+  @Get("/stream-model")
+  async streamModel(@Res() res: Response) {
+    try {
+      const model = new ChatOpenAI({
+        model: "gpt-4o-mini",
+        temperature: 0,
+        streaming: true,
+      });
+
+      res.setHeader("Content-Type", "text/plain; charset=utf-8");
+      res.setHeader("Transfer-Encoding", "chunked");
+
+      console.log("\n[model.stream] Starting streaming request...");
+      const startTime = Date.now();
+      let chunkCount = 0;
+      let contentChunkCount = 0;
+
+      const stream = await model.stream(
+        "Count from 1 to 20, each number on a new line. Be slow and deliberate.",
+      );
+
+      for await (const chunk of stream) {
+        chunkCount++;
+        const elapsed = Date.now() - startTime;
+        const content = chunk.content.toString() || "";
+        if (content) {
+          contentChunkCount++;
+          console.log(
+            ` [${elapsed}ms] Chunk ${contentChunkCount}: "${content.replace(/\n/g, "\\n")}"`,
+          );
+          res.write(content);
+        }
+      }
+
+      const elapsed = Date.now() - startTime;
+      console.log(
+        `\n[model.stream] Done in ${elapsed}ms, total chunks: ${chunkCount}, content chunks: ${contentChunkCount}`,
+      );
+
+      res.end(
+        `\n\n--- Stats ---\nTotal chunks: ${chunkCount}\nContent chunks: ${contentChunkCount}\nTime: ${elapsed}ms\nSentry: ${process.env.ENABLE_SENTRY === "true" ? "ENABLED" : "disabled"}\nMethod: direct model.stream()\n`,
+      );
+    } catch (error: any) {
+      console.error("Error:", error.message);
+      if (!res.headersSent) {
+        res.status(500).json({ error: error.message });
+      } else {
+        res.end(`\nError: ${error.message}`);
+      }
+    }
+  }
+}
diff --git a/sentry-javascript/18962/tsconfig.json b/sentry-javascript/18962/tsconfig.json
new file mode 100644
index 0000000..95f5641
--- /dev/null
+++ b/sentry-javascript/18962/tsconfig.json
@@ -0,0 +1,21 @@
+{
+  "compilerOptions": {
+    "module": "commonjs",
+    "declaration": true,
+    "removeComments": true,
+    "emitDecoratorMetadata": true,
+    "experimentalDecorators": true,
+    "allowSyntheticDefaultImports": true,
+    "target": "ES2021",
+    "sourceMap": true,
+    "outDir": "./dist",
+    "baseUrl": "./",
+    "incremental": true,
+    "skipLibCheck": true,
+    "strictNullChecks": false,
+    "noImplicitAny": false,
+    "strictBindCallApply": false,
+    "forceConsistentCasingInFileNames": false,
+    "noFallthroughCasesInSwitch": false
+  }
+}