diff --git a/sentry-javascript/18962/.gitignore b/sentry-javascript/18962/.gitignore new file mode 100644 index 0000000..e44b27c --- /dev/null +++ b/sentry-javascript/18962/.gitignore @@ -0,0 +1,4 @@ +node_modules/ +dist/ +package-lock.json +.env diff --git a/sentry-javascript/18962/README.md b/sentry-javascript/18962/README.md new file mode 100644 index 0000000..89bd227 --- /dev/null +++ b/sentry-javascript/18962/README.md @@ -0,0 +1,102 @@ +# Reproduction for sentry-javascript#18962 + +**Issue:** https://github.com/getsentry/sentry-javascript/issues/18962 + +## Description + +This reproduction attempts to demonstrate the reported issue where OpenAI streaming breaks when Sentry is initialized in a NestJS application with `@sentry/nestjs`. + +**Note:** In our testing, streaming works correctly both with and without Sentry enabled. We need more details from the issue reporter to reproduce the exact issue. + +## Setup + +This is a NestJS application that uses: +- `@sentry/nestjs` v10.x (with OpenAI and LangChain integrations) +- `@langchain/openai` for LangChain integration + +## Steps to Reproduce + +1. Install dependencies: + ```bash + npm install + ``` + +2. Create a `.env` file with your configuration: + ```bash + OPENAI_API_KEY=sk-your-key-here + SENTRY_DSN=https://your-dsn@sentry.io/project + ENABLE_SENTRY=true + ``` + + Set `ENABLE_SENTRY=true` to enable Sentry, or omit/set to any other value to disable. + +3. Build the TypeScript: + ```bash + npm run build + ``` + +4. Run the server: + ```bash + npm run start + ``` + +5. 
Test streaming: + ```bash + curl http://localhost:3000/stream-langchain + curl http://localhost:3000/stream-openai + ``` + +## Available Endpoints + +- `GET /` - Status and available endpoints +- `GET /stream-openai` - Test OpenAI streaming via LangChain's ChatOpenAI +- `GET /stream-langchain` - Test OpenAI streaming via LangChain + +## Expected Behavior (per issue) + +According to the issue, with Sentry enabled: +- Streaming should break +- Only the final complete response should come through (1-2 chunks) +- Real-time streaming behavior should not work + +## Actual Behavior (our testing) + +In our testing with `@sentry/nestjs@10.36.0`: +- Streaming works correctly with Sentry enabled +- 40+ chunks received for all streaming methods +- No difference between Sentry enabled/disabled + +## Environment + +- Node.js: 22.x +- @sentry/nestjs: 10.36.0 +- @nestjs/core: 10.x +- @langchain/openai: 1.x + +## Questions for Issue Reporter + +To help reproduce the issue, please provide: + +1. **Exact SDK version**: What specific version of `@sentry/nestjs` are you using? +2. **Node.js version**: What Node.js version? +3. **Sentry.init config**: Full Sentry initialization config +4. **NestJS setup**: Any interceptors, middleware, or guards that might affect responses? +5. **OpenAI usage**: Exact code showing how you're using OpenAI/LangChain +6. **Network config**: Any proxies or custom HTTP agents? + +## Workaround (from issue) + +The issue reporter found that `defaultIntegrations: false` helps, suggesting one of the default integrations causes the problem. 
Try narrowing down which integration: + +```typescript +Sentry.init({ + dsn: '...', + // Try disabling specific integrations to find the culprit + integrations: (integrations) => { + return integrations.filter((integration) => { + // Try filtering different integrations + return integration.name !== 'OpenAI'; + }); + }, +}); +``` diff --git a/sentry-javascript/18962/nest-cli.json b/sentry-javascript/18962/nest-cli.json new file mode 100644 index 0000000..2566481 --- /dev/null +++ b/sentry-javascript/18962/nest-cli.json @@ -0,0 +1,5 @@ +{ + "$schema": "https://json.schemastore.org/nest-cli", + "collection": "@nestjs/schematics", + "sourceRoot": "src" +} diff --git a/sentry-javascript/18962/package.json b/sentry-javascript/18962/package.json new file mode 100644 index 0000000..04a452f --- /dev/null +++ b/sentry-javascript/18962/package.json @@ -0,0 +1,26 @@ +{ + "name": "sentry-nestjs-openai-streaming-repro", + "version": "1.0.0", + "description": "Reproduction for sentry-javascript#18962 - Sentry breaking OpenAI streaming in NestJS", + "main": "dist/main.js", + "scripts": { + "build": "nest build", + "start": "nest start", + "start:dev": "nest start --watch" + }, + "dependencies": { + "@langchain/openai": "^1.2.3", + "@nestjs/common": "^10.0.0", + "@nestjs/core": "^10.0.0", + "@nestjs/platform-express": "^10.0.0", + "@sentry/nestjs": "^10.0.0", + "langchain": "^1.2.14", + "reflect-metadata": "^0.2.0", + "rxjs": "^7.8.1" + }, + "devDependencies": { + "@nestjs/cli": "^10.0.0", + "@types/node": "^22.0.0", + "typescript": "^5.0.0" + } +} diff --git a/sentry-javascript/18962/src/app.module.ts b/sentry-javascript/18962/src/app.module.ts new file mode 100644 index 0000000..a394fdc --- /dev/null +++ b/sentry-javascript/18962/src/app.module.ts @@ -0,0 +1,9 @@ +import { Module } from '@nestjs/common'; +import { StreamController } from './stream.controller'; + +@Module({ + imports: [], + controllers: [StreamController], + providers: [], +}) +export class AppModule {} diff 
--git a/sentry-javascript/18962/src/instrument.ts b/sentry-javascript/18962/src/instrument.ts new file mode 100644 index 0000000..28864fb --- /dev/null +++ b/sentry-javascript/18962/src/instrument.ts @@ -0,0 +1,19 @@ +// Sentry instrumentation file for NestJS +// This is loaded BEFORE the application starts (it is the first import in main.ts) +import * as Sentry from "@sentry/nestjs"; + +if (process.env.ENABLE_SENTRY === "true") { + console.log("šŸ” Initializing Sentry with @sentry/nestjs..."); + + Sentry.init({ + dsn: process.env.SENTRY_DSN || "", + environment: "local", + tracesSampleRate: 1.0, + debug: true, + sendDefaultPii: true, + }); + + console.log("āœ… Sentry initialized"); +} else { + console.log("ā­ļø Sentry disabled (ENABLE_SENTRY != true)"); +} diff --git a/sentry-javascript/18962/src/main.ts b/sentry-javascript/18962/src/main.ts new file mode 100644 index 0000000..3001baf --- /dev/null +++ b/sentry-javascript/18962/src/main.ts @@ -0,0 +1,15 @@ +// Import this first! +import "./instrument"; + +import { NestFactory } from "@nestjs/core"; +import { AppModule } from "./app.module"; + +async function bootstrap() { + const app = await NestFactory.create(AppModule); + const port = process.env.PORT || 3000; + await app.listen(port); + console.log(`\nšŸš€ NestJS server running at http://localhost:${port}`); + console.log(`\nšŸ“ Test streaming: curl http://localhost:${port}/stream-openai`); + console.log(`   Or open in browser: http://localhost:${port}/stream-openai\n`); +} +bootstrap(); diff --git a/sentry-javascript/18962/src/stream.controller.ts b/sentry-javascript/18962/src/stream.controller.ts new file mode 100644 index 0000000..0be7355 --- /dev/null +++ b/sentry-javascript/18962/src/stream.controller.ts @@ -0,0 +1,146 @@ +import { Controller, Get, Res } from "@nestjs/common"; +import { Response } from "express"; +import { ChatOpenAI } from "@langchain/openai"; +import { HumanMessage } from "@langchain/core/messages"; + +@Controller() +export class StreamController { + private
langchainModel: ChatOpenAI | null = null; + + private getLangchainModel(): ChatOpenAI { + if (!this.langchainModel) { + if (!process.env.OPENAI_API_KEY) { + throw new Error( + "OPENAI_API_KEY not set. Export it with: export OPENAI_API_KEY=sk-..." + ); + } + this.langchainModel = new ChatOpenAI({ + model: "gpt-4o-mini", + temperature: 0, + streaming: true, + }); + } + return this.langchainModel; + } + + @Get("/") + index() { + return { + message: "Sentry NestJS OpenAI Streaming Reproduction", + issue: "https://github.com/getsentry/sentry-javascript/issues/18962", + endpoints: { + "/stream-openai": "Test OpenAI streaming with for-await (GET)", + "/stream-langchain": "Test OpenAI streaming via Langchain (GET)", + }, + sentryEnabled: process.env.ENABLE_SENTRY === "true", + openaiKeySet: !!process.env.OPENAI_API_KEY, + }; + } + + @Get("/stream-openai") + async stream(@Res() res: Response) { + try { + const openai = new ChatOpenAI({ + model: "gpt-4o-mini", + temperature: 0, + maxTokens: undefined, + timeout: undefined, + maxRetries: 2, + }); + + res.setHeader("Content-Type", "text/plain; charset=utf-8"); + res.setHeader("Transfer-Encoding", "chunked"); + + console.log("\nšŸ“ [OpenAI for-await] Starting streaming request..."); + const startTime = Date.now(); + let chunkCount = 0; + + const stream = await openai.stream( + "Count from 1 to 20, each number on a new line. Be slow and deliberate." + ); + + for await (const chunk of stream) { + chunkCount++; + const content = chunk.content.toString() || ""; + if (content) { + console.log( + ` Chunk ${chunkCount}: "${content.replace(/\n/g, "\\n")}"` + ); + res.write(content); + } + } + + const elapsed = Date.now() - startTime; + console.log( + `\nāœ… [OpenAI for-await] Completed in ${elapsed}ms, chunks: ${chunkCount}` + ); + + if (chunkCount < 10) { + console.log( + "\nāš ļø WARNING: Very few chunks received - streaming may be broken!" 
+ ); + } + + res.end( + `\n\n--- Stats ---\nChunks: ${chunkCount}\nTime: ${elapsed}ms\nSentry: ${ + process.env.ENABLE_SENTRY === "true" ? "ENABLED" : "disabled" + }\n` + ); + } catch (error) { + console.error("āŒ Error:", error.message); + res.status(500).json({ error: error.message }); + } + } + + @Get("/stream-langchain") + async streamLangchain(@Res() res: Response) { + try { + const model = this.getLangchainModel(); + + res.setHeader("Content-Type", "text/plain; charset=utf-8"); + res.setHeader("Transfer-Encoding", "chunked"); + + console.log("\nšŸ“ [Langchain] Starting streaming request..."); + const startTime = Date.now(); + let chunkCount = 0; + + // Using Langchain's streaming with .stream() + const stream = await model.stream([ + new HumanMessage( + "Count from 1 to 20, each number on a new line. Be slow and deliberate." + ), + ]); + + for await (const chunk of stream) { + chunkCount++; + const content = chunk.content; + if (content) { + console.log( + ` Chunk ${chunkCount}: "${String(content).replace(/\n/g, "\\n")}"` + ); + res.write(String(content)); + } + } + + const elapsed = Date.now() - startTime; + console.log( + `\nāœ… [Langchain] Completed in ${elapsed}ms, chunks: ${chunkCount}` + ); + + if (chunkCount < 10) { + console.log( + "\nāš ļø WARNING: Very few chunks received - streaming may be broken!" + ); + } + + res.end( + `\n\n--- Stats ---\nChunks: ${chunkCount}\nTime: ${elapsed}ms\nSentry: ${ + process.env.ENABLE_SENTRY === "true" ? 
"ENABLED" : "disabled" + }\nMethod: Langchain\n` + ); + } catch (error) { + console.error("āŒ Langchain Error:", error.message); + res.status(500).json({ error: error.message }); + } + } +} diff --git a/sentry-javascript/18962/tsconfig.json b/sentry-javascript/18962/tsconfig.json new file mode 100644 index 0000000..95f5641 --- /dev/null +++ b/sentry-javascript/18962/tsconfig.json @@ -0,0 +1,21 @@ +{ + "compilerOptions": { + "module": "commonjs", + "declaration": true, + "removeComments": true, + "emitDecoratorMetadata": true, + "experimentalDecorators": true, + "allowSyntheticDefaultImports": true, + "target": "ES2021", + "sourceMap": true, + "outDir": "./dist", + "baseUrl": "./", + "incremental": true, + "skipLibCheck": true, + "strictNullChecks": false, + "noImplicitAny": false, + "strictBindCallApply": false, + "forceConsistentCasingInFileNames": false, + "noFallthroughCasesInSwitch": false + } +}