From 3c1d40b9045a6e64872399717e60947c12db4f74 Mon Sep 17 00:00:00 2001 From: Bram Meerten Date: Thu, 3 Apr 2025 21:27:51 +0200 Subject: [PATCH 1/2] feat: Add finishReason field to NoObjectGeneratedError. This can indicate why an error such as JsonParseError occurred, for example if finishReason is 'length' because max token size has been reached. --- .changeset/flat-plums-bake.md | 8 ++++++++ .../ai-no-object-generated-error.mdx | 2 ++ .../ai/core/generate-object/generate-object.test.ts | 1 + packages/ai/core/generate-object/generate-object.ts | 4 ++++ packages/ai/core/generate-object/output-strategy.ts | 8 +++++++- .../ai/core/generate-object/stream-object.test.ts | 6 ++++++ packages/ai/core/generate-object/stream-object.ts | 1 + packages/ai/core/generate-text/generate-text.ts | 6 +++++- packages/ai/core/generate-text/output.test.ts | 4 ++++ packages/ai/core/generate-text/output.ts | 5 +++++ packages/ai/errors/no-object-generated-error.ts | 13 +++++++++++++ 11 files changed, 56 insertions(+), 2 deletions(-) create mode 100644 .changeset/flat-plums-bake.md diff --git a/.changeset/flat-plums-bake.md b/.changeset/flat-plums-bake.md new file mode 100644 index 000000000000..7adf57552633 --- /dev/null +++ b/.changeset/flat-plums-bake.md @@ -0,0 +1,8 @@ +--- +'ai': minor +--- + +Add `finishReason` field to `NoObjectGeneratedError`, this can help clarify why an error occurred. `cause` is not always enough to determine the exact reason. + +For example if the max token size was reached, the (JSON) response will be incomplete. `cause` will be a `JSONParseError`, but the root cause is the token size which has been reached. 
diff --git a/content/docs/07-reference/05-ai-sdk-errors/ai-no-object-generated-error.mdx b/content/docs/07-reference/05-ai-sdk-errors/ai-no-object-generated-error.mdx index bac1c98474ff..ef98315a0d31 100644 --- a/content/docs/07-reference/05-ai-sdk-errors/ai-no-object-generated-error.mdx +++ b/content/docs/07-reference/05-ai-sdk-errors/ai-no-object-generated-error.mdx @@ -18,6 +18,7 @@ It can arise due to the following reasons: - `text`: The text that was generated by the model. This can be the raw text or the tool call text, depending on the object generation mode. - `response`: Metadata about the language model response, including response id, timestamp, and model. - `usage`: Request token usage. +- `finishReason`: Request finish reason. For example 'length' if model generated maximum number of tokens, this could result in a JSON parsing error. - `cause`: The cause of the error (e.g. a JSON parsing error). You can use this for more detailed error handling. ## Checking for this Error @@ -36,6 +37,7 @@ try { console.log('Text:', error.text); console.log('Response:', error.response); console.log('Usage:', error.usage); + console.log('Finish Reason:', error.finishReason); } } ``` diff --git a/packages/ai/core/generate-object/generate-object.test.ts b/packages/ai/core/generate-object/generate-object.test.ts index 0686c05c4494..80747e4f286c 100644 --- a/packages/ai/core/generate-object/generate-object.test.ts +++ b/packages/ai/core/generate-object/generate-object.test.ts @@ -801,6 +801,7 @@ describe('output = "object"', () => { promptTokens: 10, totalTokens: 30, }, + finishReason: 'stop', }); } diff --git a/packages/ai/core/generate-object/generate-object.ts b/packages/ai/core/generate-object/generate-object.ts index 6284a12d8262..1a2e279d3726 100644 --- a/packages/ai/core/generate-object/generate-object.ts +++ b/packages/ai/core/generate-object/generate-object.ts @@ -556,6 +556,7 @@ export async function generateObject({ 'No object generated: the model did not return 
a response.', response: responseData, usage: calculateLanguageModelUsage(result.usage), + finishReason: result.finishReason, }); } @@ -681,6 +682,7 @@ export async function generateObject({ message: 'No object generated: the tool was not called.', response: responseData, usage: calculateLanguageModelUsage(result.usage), + finishReason: result.finishReason, }); } @@ -751,6 +753,7 @@ export async function generateObject({ text: result, response, usage: calculateLanguageModelUsage(usage), + finishReason: finishReason, }); } @@ -770,6 +773,7 @@ export async function generateObject({ text: result, response, usage: calculateLanguageModelUsage(usage), + finishReason: finishReason, }); } diff --git a/packages/ai/core/generate-object/output-strategy.ts b/packages/ai/core/generate-object/output-strategy.ts index 07c9d70ecd38..6da624bd331c 100644 --- a/packages/ai/core/generate-object/output-strategy.ts +++ b/packages/ai/core/generate-object/output-strategy.ts @@ -16,7 +16,11 @@ import { createAsyncIterableStream, } from '../util/async-iterable-stream'; import { ObjectStreamPart } from './stream-object-result'; -import { LanguageModelResponseMetadata, LanguageModelUsage } from '../types'; +import { + FinishReason, + LanguageModelResponseMetadata, + LanguageModelUsage, +} from '../types'; export interface OutputStrategy { readonly type: 'object' | 'array' | 'enum' | 'no-schema'; @@ -64,6 +68,7 @@ const noSchemaOutputStrategy: OutputStrategy = { text: string; response: LanguageModelResponseMetadata; usage: LanguageModelUsage; + finishReason: FinishReason; }, ): ValidationResult { return value === undefined @@ -74,6 +79,7 @@ const noSchemaOutputStrategy: OutputStrategy = { text: context.text, response: context.response, usage: context.usage, + finishReason: context.finishReason, }), } : { success: true, value }; diff --git a/packages/ai/core/generate-object/stream-object.test.ts b/packages/ai/core/generate-object/stream-object.test.ts index a645b1943159..0414a6d26c29 100644 --- 
a/packages/ai/core/generate-object/stream-object.test.ts +++ b/packages/ai/core/generate-object/stream-object.test.ts @@ -1311,6 +1311,7 @@ describe('streamObject', () => { modelId: 'model-1', }, usage: { completionTokens: 10, promptTokens: 3, totalTokens: 13 }, + finishReason: 'stop', }); } }); @@ -1354,6 +1355,7 @@ describe('streamObject', () => { modelId: 'model-1', }, usage: { completionTokens: 10, promptTokens: 3, totalTokens: 13 }, + finishReason: 'stop', }); } }); @@ -1403,6 +1405,7 @@ describe('streamObject', () => { modelId: 'model-1', }, usage: { completionTokens: 10, promptTokens: 3, totalTokens: 13 }, + finishReason: 'stop', }); } }); @@ -1446,6 +1449,7 @@ describe('streamObject', () => { modelId: 'model-1', }, usage: { completionTokens: 10, promptTokens: 3, totalTokens: 13 }, + finishReason: 'stop', }); } }); @@ -1488,6 +1492,7 @@ describe('streamObject', () => { modelId: 'model-1', }, usage: { completionTokens: 10, promptTokens: 3, totalTokens: 13 }, + finishReason: 'stop', }); } }); @@ -1530,6 +1535,7 @@ describe('streamObject', () => { modelId: 'model-1', }, usage: { completionTokens: 10, promptTokens: 3, totalTokens: 13 }, + finishReason: 'stop', }); } }); diff --git a/packages/ai/core/generate-object/stream-object.ts b/packages/ai/core/generate-object/stream-object.ts index 0cc5607bcf59..9f79cd1c3184 100644 --- a/packages/ai/core/generate-object/stream-object.ts +++ b/packages/ai/core/generate-object/stream-object.ts @@ -898,6 +898,7 @@ class DefaultStreamObjectResult text: accumulatedText, response, usage, + finishReason: finishReason, }); self.objectPromise.reject(error); } diff --git a/packages/ai/core/generate-text/generate-text.ts b/packages/ai/core/generate-text/generate-text.ts index 58b82664b47b..3f4293114b3f 100644 --- a/packages/ai/core/generate-text/generate-text.ts +++ b/packages/ai/core/generate-text/generate-text.ts @@ -576,7 +576,11 @@ A function that attempts to repair a tool call that failed to parse. 
return output.parseOutput( { text }, - { response: currentModelResponse.response, usage }, + { + response: currentModelResponse.response, + usage, + finishReason: currentModelResponse.finishReason, + }, ); }, toolCalls: currentToolCalls, diff --git a/packages/ai/core/generate-text/output.test.ts b/packages/ai/core/generate-text/output.test.ts index adde6bb5b610..d0a839036008 100644 --- a/packages/ai/core/generate-text/output.test.ts +++ b/packages/ai/core/generate-text/output.test.ts @@ -2,6 +2,7 @@ import { fail } from 'assert'; import { z } from 'zod'; import { verifyNoObjectGeneratedError } from '../../errors/no-object-generated-error'; import { object } from './output'; +import { FinishReason } from '../types'; const context = { response: { @@ -14,6 +15,7 @@ const context = { completionTokens: 2, totalTokens: 3, }, + finishReason: 'length' as FinishReason, }; describe('Output.object', () => { @@ -37,6 +39,7 @@ describe('Output.object', () => { message: 'No object generated: could not parse the response.', response: context.response, usage: context.usage, + finishReason: context.finishReason, }); } }); @@ -50,6 +53,7 @@ describe('Output.object', () => { message: 'No object generated: response did not match schema.', response: context.response, usage: context.usage, + finishReason: context.finishReason, }); } }); diff --git a/packages/ai/core/generate-text/output.ts b/packages/ai/core/generate-text/output.ts index 7d54fccbbb11..faa632a6f7a2 100644 --- a/packages/ai/core/generate-text/output.ts +++ b/packages/ai/core/generate-text/output.ts @@ -9,6 +9,7 @@ import { z } from 'zod'; import { NoObjectGeneratedError } from '../../errors'; import { injectJsonInstruction } from '../generate-object/inject-json-instruction'; import { + FinishReason, LanguageModel, LanguageModelV1CallOptions, } from '../types/language-model'; @@ -33,6 +34,7 @@ export interface Output { context: { response: LanguageModelResponseMetadata; usage: LanguageModelUsage; + finishReason: 
FinishReason; }, ): OUTPUT; } @@ -108,6 +110,7 @@ export const object = ({ context: { response: LanguageModelResponseMetadata; usage: LanguageModelUsage; + finishReason: FinishReason; }, ) { const parseResult = safeParseJSON({ text }); @@ -119,6 +122,7 @@ export const object = ({ text, response: context.response, usage: context.usage, + finishReason: context.finishReason, }); } @@ -134,6 +138,7 @@ export const object = ({ text, response: context.response, usage: context.usage, + finishReason: context.finishReason, }); } diff --git a/packages/ai/errors/no-object-generated-error.ts b/packages/ai/errors/no-object-generated-error.ts index 77ec7f5f1d52..06b7c05846c9 100644 --- a/packages/ai/errors/no-object-generated-error.ts +++ b/packages/ai/errors/no-object-generated-error.ts @@ -1,6 +1,7 @@ import { AISDKError } from '@ai-sdk/provider'; import { LanguageModelResponseMetadata } from '../core/types/language-model-response-metadata'; import { LanguageModelUsage } from '../core/types/usage'; +import { FinishReason } from '../core'; const name = 'AI_NoObjectGeneratedError'; const marker = `vercel.ai.error.${name}`; @@ -35,24 +36,32 @@ export class NoObjectGeneratedError extends AISDKError { */ readonly usage: LanguageModelUsage | undefined; + /** + Reason why the model finished generating a response. 
+ */ + readonly finishReason: FinishReason | undefined; + constructor({ message = 'No object generated.', cause, text, response, usage, + finishReason, }: { message?: string; cause?: Error; text?: string; response: LanguageModelResponseMetadata; usage: LanguageModelUsage; + finishReason: FinishReason; }) { super({ name, message, cause }); this.text = text; this.response = response; this.usage = usage; + this.finishReason = finishReason; } static isInstance(error: unknown): error is NoObjectGeneratedError { @@ -66,6 +75,7 @@ export function verifyNoObjectGeneratedError( message: string; response: LanguageModelResponseMetadata; usage: LanguageModelUsage; + finishReason: FinishReason; }, ) { expect(NoObjectGeneratedError.isInstance(error)).toBeTruthy(); @@ -73,4 +83,7 @@ export function verifyNoObjectGeneratedError( expect(noObjectGeneratedError.message).toStrictEqual(expected.message); expect(noObjectGeneratedError.response).toStrictEqual(expected.response); expect(noObjectGeneratedError.usage).toStrictEqual(expected.usage); + expect(noObjectGeneratedError.finishReason).toStrictEqual( + expected.finishReason, + ); } From 46d9a1c7690957b76918edeaaccc0e3f9d822ed9 Mon Sep 17 00:00:00 2001 From: Bram Meerten Date: Fri, 4 Apr 2025 15:04:04 +0200 Subject: [PATCH 2/2] chore (core) Update changeset --- .changeset/flat-plums-bake.md | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/.changeset/flat-plums-bake.md b/.changeset/flat-plums-bake.md index 7adf57552633..d7092eca4757 100644 --- a/.changeset/flat-plums-bake.md +++ b/.changeset/flat-plums-bake.md @@ -2,7 +2,4 @@ 'ai': minor --- -Add `finishReason` field to `NoObjectGeneratedError`, this can help clarify why an error occurred. `cause` is not always enough to determine the exact reason. - -For example if the max token size was reached, the (JSON) response will be incomplete. `cause` will be a `JSONParseError`, but the root cause is the token size which has been reached. 
-By adding `finishReason` (which info is already available where we throw the error) we can see that the value is 'length' and that the max token size has been reached. +feat (core): Add finishReason field to NoObjectGeneratedError