From 44fed8d5507cd33ed921fa49569b53ab989eae49 Mon Sep 17 00:00:00 2001 From: Justin Guckes Date: Tue, 10 Feb 2026 02:13:04 +0100 Subject: [PATCH] feat: update @anthropic-ai/sdk to version 0.74.0 and use real structured output --- .changeset/real-structured-output.md | 11 + packages/typescript/ai-anthropic/package.json | 2 +- .../ai-anthropic/src/adapters/text.ts | 210 +++++++++++++----- .../typescript/ai-anthropic/src/model-meta.ts | 25 +++ .../src/text/text-provider-options.ts | 59 +++-- .../ai-anthropic/src/tools/bash-tool.ts | 7 +- .../ai-anthropic/src/tools/index.ts | 7 + .../ai-anthropic/src/tools/tool-converter.ts | 12 +- .../tests/anthropic-adapter.test.ts | 155 +++++++++++-- pnpm-lock.yaml | 10 +- 10 files changed, 378 insertions(+), 120 deletions(-) create mode 100644 .changeset/real-structured-output.md diff --git a/.changeset/real-structured-output.md b/.changeset/real-structured-output.md new file mode 100644 index 00000000..4cc86361 --- /dev/null +++ b/.changeset/real-structured-output.md @@ -0,0 +1,11 @@ +--- +'@tanstack/ai-anthropic': minor +--- + +Use Anthropic's native structured output API instead of the tool-use workaround + +Upgrades `@anthropic-ai/sdk` from ^0.71.2 to ^0.74.0 and migrates structured output to use the GA `output_config.format` with `json_schema` type. Previously, structured output was emulated by forcing a tool call and extracting the input — for Claude 4+ models this now uses Anthropic's first-class structured output support for more reliable schema-constrained responses, while older Claude 3.x models continue to use the tool-based fallback. + +Also migrates streaming and tool types from `client.beta.messages` to the stable `client.messages` API, replacing beta type imports (`BetaToolChoiceAuto`, `BetaToolBash20241022`, `BetaRawMessageStreamEvent`, etc.) with their GA equivalents. 
+ +**No breaking changes to runtime behavior.** However, this is a **type-level breaking change** for TypeScript consumers who import tool choice or streaming types directly: the beta type exports (`BetaToolChoiceAuto`, `BetaToolChoiceTool`, `BetaRawMessageStreamEvent`, etc.) have been replaced with their GA equivalents (`ToolChoiceAuto`, `ToolChoiceTool`, `RawMessageStreamEvent`, etc.) from `@anthropic-ai/sdk/resources/messages`. Consumers referencing these types will need to update both the import paths and the type names accordingly. diff --git a/packages/typescript/ai-anthropic/package.json b/packages/typescript/ai-anthropic/package.json index eecbe2a6..58ab8087 100644 --- a/packages/typescript/ai-anthropic/package.json +++ b/packages/typescript/ai-anthropic/package.json @@ -40,7 +40,7 @@ "test:types": "tsc" }, "dependencies": { - "@anthropic-ai/sdk": "^0.71.2" + "@anthropic-ai/sdk": "^0.74.0" }, "peerDependencies": { "@tanstack/ai": "workspace:^", diff --git a/packages/typescript/ai-anthropic/src/adapters/text.ts b/packages/typescript/ai-anthropic/src/adapters/text.ts index a4f41bbb..8c3139e7 100644 --- a/packages/typescript/ai-anthropic/src/adapters/text.ts +++ b/packages/typescript/ai-anthropic/src/adapters/text.ts @@ -1,31 +1,24 @@ import { BaseTextAdapter } from '@tanstack/ai/adapters' -import { convertToolsToProviderFormat } from '../tools/tool-converter' +import { ANTHROPIC_STRUCTURED_OUTPUT_MODELS } from '../model-meta' import { validateTextProviderOptions } from '../text/text-provider-options' +import { convertToolsToProviderFormat } from '../tools/tool-converter' import { createAnthropicClient, generateId, getAnthropicApiKeyFromEnv, } from '../utils' -import type { - ANTHROPIC_MODELS, - AnthropicChatModelProviderOptionsByName, - AnthropicModelInputModalitiesByName, -} from '../model-meta' -import type { - StructuredOutputOptions, - StructuredOutputResult, -} from '@tanstack/ai/adapters' +import type Anthropic_SDK from '@anthropic-ai/sdk' import type { 
Base64ImageSource, Base64PDFSource, DocumentBlockParam, ImageBlockParam, MessageParam, + RawMessageStreamEvent, TextBlockParam, URLImageSource, URLPDFSource, } from '@anthropic-ai/sdk/resources/messages' -import type Anthropic_SDK from '@anthropic-ai/sdk' import type { ContentPart, Modality, @@ -34,15 +27,24 @@ import type { TextOptions, } from '@tanstack/ai' import type { - ExternalTextProviderOptions, - InternalTextProviderOptions, -} from '../text/text-provider-options' + StructuredOutputOptions, + StructuredOutputResult, +} from '@tanstack/ai/adapters' import type { AnthropicDocumentMetadata, AnthropicImageMetadata, AnthropicMessageMetadataByModality, AnthropicTextMetadata, } from '../message-types' +import type { + ANTHROPIC_MODELS, + AnthropicChatModelProviderOptionsByName, + AnthropicModelInputModalitiesByName, +} from '../model-meta' +import type { + ExternalTextProviderOptions, + InternalTextProviderOptions, +} from '../text/text-provider-options' import type { AnthropicClientConfig } from '../utils' /** @@ -115,13 +117,16 @@ export class AnthropicTextAdapter< this.client = createAnthropicClient(config) } + /** + * Stream chat completions from Anthropic, yielding AG-UI lifecycle chunks. + */ async *chatStream( options: TextOptions, ): AsyncIterable { try { const requestParams = this.mapCommonOptionsToAnthropic(options) - const stream = await this.client.beta.messages.create( + const stream = await this.client.messages.create( { ...requestParams, stream: true }, { signal: options.request?.signal, @@ -147,34 +152,110 @@ export class AnthropicTextAdapter< } /** - * Generate structured output using Anthropic's tool-based approach. - * Anthropic doesn't have native structured output, so we use a tool with the schema - * and force the model to call it. - * The outputSchema is already JSON Schema (converted in the ai layer). + * Generate structured output. + * Uses Anthropic's native `output_config` with `json_schema` for Claude 4+ models. 
+ * Falls back to a tool-use workaround for older models that lack native support. */ async structuredOutput( options: StructuredOutputOptions, ): Promise> { const { chatOptions, outputSchema } = options - const requestParams = this.mapCommonOptionsToAnthropic(chatOptions) - // Create a tool that will capture the structured output - // Anthropic's SDK requires input_schema with type: 'object' literal + if (ANTHROPIC_STRUCTURED_OUTPUT_MODELS.has(chatOptions.model)) { + return this.nativeStructuredOutput( + requestParams, + chatOptions, + outputSchema, + ) + } + + return this.toolBasedStructuredOutput( + requestParams, + chatOptions, + outputSchema, + ) + } + + /** + * Native structured output using `output_config.format` with `json_schema`. + * Supported by Claude 4+ models. + */ + private async nativeStructuredOutput( + requestParams: InternalTextProviderOptions, + chatOptions: StructuredOutputOptions['chatOptions'], + outputSchema: StructuredOutputOptions['outputSchema'], + ): Promise> { + const createParams = { + ...requestParams, + stream: false as const, + output_config: { + format: { + type: 'json_schema' as const, + name: 'structured_output', + schema: outputSchema, + }, + }, + } + + let response: Awaited> + try { + response = await this.client.messages.create(createParams, { + signal: chatOptions.request?.signal, + headers: chatOptions.request?.headers, + }) + } catch (error: unknown) { + const err = error as Error + throw new Error( + `Structured output generation failed: ${err.message || 'Unknown error occurred'}`, + ) + } + + const rawText = response.content + .map((b) => { + if (b.type === 'text') { + return b.text + } + return '' + }) + .join('') + + let parsed: unknown + try { + parsed = JSON.parse(rawText) + } catch { + throw new Error( + `Failed to parse structured output JSON. Content: ${rawText.slice(0, 200)}${rawText.length > 200 ? '...' 
: ''}`, + ) + } + + return { data: parsed, rawText } + } + + /** + * Tool-based structured output fallback for older models (Claude 3.x). + * Creates a tool with the output schema and forces the model to call it. + */ + private async toolBasedStructuredOutput( + requestParams: InternalTextProviderOptions, + chatOptions: StructuredOutputOptions['chatOptions'], + outputSchema: StructuredOutputOptions['outputSchema'], + ): Promise> { const structuredOutputTool = { name: 'structured_output', description: 'Use this tool to provide your response in the required structured format.', input_schema: { + ...outputSchema, type: 'object' as const, properties: outputSchema.properties ?? {}, required: outputSchema.required ?? [], }, } + let response: Awaited> try { - // Make non-streaming request with tool_choice forced to our structured output tool - const response = await this.client.messages.create( + response = await this.client.messages.create( { ...requestParams, stream: false, @@ -186,50 +267,48 @@ export class AnthropicTextAdapter< headers: chatOptions.request?.headers, }, ) - - // Extract the tool use content from the response - let parsed: unknown = null - let rawText = '' - - for (const block of response.content) { - if (block.type === 'tool_use' && block.name === 'structured_output') { - parsed = block.input - rawText = JSON.stringify(block.input) - break - } - } - - if (parsed === null) { - // Fallback: try to extract text content and parse as JSON - rawText = response.content - .map((b) => { - if (b.type === 'text') { - return b.text - } - return '' - }) - .join('') - try { - parsed = JSON.parse(rawText) - } catch { - throw new Error( - `Failed to extract structured output from response. Content: ${rawText.slice(0, 200)}${rawText.length > 200 ? '...' 
: ''}`, - ) - } - } - - return { - data: parsed, - rawText, - } } catch (error: unknown) { const err = error as Error throw new Error( `Structured output generation failed: ${err.message || 'Unknown error occurred'}`, ) } + + let parsed: unknown = null + let rawText = '' + + for (const block of response.content) { + if (block.type === 'tool_use' && block.name === 'structured_output') { + parsed = block.input + rawText = JSON.stringify(block.input) + break + } + } + + if (parsed === null) { + rawText = response.content + .map((b) => { + if (b.type === 'text') { + return b.text + } + return '' + }) + .join('') + try { + parsed = JSON.parse(rawText) + } catch { + throw new Error( + `Failed to extract structured output from response. Content: ${rawText.slice(0, 200)}${rawText.length > 200 ? '...' : ''}`, + ) + } + } + + return { data: parsed, rawText } } + /** + * Map framework-agnostic text options to the Anthropic request format. + */ private mapCommonOptionsToAnthropic( options: TextOptions, ) { @@ -293,6 +372,9 @@ export class AnthropicTextAdapter< return requestParams } + /** + * Convert a framework-agnostic content part to an Anthropic content block. + */ private convertContentPartToAnthropic( part: ContentPart, ): TextBlockParam | ImageBlockParam | DocumentBlockParam { @@ -362,6 +444,9 @@ export class AnthropicTextAdapter< } } + /** + * Convert framework-agnostic messages to Anthropic's message format. + */ private formatMessages( messages: Array, ): InternalTextProviderOptions['messages'] { @@ -453,8 +538,11 @@ export class AnthropicTextAdapter< return formattedMessages } + /** + * Process a raw Anthropic SSE stream into AG-UI lifecycle chunks. 
+ */ private async *processAnthropicStream( - stream: AsyncIterable, + stream: AsyncIterable, model: string, genId: () => string, ): AsyncIterable { diff --git a/packages/typescript/ai-anthropic/src/model-meta.ts b/packages/typescript/ai-anthropic/src/model-meta.ts index 1f642b86..27eaf5ca 100644 --- a/packages/typescript/ai-anthropic/src/model-meta.ts +++ b/packages/typescript/ai-anthropic/src/model-meta.ts @@ -20,6 +20,7 @@ interface ModelMeta< input: Array<'text' | 'image' | 'audio' | 'video' | 'document'> extended_thinking?: boolean priority_tier?: boolean + structured_output?: boolean } context_window?: number max_output_tokens?: number @@ -65,6 +66,7 @@ const CLAUDE_OPUS_4_6 = { input: ['text', 'image', 'document'], extended_thinking: true, priority_tier: true, + structured_output: true, }, } as const satisfies ModelMeta< AnthropicContainerOptions & @@ -95,6 +97,7 @@ const CLAUDE_OPUS_4_5 = { input: ['text', 'image', 'document'], extended_thinking: true, priority_tier: true, + structured_output: true, }, } as const satisfies ModelMeta< AnthropicContainerOptions & @@ -125,6 +128,7 @@ const CLAUDE_SONNET_4_5 = { input: ['text', 'image', 'document'], extended_thinking: true, priority_tier: true, + structured_output: true, }, } as const satisfies ModelMeta< AnthropicContainerOptions & @@ -155,6 +159,7 @@ const CLAUDE_HAIKU_4_5 = { input: ['text', 'image', 'document'], extended_thinking: true, priority_tier: true, + structured_output: true, }, } as const satisfies ModelMeta< AnthropicContainerOptions & @@ -185,6 +190,7 @@ const CLAUDE_OPUS_4_1 = { input: ['text', 'image', 'document'], extended_thinking: true, priority_tier: true, + structured_output: true, }, } as const satisfies ModelMeta< AnthropicContainerOptions & @@ -215,6 +221,7 @@ const CLAUDE_SONNET_4 = { input: ['text', 'image', 'document'], extended_thinking: true, priority_tier: true, + structured_output: true, }, } as const satisfies ModelMeta< AnthropicContainerOptions & @@ -244,6 +251,7 @@ const 
CLAUDE_SONNET_3_7 = { input: ['text', 'image', 'document'], extended_thinking: true, priority_tier: true, + structured_output: false, }, } as const satisfies ModelMeta< AnthropicContainerOptions & @@ -274,6 +282,7 @@ const CLAUDE_OPUS_4 = { input: ['text', 'image', 'document'], extended_thinking: true, priority_tier: true, + structured_output: true, }, } as const satisfies ModelMeta< AnthropicContainerOptions & @@ -304,6 +313,7 @@ const CLAUDE_HAIKU_3_5 = { input: ['text', 'image', 'document'], extended_thinking: false, priority_tier: true, + structured_output: false, }, } as const satisfies ModelMeta< AnthropicContainerOptions & @@ -334,6 +344,7 @@ const CLAUDE_HAIKU_3 = { input: ['text', 'image', 'document'], extended_thinking: false, priority_tier: false, + structured_output: false, }, } as const satisfies ModelMeta< AnthropicContainerOptions & @@ -404,6 +415,20 @@ export const ANTHROPIC_MODELS = [ CLAUDE_HAIKU_3.id, ] as const +/** + * Models that support Anthropic's native structured output API (output_config with json_schema). + * Only Claude 4+ models support this feature. 
+ */ +export const ANTHROPIC_STRUCTURED_OUTPUT_MODELS: ReadonlySet = new Set([ + CLAUDE_OPUS_4_6.id, + CLAUDE_OPUS_4_5.id, + CLAUDE_SONNET_4_5.id, + CLAUDE_HAIKU_4_5.id, + CLAUDE_OPUS_4_1.id, + CLAUDE_SONNET_4.id, + CLAUDE_OPUS_4.id, +]) + // const ANTHROPIC_IMAGE_MODELS = [] as const // const ANTHROPIC_EMBEDDING_MODELS = [] as const // const ANTHROPIC_AUDIO_MODELS = [] as const diff --git a/packages/typescript/ai-anthropic/src/text/text-provider-options.ts b/packages/typescript/ai-anthropic/src/text/text-provider-options.ts index 8c4dfecc..518ddaad 100644 --- a/packages/typescript/ai-anthropic/src/text/text-provider-options.ts +++ b/packages/typescript/ai-anthropic/src/text/text-provider-options.ts @@ -1,13 +1,11 @@ -import type { - BetaContextManagementConfig, - BetaToolChoiceAny, - BetaToolChoiceAuto, - BetaToolChoiceTool, -} from '@anthropic-ai/sdk/resources/beta/messages/messages' -import type { AnthropicTool } from '../tools' +import type { BetaContextManagementConfig } from '@anthropic-ai/sdk/resources/beta/messages/messages' import type { MessageParam, TextBlockParam, + ToolChoiceAny, + ToolChoiceAuto, + ToolChoiceTool, + ToolUnion, } from '@anthropic-ai/sdk/resources/messages' export interface AnthropicContainerOptions { @@ -38,8 +36,8 @@ export interface AnthropicContainerOptions { export interface AnthropicContextManagementOptions { /** * Context management configuration. - -This allows you to control how Claude manages context across multiple requests, such as whether to clear function results or not. + * + * This allows you to control how Claude manages context across multiple requests, such as whether to clear function results or not. */ context_management?: BetaContextManagementConfig | null } @@ -62,10 +60,10 @@ export interface AnthropicServiceTierOptions { export interface AnthropicStopSequencesOptions { /** * Custom text sequences that will cause the model to stop generating. 
- -Anthropic models will normally stop when they have naturally completed their turn, which will result in a response stop_reason of "end_turn". - -If you want the model to stop generating when it encounters custom strings of text, you can use the stop_sequences parameter. If the model encounters one of the custom sequences, the response stop_reason value will be "stop_sequence" and the response stop_sequence value will contain the matched stop sequence. + * + * Anthropic models will normally stop when they have naturally completed their turn, which will result in a response stop_reason of "end_turn". + * + * If you want the model to stop generating when it encounters custom strings of text, you can use the stop_sequences parameter. If the model encounters one of the custom sequences, the response stop_reason value will be "stop_sequence" and the response stop_sequence value will contain the matched stop sequence. */ stop_sequences?: Array } @@ -73,15 +71,15 @@ If you want the model to stop generating when it encounters custom strings of te export interface AnthropicThinkingOptions { /** * Configuration for enabling Claude's extended thinking. - -When enabled, responses include thinking content blocks showing Claude's thinking process before the final answer. Requires a minimum budget of 1,024 tokens and counts towards your max_tokens limit. + * + * When enabled, responses include thinking content blocks showing Claude's thinking process before the final answer. Requires a minimum budget of 1,024 tokens and counts towards your max_tokens limit. */ thinking?: | { /** * Determines how many tokens Claude can use for its internal reasoning process. Larger budgets can enable more thorough analysis for complex problems, improving response quality. 
- -Must be ≥1024 and less than max_tokens +* +* Must be ≥1024 and less than max_tokens */ budget_tokens: number @@ -93,17 +91,17 @@ Must be ≥1024 and less than max_tokens } export interface AnthropicToolChoiceOptions { - tool_choice?: BetaToolChoiceAny | BetaToolChoiceTool | BetaToolChoiceAuto + tool_choice?: ToolChoiceAny | ToolChoiceTool | ToolChoiceAuto } export interface AnthropicSamplingOptions { /** * Only sample from the top K options for each subsequent token. - -Used to remove "long tail" low probability responses. -Recommended for advanced use cases only. You usually only need to use temperature. - -Required range: x >= 0 + * + * Used to remove "long tail" low probability responses. + * Recommended for advanced use cases only. You usually only need to use temperature. + * + * Required range: x >= 0 */ top_k?: number } @@ -132,10 +130,11 @@ export interface InternalTextProviderOptions extends ExternalTextProviderOptions */ stream?: boolean /** - * stem prompt. - - A system prompt is a way of providing context and instructions to Claude, such as specifying a particular goal or role. - */ + * System prompt. + * + * A system prompt is a way of providing context and instructions to Claude, + * such as specifying a particular goal or role. + */ system?: string | Array /** * Amount of randomness injected into the response. @@ -145,12 +144,12 @@ export interface InternalTextProviderOptions extends ExternalTextProviderOptions */ temperature?: number - tools?: Array + tools?: Array /** * Use nucleus sampling. - -In nucleus sampling, we compute the cumulative distribution over all the options for each subsequent token in decreasing probability order and cut it off once it reaches a particular probability specified by top_p. You should either alter temperature or top_p, but not both. 
+ * + * In nucleus sampling, we compute the cumulative distribution over all the options for each subsequent token in decreasing probability order and cut it off once it reaches a particular probability specified by top_p. You should either alter temperature or top_p, but not both. */ top_p?: number } diff --git a/packages/typescript/ai-anthropic/src/tools/bash-tool.ts b/packages/typescript/ai-anthropic/src/tools/bash-tool.ts index b1b6abea..e6400d8e 100644 --- a/packages/typescript/ai-anthropic/src/tools/bash-tool.ts +++ b/packages/typescript/ai-anthropic/src/tools/bash-tool.ts @@ -1,10 +1,7 @@ -import type { - BetaToolBash20241022, - BetaToolBash20250124, -} from '@anthropic-ai/sdk/resources/beta' +import type { ToolBash20250124 } from '@anthropic-ai/sdk/resources/messages' import type { Tool } from '@tanstack/ai' -export type BashTool = BetaToolBash20241022 | BetaToolBash20250124 +export type BashTool = ToolBash20250124 export function convertBashToolToAdapterFormat(tool: Tool): BashTool { const metadata = tool.metadata as BashTool diff --git a/packages/typescript/ai-anthropic/src/tools/index.ts b/packages/typescript/ai-anthropic/src/tools/index.ts index 5012b2b5..154de0f2 100644 --- a/packages/typescript/ai-anthropic/src/tools/index.ts +++ b/packages/typescript/ai-anthropic/src/tools/index.ts @@ -1,3 +1,4 @@ +import type { ToolUnion } from '@anthropic-ai/sdk/resources/messages' import type { BashTool } from './bash-tool' import type { CodeExecutionTool } from './code-execution-tool' import type { ComputerUseTool } from './computer-use-tool' @@ -7,7 +8,13 @@ import type { TextEditorTool } from './text-editor-tool' import type { WebFetchTool } from './web-fetch-tool' import type { WebSearchTool } from './web-search-tool' +/** + * Union of all Anthropic tool types supported by this adapter. + * Includes GA tools (via ToolUnion) and beta-only tools that + * have no GA equivalent yet. 
+ */ export type AnthropicTool = + | ToolUnion | BashTool | CodeExecutionTool | ComputerUseTool diff --git a/packages/typescript/ai-anthropic/src/tools/tool-converter.ts b/packages/typescript/ai-anthropic/src/tools/tool-converter.ts index 4ca43c38..cab52b97 100644 --- a/packages/typescript/ai-anthropic/src/tools/tool-converter.ts +++ b/packages/typescript/ai-anthropic/src/tools/tool-converter.ts @@ -6,11 +6,11 @@ import { convertMemoryToolToAdapterFormat } from './memory-tool' import { convertTextEditorToolToAdapterFormat } from './text-editor-tool' import { convertWebFetchToolToAdapterFormat } from './web-fetch-tool' import { convertWebSearchToolToAdapterFormat } from './web-search-tool' -import type { AnthropicTool } from '.' +import type { ToolUnion } from '@anthropic-ai/sdk/resources/messages' import type { Tool } from '@tanstack/ai' /** - * Converts standard Tool format to Anthropic-specific tool format + * Converts standard Tool format to Anthropic-specific tool format. * * @param tools - Array of standard Tool objects * @returns Array of Anthropic-specific tool definitions @@ -32,10 +32,14 @@ import type { Tool } from '@tanstack/ai' * * const anthropicTools = convertToolsToProviderFormat(tools); * ``` + * + * Returns Array for compatibility with the stable messages API. + * Beta-only tools (ComputerUse, CodeExecution, Memory, WebFetch) are + * structurally compatible at runtime but not part of the GA ToolUnion type. 
*/ export function convertToolsToProviderFormat( tools: Array, -): Array { +): Array { return tools.map((tool) => { const name = tool.name @@ -57,5 +61,5 @@ export function convertToolsToProviderFormat( default: return convertCustomToolToAdapterFormat(tool) } - }) + }) as Array } diff --git a/packages/typescript/ai-anthropic/tests/anthropic-adapter.test.ts b/packages/typescript/ai-anthropic/tests/anthropic-adapter.test.ts index 5e3db434..0b871810 100644 --- a/packages/typescript/ai-anthropic/tests/anthropic-adapter.test.ts +++ b/packages/typescript/ai-anthropic/tests/anthropic-adapter.test.ts @@ -5,28 +5,21 @@ import type { AnthropicTextProviderOptions } from '../src/adapters/text' import { z } from 'zod' const mocks = vi.hoisted(() => { - const betaMessagesCreate = vi.fn() const messagesCreate = vi.fn() const client = { - beta: { - messages: { - create: betaMessagesCreate, - }, - }, messages: { create: messagesCreate, }, } - return { betaMessagesCreate, messagesCreate, client } + return { messagesCreate, client } }) vi.mock('@anthropic-ai/sdk', () => { const { client } = mocks class MockAnthropic { - beta = client.beta messages = client.messages constructor(_: { apiKey: string }) {} @@ -35,9 +28,8 @@ vi.mock('@anthropic-ai/sdk', () => { return { default: MockAnthropic } }) -const createAdapter = ( - model: TModel, -) => new AnthropicTextAdapter({ apiKey: 'test-key' }, model) +const createAdapter = (model: TModel) => + new AnthropicTextAdapter({ apiKey: 'test-key' }, model as any) const toolArguments = JSON.stringify({ location: 'Berlin' }) @@ -77,7 +69,7 @@ describe('Anthropic adapter option mapping', () => { } })() - mocks.betaMessagesCreate.mockResolvedValueOnce(mockStream) + mocks.messagesCreate.mockResolvedValueOnce(mockStream) const providerOptions = { container: { @@ -132,8 +124,8 @@ describe('Anthropic adapter option mapping', () => { chunks.push(chunk) } - expect(mocks.betaMessagesCreate).toHaveBeenCalledTimes(1) - const [payload] = 
mocks.betaMessagesCreate.mock.calls[0] + expect(mocks.messagesCreate).toHaveBeenCalledTimes(1) + const [payload] = mocks.messagesCreate.mock.calls[0] expect(payload).toMatchObject({ model: 'claude-3-7-sonnet-20250219', @@ -184,3 +176,138 @@ describe('Anthropic adapter option mapping', () => { }) }) }) + +describe('Anthropic structured output', () => { + beforeEach(() => { + vi.clearAllMocks() + }) + + it('sends output_config with json_schema and parses JSON response', async () => { + const mockResponse = { + content: [{ type: 'text', text: '{"name":"Alice","age":30}' }], + } + mocks.messagesCreate.mockResolvedValueOnce(mockResponse) + + const adapter = createAdapter('claude-sonnet-4') + + const result = await adapter.structuredOutput({ + chatOptions: { + model: 'claude-sonnet-4', + messages: [{ role: 'user', content: 'Return a person object' }], + maxTokens: 1024, + }, + outputSchema: { + type: 'object', + properties: { + name: { type: 'string' }, + age: { type: 'number' }, + }, + required: ['name', 'age'], + }, + }) + + expect(result).toEqual({ + data: { name: 'Alice', age: 30 }, + rawText: '{"name":"Alice","age":30}', + }) + + expect(mocks.messagesCreate).toHaveBeenCalledTimes(1) + const [payload] = mocks.messagesCreate.mock.calls[0] + expect(payload.stream).toBe(false) + expect(payload.output_config).toEqual({ + format: { + type: 'json_schema', + name: 'structured_output', + schema: { + type: 'object', + properties: { + name: { type: 'string' }, + age: { type: 'number' }, + }, + required: ['name', 'age'], + }, + }, + }) + }) + + it('throws when response is not valid JSON', async () => { + const mockResponse = { + content: [{ type: 'text', text: 'not valid json' }], + } + mocks.messagesCreate.mockResolvedValueOnce(mockResponse) + + const adapter = createAdapter('claude-sonnet-4') + + await expect( + adapter.structuredOutput({ + chatOptions: { + model: 'claude-sonnet-4', + messages: [{ role: 'user', content: 'Return a person object' }], + maxTokens: 1024, + }, + 
outputSchema: { type: 'object' }, + }), + ).rejects.toThrow('Failed to parse structured output JSON') + }) + + it('falls back to tool-use for older models', async () => { + const mockResponse = { + content: [ + { + type: 'tool_use', + id: 'toolu_123', + name: 'structured_output', + input: { name: 'Bob', age: 25 }, + }, + ], + } + mocks.messagesCreate.mockResolvedValueOnce(mockResponse) + + const adapter = createAdapter('claude-3-7-sonnet') + + const result = await adapter.structuredOutput({ + chatOptions: { + model: 'claude-3-7-sonnet', + messages: [{ role: 'user', content: 'Return a person object' }], + maxTokens: 1024, + }, + outputSchema: { + type: 'object', + properties: { + name: { type: 'string' }, + age: { type: 'number' }, + }, + required: ['name', 'age'], + }, + }) + + expect(result).toEqual({ + data: { name: 'Bob', age: 25 }, + rawText: '{"name":"Bob","age":25}', + }) + + expect(mocks.messagesCreate).toHaveBeenCalledTimes(1) + const [payload] = mocks.messagesCreate.mock.calls[0] + expect(payload.stream).toBe(false) + expect(payload.output_config).toBeUndefined() + expect(payload.tool_choice).toEqual({ + type: 'tool', + name: 'structured_output', + }) + expect(payload.tools).toEqual([ + { + name: 'structured_output', + description: + 'Use this tool to provide your response in the required structured format.', + input_schema: { + type: 'object', + properties: { + name: { type: 'string' }, + age: { type: 'number' }, + }, + required: ['name', 'age'], + }, + }, + ]) + }) +}) diff --git a/pnpm-lock.yaml b/pnpm-lock.yaml index b9481e64..938e1a52 100644 --- a/pnpm-lock.yaml +++ b/pnpm-lock.yaml @@ -625,8 +625,8 @@ importers: packages/typescript/ai-anthropic: dependencies: '@anthropic-ai/sdk': - specifier: ^0.71.2 - version: 0.71.2(zod@4.2.1) + specifier: ^0.74.0 + version: 0.74.0(zod@4.2.1) devDependencies: '@tanstack/ai': specifier: workspace:* @@ -1343,8 +1343,8 @@ packages: peerDependencies: zod: ^4.0.5 - '@anthropic-ai/sdk@0.71.2': - resolution: {integrity: 
sha512-TGNDEUuEstk/DKu0/TflXAEt+p+p/WhTlFzEnoosvbaDU2LTjm42igSdlL0VijrKpWejtOKxX0b8A7uc+XiSAQ==} + '@anthropic-ai/sdk@0.74.0': + resolution: {integrity: sha512-srbJV7JKsc5cQ6eVuFzjZO7UR3xEPJqPamHFIe29bs38Ij2IripoAhC0S5NslNbaFUYqBKypmmpzMTpqfHEUDw==} hasBin: true peerDependencies: zod: ^3.25.0 || ^4.0.0 @@ -8774,7 +8774,7 @@ snapshots: dependencies: zod: 4.2.1 - '@anthropic-ai/sdk@0.71.2(zod@4.2.1)': + '@anthropic-ai/sdk@0.74.0(zod@4.2.1)': dependencies: json-schema-to-ts: 3.1.1 optionalDependencies: