diff --git a/.changeset/fruity-webs-return.md b/.changeset/fruity-webs-return.md
new file mode 100644
index 000000000000..684672097026
--- /dev/null
+++ b/.changeset/fruity-webs-return.md
@@ -0,0 +1,6 @@
+---
+'@ai-sdk/openai': patch
+'@ai-sdk/azure': patch
+---
+
+enables code_interpreter and file_search capabilities in the Azure provider through the Responses API
diff --git a/content/providers/01-ai-sdk-providers/04-azure.mdx b/content/providers/01-ai-sdk-providers/04-azure.mdx
index 100297abb9aa..60ba144a5a1a 100644
--- a/content/providers/01-ai-sdk-providers/04-azure.mdx
+++ b/content/providers/01-ai-sdk-providers/04-azure.mdx
@@ -324,6 +324,69 @@ The following OpenAI-specific metadata is returned:
- **reasoningTokens** _number_
The number of reasoning tokens that the model generated.
+#### File Search
+
+The Azure OpenAI Responses API supports file search through the `azure.tools.fileSearch` tool.
+
+You can force the use of the file search tool by setting the `toolChoice` parameter to `{ type: 'tool', toolName: 'file_search' }`.
+
+```ts
+const result = await generateText({
+ model: azure.responses('gpt-5'),
+ prompt: 'What does the document say about user authentication?',
+ tools: {
+ file_search: azure.tools.fileSearch({
+ // optional configuration:
+ vectorStoreIds: ['vs_123', 'vs_456'],
+ maxNumResults: 10,
+ ranking: {
+ ranker: 'auto',
+ },
+ }),
+ },
+ // Force file search tool:
+ toolChoice: { type: 'tool', toolName: 'file_search' },
+});
+```
+
+<Note>
+  The tool must be named `file_search` when using Azure OpenAI's file search
+  functionality. This name is required by Azure OpenAI's API specification and
+  cannot be customized.
+</Note>
+
+#### Code Interpreter
+
+The Azure OpenAI Responses API supports code execution through the `azure.tools.codeInterpreter` tool. This allows models to write and execute Python code.
+
+```ts
+import { azure } from '@ai-sdk/azure';
+import { generateText } from 'ai';
+
+const result = await generateText({
+ model: azure.responses('gpt-5'),
+ prompt: 'Write and run Python code to calculate the factorial of 10',
+ tools: {
+ code_interpreter: azure.tools.codeInterpreter({
+ // optional configuration:
+ container: {
+ fileIds: ['assistant-123', 'assistant-456'], // optional file IDs to make available
+ },
+ }),
+ },
+});
+```
+
+The code interpreter tool can be configured with:
+
+- **container**: Either a container ID string or an object with `fileIds` to specify uploaded files that should be available to the code interpreter
+
+<Note>
+  The tool must be named `code_interpreter` when using Azure OpenAI's code
+  interpreter functionality. This name is required by Azure OpenAI's API
+  specification and cannot be customized.
+</Note>
+
#### PDF support
The Azure OpenAI Responses API supports reading PDF files.
diff --git a/examples/ai-core/src/generate-text/azure-responses-code-interpreter.ts b/examples/ai-core/src/generate-text/azure-responses-code-interpreter.ts
new file mode 100644
index 000000000000..03924c553e97
--- /dev/null
+++ b/examples/ai-core/src/generate-text/azure-responses-code-interpreter.ts
@@ -0,0 +1,30 @@
+import { azure } from '@ai-sdk/azure';
+import { generateText } from 'ai';
+import 'dotenv/config';
+
+/**
+ * prepare
+ * Please add the following variables to your .env file to initialize Azure OpenAI.
+ * AZURE_RESOURCE_NAME=""
+ * AZURE_API_KEY=""
+ */
+
+async function main() {
+ // Basic text generation
+ const basicResult = await generateText({
+ model: azure.responses('gpt-5-mini'),
+ prompt:
+ 'Create a program that generates five random numbers between 1 and 100 with two decimal places, and show me the execution results.',
+ tools: {
+ code_interpreter: azure.tools.codeInterpreter({}),
+ },
+ });
+
+ console.log('\n=== Basic Text Generation ===');
+ console.log(basicResult.text);
+ console.log('\n=== Other Outputs ===');
+ console.log(basicResult.toolCalls);
+ console.log(basicResult.toolResults);
+}
+
+main().catch(console.error);
diff --git a/examples/ai-core/src/generate-text/azure-responses-file-search.ts b/examples/ai-core/src/generate-text/azure-responses-file-search.ts
new file mode 100644
index 000000000000..b2deef94528b
--- /dev/null
+++ b/examples/ai-core/src/generate-text/azure-responses-file-search.ts
@@ -0,0 +1,44 @@
+import { azure } from '@ai-sdk/azure';
+import { generateText } from 'ai';
+import 'dotenv/config';
+
+/**
+ * prepare 1
+ * Please add the following variables to your .env file to initialize Azure OpenAI.
+ * AZURE_RESOURCE_NAME=""
+ * AZURE_API_KEY=""
+ *
+ * prepare 2
+ * Please create a vector store and upload a file to it.
+ * URL: AOAI vector store portal
+ * https://oai.azure.com/resource/vectorstore
+ */
+
+const VectorStoreId = 'vs_xxxxxxxxxxxxxxxxxxxxxxxx'; // put your vector store id.
+
+async function main() {
+ // Basic text generation
+ const basicResult = await generateText({
+ model: azure.responses('gpt-4.1-mini'),
+    prompt: 'What is quantum computing?', // ask a question about your documents.
+ tools: {
+ file_search: azure.tools.fileSearch({
+ // optional configuration:
+ vectorStoreIds: [VectorStoreId],
+ maxNumResults: 10,
+ ranking: {
+ ranker: 'auto',
+ },
+ }),
+ },
+ // Force file search tool:
+ toolChoice: { type: 'tool', toolName: 'file_search' },
+ });
+
+ console.log('\n=== Basic Text Generation ===');
+ console.log(basicResult.text);
+ console.log(basicResult.toolCalls);
+ console.log(basicResult.toolResults);
+}
+
+main().catch(console.error);
diff --git a/examples/ai-core/src/generate-text/openai-responses-code-interpreter.ts b/examples/ai-core/src/generate-text/openai-responses-code-interpreter.ts
new file mode 100644
index 000000000000..50744ffa37b2
--- /dev/null
+++ b/examples/ai-core/src/generate-text/openai-responses-code-interpreter.ts
@@ -0,0 +1,23 @@
+import { openai } from '@ai-sdk/openai';
+import { generateText } from 'ai';
+import 'dotenv/config';
+
+async function main() {
+ // Basic text generation
+ const basicResult = await generateText({
+ model: openai.responses('gpt-4.1-mini'),
+ prompt:
+ 'Create a program that generates five random numbers between 1 and 100 with two decimal places, and show me the execution results.',
+ tools: {
+ code_interpreter: openai.tools.codeInterpreter({}),
+ },
+ });
+
+ console.log('\n=== Basic Text Generation ===');
+ console.log(basicResult.text);
+ console.log('\n=== Other Outputs ===');
+ console.log(basicResult.toolCalls);
+ console.log(basicResult.toolResults);
+}
+
+main().catch(console.error);
diff --git a/examples/ai-core/src/generate-text/openai-responses-file-search.ts b/examples/ai-core/src/generate-text/openai-responses-file-search.ts
new file mode 100644
index 000000000000..7211051fe9f7
--- /dev/null
+++ b/examples/ai-core/src/generate-text/openai-responses-file-search.ts
@@ -0,0 +1,39 @@
+import { openai } from '@ai-sdk/openai';
+import { generateText } from 'ai';
+import 'dotenv/config';
+
+/**
+ * prepare
+ * Please create a vector store and upload a file to it.
+ * URL: OpenAI vector store dashboard
+ * https://platform.openai.com/storage/vector_stores/
+ */
+
+const VectorStoreId = 'vs_xxxxxxxxxxxxxxxxxxxxxxxx'; // put your vector store id.
+
+async function main() {
+ // Basic text generation
+ const basicResult = await generateText({
+ model: openai.responses('gpt-4.1-mini'),
+    prompt: 'What is quantum computing?', // ask a question about your documents.
+ tools: {
+ file_search: openai.tools.fileSearch({
+ // optional configuration:
+ vectorStoreIds: [VectorStoreId],
+ maxNumResults: 10,
+ ranking: {
+ ranker: 'auto',
+ },
+ }),
+ },
+ // Force file search tool:
+ toolChoice: { type: 'tool', toolName: 'file_search' },
+ });
+
+ console.log('\n=== Basic Text Generation ===');
+ console.log(basicResult.text);
+ console.dir(basicResult.toolCalls, { depth: null });
+ console.dir(basicResult.toolResults, { depth: null });
+}
+
+main().catch(console.error);
diff --git a/examples/ai-core/src/stream-text/azure-responses-code-interpreter.ts b/examples/ai-core/src/stream-text/azure-responses-code-interpreter.ts
new file mode 100644
index 000000000000..43d9d511dc62
--- /dev/null
+++ b/examples/ai-core/src/stream-text/azure-responses-code-interpreter.ts
@@ -0,0 +1,32 @@
+import { azure } from '@ai-sdk/azure';
+import { streamText } from 'ai';
+import 'dotenv/config';
+
+/**
+ * prepare
+ * Please add the following variables to your .env file to initialize Azure OpenAI.
+ * AZURE_RESOURCE_NAME=""
+ * AZURE_API_KEY=""
+ */
+
+async function main() {
+ // Basic text generation
+ const result = streamText({
+ model: azure.responses('gpt-5-mini'),
+ prompt:
+ 'Create a program that generates five random numbers between 1 and 100 with two decimal places, and show me the execution results.',
+ tools: {
+ code_interpreter: azure.tools.codeInterpreter({}),
+ },
+ });
+
+ console.log('\n=== Basic Text Generation ===');
+ for await (const textPart of result.textStream) {
+ process.stdout.write(textPart);
+ }
+ console.log('\n=== Other Outputs ===');
+ console.log(await result.toolCalls);
+ console.log(await result.toolResults);
+}
+
+main().catch(console.error);
diff --git a/examples/ai-core/src/stream-text/azure-responses-file-search.ts b/examples/ai-core/src/stream-text/azure-responses-file-search.ts
new file mode 100644
index 000000000000..ec075033251d
--- /dev/null
+++ b/examples/ai-core/src/stream-text/azure-responses-file-search.ts
@@ -0,0 +1,48 @@
+import { azure } from '@ai-sdk/azure';
+import { streamText } from 'ai';
+import 'dotenv/config';
+
+/**
+ * prepare 1
+ * Please add the following variables to your .env file to initialize Azure OpenAI.
+ * AZURE_RESOURCE_NAME=""
+ * AZURE_API_KEY=""
+ *
+ * prepare 2
+ * Please create a vector store and upload a file to it.
+ * URL: AOAI vector store portal
+ * https://oai.azure.com/resource/vectorstore
+ */
+
+const VectorStoreId = 'vs_xxxxxxxxxxxxxxxxxxxxxxxx'; // put your vector store id.
+
+async function main() {
+ // Basic text generation
+ const result = await streamText({
+ model: azure.responses('gpt-4.1-mini'),
+    prompt: 'What is quantum computing?', // ask a question about your documents.
+ tools: {
+ file_search: azure.tools.fileSearch({
+ // optional configuration:
+ vectorStoreIds: [VectorStoreId],
+ maxNumResults: 10,
+ ranking: {
+ ranker: 'auto',
+ },
+ }),
+ },
+ // Force file search tool:
+ toolChoice: { type: 'tool', toolName: 'file_search' },
+ });
+
+ console.log('\n=== Basic Text Generation ===');
+ for await (const textPart of result.textStream) {
+ process.stdout.write(textPart);
+ }
+ console.log('\n=== Other Outputs ===');
+ console.dir(await result.toolCalls, { depth: Infinity });
+ console.dir(await result.toolResults, { depth: Infinity });
+ console.dir(await result.sources, { depth: Infinity });
+}
+
+main().catch(console.error);
diff --git a/examples/ai-core/src/stream-text/openai-responses-code-interpreter.ts b/examples/ai-core/src/stream-text/openai-responses-code-interpreter.ts
new file mode 100644
index 000000000000..9b229937f417
--- /dev/null
+++ b/examples/ai-core/src/stream-text/openai-responses-code-interpreter.ts
@@ -0,0 +1,25 @@
+import { openai } from '@ai-sdk/openai';
+import { streamText } from 'ai';
+import 'dotenv/config';
+
+async function main() {
+ // Basic text generation
+ const result = streamText({
+ model: openai.responses('gpt-4.1-mini'),
+ prompt:
+ 'Create a program that generates five random numbers between 1 and 100 with two decimal places, and show me the execution results.',
+ tools: {
+ code_interpreter: openai.tools.codeInterpreter({}),
+ },
+ });
+
+ console.log('\n=== Basic Text Generation ===');
+ for await (const textPart of result.textStream) {
+ process.stdout.write(textPart);
+ }
+ console.log('\n=== Other Outputs ===');
+ console.log(await result.toolCalls);
+ console.log(await result.toolResults);
+}
+
+main().catch(console.error);
diff --git a/examples/ai-core/src/stream-text/openai-responses-file-search.ts b/examples/ai-core/src/stream-text/openai-responses-file-search.ts
new file mode 100644
index 000000000000..384d286748ae
--- /dev/null
+++ b/examples/ai-core/src/stream-text/openai-responses-file-search.ts
@@ -0,0 +1,42 @@
+import { openai } from '@ai-sdk/openai';
+import { streamText } from 'ai';
+import 'dotenv/config';
+
+/**
+ * prepare
+ * Please create a vector store and upload a file to it.
+ * URL: OpenAI vector store dashboard
+ * https://platform.openai.com/storage/vector_stores/
+ */
+
+const VectorStoreId = 'vs_xxxxxxxxxxxxxxxxxxxxxxxx'; // put your vector store id.
+
+async function main() {
+ // Basic text generation
+ const result = await streamText({
+ model: openai.responses('gpt-4.1-mini'),
+    prompt: 'What is quantum computing?', // ask a question about your documents.
+ tools: {
+ file_search: openai.tools.fileSearch({
+ // optional configuration:
+ vectorStoreIds: [VectorStoreId],
+ maxNumResults: 10,
+ ranking: {
+ ranker: 'auto',
+ },
+ }),
+ },
+ // Force file search tool:
+ toolChoice: { type: 'tool', toolName: 'file_search' },
+ });
+
+ console.log('\n=== Basic Text Generation ===');
+ for await (const textPart of result.textStream) {
+ process.stdout.write(textPart);
+ }
+ console.log('\n=== Other Outputs ===');
+ console.dir(await result.toolCalls, { depth: null });
+ console.dir(await result.toolResults, { depth: null });
+}
+
+main().catch(console.error);
diff --git a/packages/azure/src/azure-openai-provider.test.ts b/packages/azure/src/azure-openai-provider.test.ts
index b6c6d6b34f00..0d4e75801d19 100644
--- a/packages/azure/src/azure-openai-provider.test.ts
+++ b/packages/azure/src/azure-openai-provider.test.ts
@@ -739,5 +739,86 @@ describe('responses', () => {
},
]);
});
+
+ it('should send include provider option for file search results', async () => {
+ prepareJsonResponse();
+
+ const { warnings } = await provider
+ .responses('test-deployment')
+ .doGenerate({
+ prompt: TEST_PROMPT,
+ tools: [
+ {
+ type: 'provider-defined',
+ id: 'openai.file_search',
+ name: 'file_search',
+ args: {
+ vectorStoreIds: ['vs_123', 'vs_456'],
+ maxNumResults: 10,
+ ranking: {
+ ranker: 'auto',
+ },
+ },
+ },
+ ],
+ });
+
+ expect(await server.calls[0].requestBodyJson).toMatchInlineSnapshot(`
+ {
+ "input": [
+ {
+ "content": [
+ {
+ "text": "Hello",
+ "type": "input_text",
+ },
+ ],
+ "role": "user",
+ },
+ ],
+ "model": "test-deployment",
+ "tools": [
+ {
+ "max_num_results": 10,
+ "ranking_options": {
+ "ranker": "auto",
+ },
+ "type": "file_search",
+ "vector_store_ids": [
+ "vs_123",
+ "vs_456",
+ ],
+ },
+ ],
+ }
+ `);
+
+ expect(warnings).toStrictEqual([]);
+ });
+
+ it('should forward include provider options to request body', async () => {
+ prepareJsonResponse();
+
+ const { warnings } = await provider
+ .responses('test-deployment')
+ .doGenerate({
+ prompt: TEST_PROMPT,
+ providerOptions: {
+ openai: {
+ include: ['file_search_call.results'],
+ },
+ },
+ });
+
+ expect(await server.calls[0].requestBodyJson).toStrictEqual({
+ model: 'test-deployment',
+ input: [
+ { role: 'user', content: [{ type: 'input_text', text: 'Hello' }] },
+ ],
+ include: ['file_search_call.results'],
+ });
+
+ expect(warnings).toStrictEqual([]);
+ });
});
});
diff --git a/packages/azure/src/azure-openai-provider.ts b/packages/azure/src/azure-openai-provider.ts
index 9355ee796d7d..86ebb0aab720 100644
--- a/packages/azure/src/azure-openai-provider.ts
+++ b/packages/azure/src/azure-openai-provider.ts
@@ -21,6 +21,7 @@ import {
loadSetting,
withUserAgentSuffix,
} from '@ai-sdk/provider-utils';
+import { azureOpenaiTools } from './azure-openai-tools';
import { VERSION } from './version';
export interface AzureOpenAIProvider extends ProviderV3 {
@@ -77,6 +78,11 @@ Creates an Azure OpenAI model for text embeddings.
* Creates an Azure OpenAI model for speech generation.
*/
speech(deploymentId: string): SpeechModelV2;
+
+ /**
+ * AzureOpenAI-specific tools.
+ */
+ tools: typeof azureOpenaiTools;
}
export interface AzureOpenAIProviderSettings {
@@ -247,6 +253,7 @@ export function createAzure(
provider.responses = createResponsesModel;
provider.transcription = createTranscriptionModel;
provider.speech = createSpeechModel;
+ provider.tools = azureOpenaiTools;
return provider;
}
diff --git a/packages/azure/src/azure-openai-tools.ts b/packages/azure/src/azure-openai-tools.ts
new file mode 100644
index 000000000000..15018f42574a
--- /dev/null
+++ b/packages/azure/src/azure-openai-tools.ts
@@ -0,0 +1,9 @@
+import { codeInterpreter, fileSearch } from '@ai-sdk/openai/internal';
+
+export const azureOpenaiTools: {
+ codeInterpreter: typeof codeInterpreter;
+ fileSearch: typeof fileSearch;
+} = {
+ codeInterpreter,
+ fileSearch,
+};
diff --git a/packages/openai/src/internal/index.ts b/packages/openai/src/internal/index.ts
index ab8e78cd7674..6e74ed5e850c 100644
--- a/packages/openai/src/internal/index.ts
+++ b/packages/openai/src/internal/index.ts
@@ -11,3 +11,5 @@ export * from '../transcription/openai-transcription-options';
export * from '../speech/openai-speech-model';
export * from '../speech/openai-speech-options';
export * from '../responses/openai-responses-language-model';
+export * from '../tool/code-interpreter';
+export * from '../tool/file-search';