diff --git a/src/Libraries/Microsoft.Extensions.AI.Abstractions/ChatCompletion/ChatOptions.cs b/src/Libraries/Microsoft.Extensions.AI.Abstractions/ChatCompletion/ChatOptions.cs
index 944705898f1..0f86ca30de2 100644
--- a/src/Libraries/Microsoft.Extensions.AI.Abstractions/ChatCompletion/ChatOptions.cs
+++ b/src/Libraries/Microsoft.Extensions.AI.Abstractions/ChatCompletion/ChatOptions.cs
@@ -37,6 +37,7 @@ protected ChatOptions(ChatOptions? other)
ModelId = other.ModelId;
PresencePenalty = other.PresencePenalty;
RawRepresentationFactory = other.RawRepresentationFactory;
+ Reasoning = other.Reasoning?.Clone();
ResponseFormat = other.ResponseFormat;
Seed = other.Seed;
Temperature = other.Temperature;
@@ -108,6 +109,11 @@ protected ChatOptions(ChatOptions? other)
/// Gets or sets a seed value used by a service to control the reproducibility of results.
public long? Seed { get; set; }
+ /// <summary>
+ /// Gets or sets the reasoning options for the chat request.
+ /// </summary>
+ public ReasoningOptions? Reasoning { get; set; }
+
/// <summary>
/// Gets or sets the response format for the chat request.
/// </summary>
diff --git a/src/Libraries/Microsoft.Extensions.AI.Abstractions/ChatCompletion/ReasoningEffort.cs b/src/Libraries/Microsoft.Extensions.AI.Abstractions/ChatCompletion/ReasoningEffort.cs
new file mode 100644
index 00000000000..a1b40dbdae4
--- /dev/null
+++ b/src/Libraries/Microsoft.Extensions.AI.Abstractions/ChatCompletion/ReasoningEffort.cs
@@ -0,0 +1,40 @@
+// Licensed to the .NET Foundation under one or more agreements.
+// The .NET Foundation licenses this file to you under the MIT license.
+
+namespace Microsoft.Extensions.AI;
+
+/// <summary>
+/// Specifies the level of reasoning effort that should be applied when generating chat responses.
+/// </summary>
+/// <remarks>
+/// This value suggests how much computational effort the model should put into reasoning.
+/// Higher values may result in more thoughtful responses but with increased latency and token usage.
+/// The specific interpretation and support for each level may vary between providers or even between models from the same provider.
+/// </remarks>
+public enum ReasoningEffort
+{
+ /// <summary>
+ /// No reasoning effort.
+ /// </summary>
+ None,
+
+ /// <summary>
+ /// Low reasoning effort. Minimal reasoning for faster responses.
+ /// </summary>
+ Low,
+
+ /// <summary>
+ /// Medium reasoning effort. Balanced reasoning for most use cases.
+ /// </summary>
+ Medium,
+
+ /// <summary>
+ /// High reasoning effort. Extensive reasoning for complex tasks.
+ /// </summary>
+ High,
+
+ /// <summary>
+ /// Extra high reasoning effort. Maximum reasoning for the most demanding tasks.
+ /// </summary>
+ ExtraHigh,
+}
diff --git a/src/Libraries/Microsoft.Extensions.AI.Abstractions/ChatCompletion/ReasoningOptions.cs b/src/Libraries/Microsoft.Extensions.AI.Abstractions/ChatCompletion/ReasoningOptions.cs
new file mode 100644
index 00000000000..7f302461490
--- /dev/null
+++ b/src/Libraries/Microsoft.Extensions.AI.Abstractions/ChatCompletion/ReasoningOptions.cs
@@ -0,0 +1,46 @@
+// Licensed to the .NET Foundation under one or more agreements.
+// The .NET Foundation licenses this file to you under the MIT license.
+
+namespace Microsoft.Extensions.AI;
+
+/// <summary>
+/// Represents options for configuring reasoning behavior in chat requests.
+/// </summary>
+/// <remarks>
+/// <para>
+/// Reasoning options allow control over how much computational effort the model
+/// should put into reasoning about the response, and how that reasoning should
+/// be exposed to the caller.
+/// </para>
+/// <para>
+/// Not all providers support all reasoning options. Implementations should
+/// make a best-effort attempt to map the requested options to the provider's
+/// capabilities. If a provider or model doesn't support reasoning or doesn't support the requested configuration of reasoning, these options may be ignored.
+/// </para>
+/// </remarks>
+public sealed class ReasoningOptions
+{
+ /// <summary>
+ /// Gets or sets the level of reasoning effort to apply.
+ /// </summary>
+ /// <value>
+ /// The reasoning effort level, or <see langword="null"/> to use the provider's default.
+ /// </value>
+ public ReasoningEffort? Effort { get; set; }
+
+ /// <summary>
+ /// Gets or sets how reasoning content should be included in the response.
+ /// </summary>
+ /// <value>
+ /// The reasoning output mode, or <see langword="null"/> to use the provider's default.
+ /// </value>
+ public ReasoningOutput? Output { get; set; }
+
+ /// <summary>Creates a shallow clone of this instance.</summary>
+ /// <returns>A shallow clone of this instance.</returns>
+ internal ReasoningOptions Clone() => new()
+ {
+ Effort = Effort,
+ Output = Output,
+ };
+}
diff --git a/src/Libraries/Microsoft.Extensions.AI.Abstractions/ChatCompletion/ReasoningOutput.cs b/src/Libraries/Microsoft.Extensions.AI.Abstractions/ChatCompletion/ReasoningOutput.cs
new file mode 100644
index 00000000000..9250c81a5c8
--- /dev/null
+++ b/src/Libraries/Microsoft.Extensions.AI.Abstractions/ChatCompletion/ReasoningOutput.cs
@@ -0,0 +1,29 @@
+// Licensed to the .NET Foundation under one or more agreements.
+// The .NET Foundation licenses this file to you under the MIT license.
+
+namespace Microsoft.Extensions.AI;
+
+/// <summary>
+/// Specifies how reasoning content should be included in the response.
+/// </summary>
+/// <remarks>
+/// Some providers support including reasoning or thinking traces in the response.
+/// This setting controls whether and how that reasoning content is exposed.
+/// </remarks>
+public enum ReasoningOutput
+{
+ /// <summary>
+ /// No reasoning output. Do not include reasoning content in the response.
+ /// </summary>
+ None,
+
+ /// <summary>
+ /// Summary reasoning output. Include a summary of the reasoning process.
+ /// </summary>
+ Summary,
+
+ /// <summary>
+ /// Full reasoning output. Include all reasoning content in the response.
+ /// </summary>
+ Full,
+}
diff --git a/src/Libraries/Microsoft.Extensions.AI.Abstractions/Microsoft.Extensions.AI.Abstractions.json b/src/Libraries/Microsoft.Extensions.AI.Abstractions/Microsoft.Extensions.AI.Abstractions.json
index e6ad6cc28b6..4b7da685831 100644
--- a/src/Libraries/Microsoft.Extensions.AI.Abstractions/Microsoft.Extensions.AI.Abstractions.json
+++ b/src/Libraries/Microsoft.Extensions.AI.Abstractions/Microsoft.Extensions.AI.Abstractions.json
@@ -1033,6 +1033,10 @@
"Member": "System.Func? Microsoft.Extensions.AI.ChatOptions.RawRepresentationFactory { get; set; }",
"Stage": "Stable"
},
+ {
+ "Member": "Microsoft.Extensions.AI.ReasoningOptions? Microsoft.Extensions.AI.ChatOptions.Reasoning { get; set; }",
+ "Stage": "Stable"
+ },
{
"Member": "Microsoft.Extensions.AI.ChatResponseFormat? Microsoft.Extensions.AI.ChatOptions.ResponseFormat { get; set; }",
"Stage": "Stable"
@@ -2111,6 +2115,90 @@
}
]
},
+ {
+ "Type": "enum Microsoft.Extensions.AI.ReasoningEffort",
+ "Stage": "Stable",
+ "Methods": [
+ {
+ "Member": "Microsoft.Extensions.AI.ReasoningEffort.ReasoningEffort();",
+ "Stage": "Stable"
+ }
+ ],
+ "Fields": [
+ {
+ "Member": "const Microsoft.Extensions.AI.ReasoningEffort Microsoft.Extensions.AI.ReasoningEffort.None",
+ "Stage": "Stable",
+ "Value": "0"
+ },
+ {
+ "Member": "const Microsoft.Extensions.AI.ReasoningEffort Microsoft.Extensions.AI.ReasoningEffort.Low",
+ "Stage": "Stable",
+ "Value": "1"
+ },
+ {
+ "Member": "const Microsoft.Extensions.AI.ReasoningEffort Microsoft.Extensions.AI.ReasoningEffort.Medium",
+ "Stage": "Stable",
+ "Value": "2"
+ },
+ {
+ "Member": "const Microsoft.Extensions.AI.ReasoningEffort Microsoft.Extensions.AI.ReasoningEffort.High",
+ "Stage": "Stable",
+ "Value": "3"
+ },
+ {
+ "Member": "const Microsoft.Extensions.AI.ReasoningEffort Microsoft.Extensions.AI.ReasoningEffort.ExtraHigh",
+ "Stage": "Stable",
+ "Value": "4"
+ }
+ ]
+ },
+ {
+ "Type": "sealed class Microsoft.Extensions.AI.ReasoningOptions",
+ "Stage": "Stable",
+ "Methods": [
+ {
+ "Member": "Microsoft.Extensions.AI.ReasoningOptions.ReasoningOptions();",
+ "Stage": "Stable"
+ }
+ ],
+ "Properties": [
+ {
+ "Member": "Microsoft.Extensions.AI.ReasoningEffort? Microsoft.Extensions.AI.ReasoningOptions.Effort { get; set; }",
+ "Stage": "Stable"
+ },
+ {
+ "Member": "Microsoft.Extensions.AI.ReasoningOutput? Microsoft.Extensions.AI.ReasoningOptions.Output { get; set; }",
+ "Stage": "Stable"
+ }
+ ]
+ },
+ {
+ "Type": "enum Microsoft.Extensions.AI.ReasoningOutput",
+ "Stage": "Stable",
+ "Methods": [
+ {
+ "Member": "Microsoft.Extensions.AI.ReasoningOutput.ReasoningOutput();",
+ "Stage": "Stable"
+ }
+ ],
+ "Fields": [
+ {
+ "Member": "const Microsoft.Extensions.AI.ReasoningOutput Microsoft.Extensions.AI.ReasoningOutput.None",
+ "Stage": "Stable",
+ "Value": "0"
+ },
+ {
+ "Member": "const Microsoft.Extensions.AI.ReasoningOutput Microsoft.Extensions.AI.ReasoningOutput.Summary",
+ "Stage": "Stable",
+ "Value": "1"
+ },
+ {
+ "Member": "const Microsoft.Extensions.AI.ReasoningOutput Microsoft.Extensions.AI.ReasoningOutput.Full",
+ "Stage": "Stable",
+ "Value": "2"
+ }
+ ]
+ },
{
"Type": "sealed class Microsoft.Extensions.AI.RequiredChatToolMode : Microsoft.Extensions.AI.ChatToolMode",
"Stage": "Stable",
diff --git a/src/Libraries/Microsoft.Extensions.AI.OpenAI/OpenAIChatClient.cs b/src/Libraries/Microsoft.Extensions.AI.OpenAI/OpenAIChatClient.cs
index a7ca8c08d95..b1e899cd364 100644
--- a/src/Libraries/Microsoft.Extensions.AI.OpenAI/OpenAIChatClient.cs
+++ b/src/Libraries/Microsoft.Extensions.AI.OpenAI/OpenAIChatClient.cs
@@ -568,6 +568,7 @@ private ChatCompletionOptions ToOpenAIOptions(ChatOptions? options)
result.PresencePenalty ??= options.PresencePenalty;
result.Temperature ??= options.Temperature;
result.Seed ??= options.Seed;
+ result.ReasoningEffortLevel ??= ToOpenAIChatReasoningEffortLevel(options.Reasoning?.Effort);
OpenAIClientExtensions.PatchModelIfNotSet(ref result.Patch, options.ModelId);
if (options.StopSequences is { Count: > 0 } stopSequences)
@@ -637,6 +638,16 @@ ChatResponseFormatJson jsonFormat when OpenAIClientExtensions.StrictSchemaTransf
_ => null
};
+ private static ChatReasoningEffortLevel? ToOpenAIChatReasoningEffortLevel(ReasoningEffort? effort) =>
+ effort switch
+ {
+ ReasoningEffort.Low => ChatReasoningEffortLevel.Low,
+ ReasoningEffort.Medium => ChatReasoningEffortLevel.Medium,
+ ReasoningEffort.High => ChatReasoningEffortLevel.High,
+ ReasoningEffort.ExtraHigh => ChatReasoningEffortLevel.High,
+ _ => (ChatReasoningEffortLevel?)null,
+ };
+
private static UsageDetails FromOpenAIUsage(ChatTokenUsage tokenUsage)
{
var destination = new UsageDetails
diff --git a/src/Libraries/Microsoft.Extensions.AI.OpenAI/OpenAIResponsesChatClient.cs b/src/Libraries/Microsoft.Extensions.AI.OpenAI/OpenAIResponsesChatClient.cs
index 5fb6ac1935a..98217dfd791 100644
--- a/src/Libraries/Microsoft.Extensions.AI.OpenAI/OpenAIResponsesChatClient.cs
+++ b/src/Libraries/Microsoft.Extensions.AI.OpenAI/OpenAIResponsesChatClient.cs
@@ -716,6 +716,7 @@ private CreateResponseOptions AsCreateResponseOptions(ChatOptions? options, out
result.Model ??= options.ModelId ?? _responseClient.Model;
result.Temperature ??= options.Temperature;
result.TopP ??= options.TopP;
+ result.ReasoningOptions ??= ToOpenAIResponseReasoningOptions(options.Reasoning);
// If the CreateResponseOptions.PreviousResponseId is already set (likely rare), then we don't need to do
// anything with regards to Conversation, because they're mutually exclusive and we would want to ignore
@@ -814,6 +815,41 @@ ChatResponseFormatJson jsonFormat when OpenAIClientExtensions.StrictSchemaTransf
_ => null,
};
+ private static ResponseReasoningOptions? ToOpenAIResponseReasoningOptions(ReasoningOptions? reasoning)
+ {
+ if (reasoning is null)
+ {
+ return null;
+ }
+
+ ResponseReasoningEffortLevel? effortLevel = reasoning.Effort switch
+ {
+ ReasoningEffort.Low => ResponseReasoningEffortLevel.Low,
+ ReasoningEffort.Medium => ResponseReasoningEffortLevel.Medium,
+ ReasoningEffort.High => ResponseReasoningEffortLevel.High,
+ ReasoningEffort.ExtraHigh => ResponseReasoningEffortLevel.High, // Map to highest available
+ _ => (ResponseReasoningEffortLevel?)null, // None or null - let OpenAI use its default
+ };
+
+ ResponseReasoningSummaryVerbosity? summary = reasoning.Output switch
+ {
+ ReasoningOutput.Summary => ResponseReasoningSummaryVerbosity.Concise,
+ ReasoningOutput.Full => ResponseReasoningSummaryVerbosity.Detailed,
+ _ => (ResponseReasoningSummaryVerbosity?)null, // None or null - let OpenAI use its default
+ };
+
+ if (effortLevel is null && summary is null)
+ {
+ return null;
+ }
+
+ return new ResponseReasoningOptions
+ {
+ ReasoningEffortLevel = effortLevel,
+ ReasoningSummaryVerbosity = summary,
+ };
+ }
+
/// <summary>Convert a sequence of <see cref="ChatMessage"/>s to <see cref="ResponseItem"/>s.</summary>
internal static IEnumerable<ResponseItem> ToOpenAIResponseItems(IEnumerable<ChatMessage> inputs, ChatOptions? options)
{
diff --git a/test/Libraries/Microsoft.Extensions.AI.Abstractions.Tests/ChatCompletion/ChatOptionsTests.cs b/test/Libraries/Microsoft.Extensions.AI.Abstractions.Tests/ChatCompletion/ChatOptionsTests.cs
index e6d863220e1..4b8b4aa2f94 100644
--- a/test/Libraries/Microsoft.Extensions.AI.Abstractions.Tests/ChatCompletion/ChatOptionsTests.cs
+++ b/test/Libraries/Microsoft.Extensions.AI.Abstractions.Tests/ChatCompletion/ChatOptionsTests.cs
@@ -23,6 +23,7 @@ public void Constructor_Parameterless_PropsDefaulted()
Assert.Null(options.FrequencyPenalty);
Assert.Null(options.PresencePenalty);
Assert.Null(options.Seed);
+ Assert.Null(options.Reasoning);
Assert.Null(options.ResponseFormat);
Assert.Null(options.ModelId);
Assert.Null(options.StopSequences);
@@ -42,6 +43,7 @@ public void Constructor_Parameterless_PropsDefaulted()
Assert.Null(clone.FrequencyPenalty);
Assert.Null(clone.PresencePenalty);
Assert.Null(clone.Seed);
+ Assert.Null(clone.Reasoning);
Assert.Null(clone.ResponseFormat);
Assert.Null(clone.ModelId);
Assert.Null(clone.StopSequences);
@@ -89,6 +91,7 @@ public void Properties_Roundtrip()
options.FrequencyPenalty = 0.4f;
options.PresencePenalty = 0.5f;
options.Seed = 12345;
+ options.Reasoning = new ReasoningOptions { Effort = ReasoningEffort.Medium, Output = ReasoningOutput.Summary };
options.ResponseFormat = ChatResponseFormat.Json;
options.ModelId = "modelId";
options.StopSequences = stopSequences;
@@ -109,6 +112,9 @@ public void Properties_Roundtrip()
Assert.Equal(0.4f, options.FrequencyPenalty);
Assert.Equal(0.5f, options.PresencePenalty);
Assert.Equal(12345, options.Seed);
+ Assert.NotNull(options.Reasoning);
+ Assert.Equal(ReasoningEffort.Medium, options.Reasoning.Effort);
+ Assert.Equal(ReasoningOutput.Summary, options.Reasoning.Output);
Assert.Same(ChatResponseFormat.Json, options.ResponseFormat);
Assert.Equal("modelId", options.ModelId);
Assert.Same(stopSequences, options.StopSequences);
@@ -129,6 +135,10 @@ public void Properties_Roundtrip()
Assert.Equal(0.4f, clone.FrequencyPenalty);
Assert.Equal(0.5f, clone.PresencePenalty);
Assert.Equal(12345, clone.Seed);
+ Assert.NotNull(clone.Reasoning);
+ Assert.NotSame(options.Reasoning, clone.Reasoning); // Should be a shallow copy
+ Assert.Equal(ReasoningEffort.Medium, clone.Reasoning.Effort);
+ Assert.Equal(ReasoningOutput.Summary, clone.Reasoning.Output);
Assert.Same(ChatResponseFormat.Json, clone.ResponseFormat);
Assert.Equal("modelId", clone.ModelId);
Assert.Equal(stopSequences, clone.StopSequences);
@@ -168,6 +178,7 @@ public void JsonSerialization_Roundtrips()
options.FrequencyPenalty = 0.4f;
options.PresencePenalty = 0.5f;
options.Seed = 12345;
+ options.Reasoning = new ReasoningOptions { Effort = ReasoningEffort.High, Output = ReasoningOutput.Full };
options.ResponseFormat = ChatResponseFormat.Json;
options.ModelId = "modelId";
options.StopSequences = stopSequences;
@@ -197,6 +208,9 @@ public void JsonSerialization_Roundtrips()
Assert.Equal(0.4f, deserialized.FrequencyPenalty);
Assert.Equal(0.5f, deserialized.PresencePenalty);
Assert.Equal(12345, deserialized.Seed);
+ Assert.NotNull(deserialized.Reasoning);
+ Assert.Equal(ReasoningEffort.High, deserialized.Reasoning.Effort);
+ Assert.Equal(ReasoningOutput.Full, deserialized.Reasoning.Output);
Assert.IsType<ChatResponseFormatJson>(deserialized.ResponseFormat);
Assert.Equal("modelId", deserialized.ModelId);
Assert.NotSame(stopSequences, deserialized.StopSequences);
diff --git a/test/Libraries/Microsoft.Extensions.AI.Abstractions.Tests/ChatCompletion/ReasoningOptionsTests.cs b/test/Libraries/Microsoft.Extensions.AI.Abstractions.Tests/ChatCompletion/ReasoningOptionsTests.cs
new file mode 100644
index 00000000000..273455f36fe
--- /dev/null
+++ b/test/Libraries/Microsoft.Extensions.AI.Abstractions.Tests/ChatCompletion/ReasoningOptionsTests.cs
@@ -0,0 +1,121 @@
+// Licensed to the .NET Foundation under one or more agreements.
+// The .NET Foundation licenses this file to you under the MIT license.
+
+using System.Text.Json;
+using Xunit;
+
+namespace Microsoft.Extensions.AI;
+
+public class ReasoningOptionsTests
+{
+ [Fact]
+ public void Constructor_Default_PropertiesAreNull()
+ {
+ ReasoningOptions options = new();
+
+ Assert.Null(options.Effort);
+ Assert.Null(options.Output);
+ }
+
+ [Theory]
+ [InlineData(ReasoningEffort.None)]
+ [InlineData(ReasoningEffort.Low)]
+ [InlineData(ReasoningEffort.Medium)]
+ [InlineData(ReasoningEffort.High)]
+ [InlineData(ReasoningEffort.ExtraHigh)]
+ public void Effort_Roundtrips(ReasoningEffort effort)
+ {
+ ReasoningOptions options = new() { Effort = effort };
+ Assert.Equal(effort, options.Effort);
+ }
+
+ [Theory]
+ [InlineData(ReasoningOutput.None)]
+ [InlineData(ReasoningOutput.Summary)]
+ [InlineData(ReasoningOutput.Full)]
+ public void Output_Roundtrips(ReasoningOutput output)
+ {
+ ReasoningOptions options = new() { Output = output };
+ Assert.Equal(output, options.Output);
+ }
+
+ [Fact]
+ public void JsonSerialization_Roundtrips()
+ {
+ ReasoningOptions options = new()
+ {
+ Effort = ReasoningEffort.High,
+ Output = ReasoningOutput.Full,
+ };
+
+ string json = JsonSerializer.Serialize(options, TestJsonSerializerContext.Default.ReasoningOptions);
+ ReasoningOptions? deserialized = JsonSerializer.Deserialize(json, TestJsonSerializerContext.Default.ReasoningOptions);
+
+ Assert.NotNull(deserialized);
+ Assert.Equal(options.Effort, deserialized.Effort);
+ Assert.Equal(options.Output, deserialized.Output);
+ }
+
+ [Fact]
+ public void JsonSerialization_NullProperties_Roundtrips()
+ {
+ ReasoningOptions options = new();
+
+ string json = JsonSerializer.Serialize(options, TestJsonSerializerContext.Default.ReasoningOptions);
+ ReasoningOptions? deserialized = JsonSerializer.Deserialize(json, TestJsonSerializerContext.Default.ReasoningOptions);
+
+ Assert.NotNull(deserialized);
+ Assert.Null(deserialized.Effort);
+ Assert.Null(deserialized.Output);
+ }
+
+ [Fact]
+ public void JsonSerialization_EffortOnly_Roundtrips()
+ {
+ ReasoningOptions options = new() { Effort = ReasoningEffort.Medium };
+
+ string json = JsonSerializer.Serialize(options, TestJsonSerializerContext.Default.ReasoningOptions);
+ ReasoningOptions? deserialized = JsonSerializer.Deserialize(json, TestJsonSerializerContext.Default.ReasoningOptions);
+
+ Assert.NotNull(deserialized);
+ Assert.Equal(ReasoningEffort.Medium, deserialized.Effort);
+ Assert.Null(deserialized.Output);
+ }
+
+ [Fact]
+ public void JsonSerialization_OutputOnly_Roundtrips()
+ {
+ ReasoningOptions options = new() { Output = ReasoningOutput.Summary };
+
+ string json = JsonSerializer.Serialize(options, TestJsonSerializerContext.Default.ReasoningOptions);
+ ReasoningOptions? deserialized = JsonSerializer.Deserialize(json, TestJsonSerializerContext.Default.ReasoningOptions);
+
+ Assert.NotNull(deserialized);
+ Assert.Null(deserialized.Effort);
+ Assert.Equal(ReasoningOutput.Summary, deserialized.Output);
+ }
+
+ [Fact]
+ public void JsonSerialization_AllEffortValues_SerializeAsStrings()
+ {
+ // Test all ReasoningEffort values serialize correctly
+ foreach (ReasoningEffort effort in new[] { ReasoningEffort.None, ReasoningEffort.Low, ReasoningEffort.Medium, ReasoningEffort.High, ReasoningEffort.ExtraHigh })
+ {
+ string json = JsonSerializer.Serialize(effort, TestJsonSerializerContext.Default.ReasoningEffort);
+ ReasoningEffort? deserialized = JsonSerializer.Deserialize(json, TestJsonSerializerContext.Default.ReasoningEffort);
+ Assert.Equal(effort, deserialized);
+ }
+ }
+
+ [Fact]
+ public void JsonSerialization_AllOutputValues_SerializeAsStrings()
+ {
+ // Test all ReasoningOutput values serialize correctly
+ foreach (ReasoningOutput output in new[] { ReasoningOutput.None, ReasoningOutput.Summary, ReasoningOutput.Full })
+ {
+ string json = JsonSerializer.Serialize(output, TestJsonSerializerContext.Default.ReasoningOutput);
+ ReasoningOutput? deserialized = JsonSerializer.Deserialize(json, TestJsonSerializerContext.Default.ReasoningOutput);
+ Assert.Equal(output, deserialized);
+ }
+ }
+}
diff --git a/test/Libraries/Microsoft.Extensions.AI.Abstractions.Tests/TestJsonSerializerContext.cs b/test/Libraries/Microsoft.Extensions.AI.Abstractions.Tests/TestJsonSerializerContext.cs
index faaa799baf4..6c448d0efb1 100644
--- a/test/Libraries/Microsoft.Extensions.AI.Abstractions.Tests/TestJsonSerializerContext.cs
+++ b/test/Libraries/Microsoft.Extensions.AI.Abstractions.Tests/TestJsonSerializerContext.cs
@@ -41,4 +41,7 @@ namespace Microsoft.Extensions.AI;
[JsonSerializable(typeof(ResponseContinuationToken))]
[JsonSerializable(typeof(UserInputRequestContent[]))]
[JsonSerializable(typeof(UserInputResponseContent[]))]
+[JsonSerializable(typeof(ReasoningOptions))]
+[JsonSerializable(typeof(ReasoningEffort))]
+[JsonSerializable(typeof(ReasoningOutput))]
internal sealed partial class TestJsonSerializerContext : JsonSerializerContext;
diff --git a/test/Libraries/Microsoft.Extensions.AI.OpenAI.Tests/OpenAIChatClientTests.cs b/test/Libraries/Microsoft.Extensions.AI.OpenAI.Tests/OpenAIChatClientTests.cs
index 99aa44cdd0f..f6d6ad95eb8 100644
--- a/test/Libraries/Microsoft.Extensions.AI.OpenAI.Tests/OpenAIChatClientTests.cs
+++ b/test/Libraries/Microsoft.Extensions.AI.OpenAI.Tests/OpenAIChatClientTests.cs
@@ -1816,4 +1816,94 @@ public void AsChatMessages_PreservesRole_MultipleMessages()
Assert.Equal(ChatRole.Assistant, extMessages[2].Role);
Assert.Equal(ChatRole.System, extMessages[3].Role);
}
+
+ [Theory]
+ [InlineData(ReasoningEffort.Low, "low")]
+ [InlineData(ReasoningEffort.Medium, "medium")]
+ [InlineData(ReasoningEffort.High, "high")]
+ [InlineData(ReasoningEffort.ExtraHigh, "high")] // ExtraHigh maps to high in OpenAI
+ public async Task ReasoningOptions_Effort_ProducesExpectedJson(ReasoningEffort effort, string expectedEffortString)
+ {
+ string input = $$"""
+ {
+ "messages": [
+ {
+ "role": "user",
+ "content": "hello"
+ }
+ ],
+ "model": "o4-mini",
+ "reasoning_effort": "{{expectedEffortString}}"
+ }
+ """;
+
+ const string Output = """
+ {
+ "id": "chatcmpl-test",
+ "object": "chat.completion",
+ "model": "o4-mini",
+ "choices": [
+ {
+ "message": {
+ "role": "assistant",
+ "content": "Hello!"
+ },
+ "finish_reason": "stop"
+ }
+ ]
+ }
+ """;
+
+ using VerbatimHttpHandler handler = new(input, Output);
+ using HttpClient httpClient = new(handler);
+ using IChatClient client = CreateChatClient(httpClient, "o4-mini");
+
+ Assert.NotNull(await client.GetResponseAsync("hello", new()
+ {
+ Reasoning = new ReasoningOptions { Effort = effort }
+ }));
+ }
+
+ [Fact]
+ public async Task ReasoningOptions_None_ProducesNoReasoningEffortInJson()
+ {
+ const string Input = """
+ {
+ "messages": [
+ {
+ "role": "user",
+ "content": "hello"
+ }
+ ],
+ "model": "gpt-4o-mini"
+ }
+ """;
+
+ const string Output = """
+ {
+ "id": "chatcmpl-test",
+ "object": "chat.completion",
+ "model": "gpt-4o-mini",
+ "choices": [
+ {
+ "message": {
+ "role": "assistant",
+ "content": "Hello!"
+ },
+ "finish_reason": "stop"
+ }
+ ]
+ }
+ """;
+
+ using VerbatimHttpHandler handler = new(Input, Output);
+ using HttpClient httpClient = new(handler);
+ using IChatClient client = CreateChatClient(httpClient, "gpt-4o-mini");
+
+ // None effort should not include reasoning_effort in the request
+ Assert.NotNull(await client.GetResponseAsync("hello", new()
+ {
+ Reasoning = new ReasoningOptions { Effort = ReasoningEffort.None }
+ }));
+ }
}
diff --git a/test/Libraries/Microsoft.Extensions.AI.OpenAI.Tests/OpenAIResponseClientTests.cs b/test/Libraries/Microsoft.Extensions.AI.OpenAI.Tests/OpenAIResponseClientTests.cs
index d1a6749450b..eea003bf9d1 100644
--- a/test/Libraries/Microsoft.Extensions.AI.OpenAI.Tests/OpenAIResponseClientTests.cs
+++ b/test/Libraries/Microsoft.Extensions.AI.OpenAI.Tests/OpenAIResponseClientTests.cs
@@ -5671,6 +5671,115 @@ static bool HasCorrectImageData(AIContent o, int index)
Assert.Equal("img_call_ghi789", toolCall.ImageId);
}
+ [Theory]
+ [InlineData(ReasoningEffort.Low, ReasoningOutput.Summary, "low", "concise")]
+ [InlineData(ReasoningEffort.Medium, ReasoningOutput.Full, "medium", "detailed")]
+ [InlineData(ReasoningEffort.High, ReasoningOutput.Summary, "high", "concise")]
+ [InlineData(ReasoningEffort.ExtraHigh, ReasoningOutput.Full, "high", "detailed")] // ExtraHigh maps to high
+ public async Task ReasoningOptions_EffortAndOutput_ProducesExpectedJson(
+ ReasoningEffort effort,
+ ReasoningOutput output,
+ string expectedEffortString,
+ string expectedSummaryString)
+ {
+ string input = $$"""
+ {
+ "model": "o4-mini",
+ "input": [{
+ "type": "message",
+ "role": "user",
+ "content": [{"type": "input_text", "text": "hello"}]
+ }],
+ "reasoning": {
+ "effort": "{{expectedEffortString}}",
+ "summary": "{{expectedSummaryString}}"
+ }
+ }
+ """;
+
+ const string Output = """
+ {
+ "id": "resp_test",
+ "object": "response",
+ "created_at": 1741891428,
+ "status": "completed",
+ "model": "o4-mini",
+ "output": [
+ {
+ "id": "msg_test",
+ "type": "message",
+ "status": "completed",
+ "role": "assistant",
+ "content": [{"type": "output_text", "text": "Hello!"}]
+ }
+ ],
+ "usage": {
+ "input_tokens": 10,
+ "output_tokens": 5,
+ "total_tokens": 15
+ }
+ }
+ """;
+
+ using VerbatimHttpHandler handler = new(input, Output);
+ using HttpClient httpClient = new(handler);
+ using IChatClient client = CreateResponseClient(httpClient, "o4-mini");
+
+ Assert.NotNull(await client.GetResponseAsync("hello", new()
+ {
+ Reasoning = new ReasoningOptions { Effort = effort, Output = output }
+ }));
+ }
+
+ [Fact]
+ public async Task ReasoningOptions_NoneValues_ProducesNoReasoningInJson()
+ {
+ const string Input = """
+ {
+ "model": "o4-mini",
+ "input": [{
+ "type": "message",
+ "role": "user",
+ "content": [{"type": "input_text", "text": "hello"}]
+ }]
+ }
+ """;
+
+ const string Output = """
+ {
+ "id": "resp_test",
+ "object": "response",
+ "created_at": 1741891428,
+ "status": "completed",
+ "model": "o4-mini",
+ "output": [
+ {
+ "id": "msg_test",
+ "type": "message",
+ "status": "completed",
+ "role": "assistant",
+ "content": [{"type": "output_text", "text": "Hello!"}]
+ }
+ ],
+ "usage": {
+ "input_tokens": 10,
+ "output_tokens": 5,
+ "total_tokens": 15
+ }
+ }
+ """;
+
+ using VerbatimHttpHandler handler = new(Input, Output);
+ using HttpClient httpClient = new(handler);
+ using IChatClient client = CreateResponseClient(httpClient, "o4-mini");
+
+ // None values for both should result in no reasoning options in the request
+ Assert.NotNull(await client.GetResponseAsync("hello", new()
+ {
+ Reasoning = new ReasoningOptions { Effort = ReasoningEffort.None, Output = ReasoningOutput.None }
+ }));
+ }
+
private static IChatClient CreateResponseClient(HttpClient httpClient, string modelId) =>
new OpenAIClient(
new ApiKeyCredential("apikey"),