From ab9701125f5fca44c45843b09e14ae48e45a3620 Mon Sep 17 00:00:00 2001
From: "copilot-swe-agent[bot]" <198982749+Copilot@users.noreply.github.com>
Date: Sun, 1 Feb 2026 23:47:46 +0000
Subject: [PATCH 1/6] Initial plan
From ba4e283c4fa460dffe8d1fde32a280a8eefd3bc6 Mon Sep 17 00:00:00 2001
From: "copilot-swe-agent[bot]" <198982749+Copilot@users.noreply.github.com>
Date: Mon, 2 Feb 2026 00:05:24 +0000
Subject: [PATCH 2/6] Add ReasoningOptions to ChatOptions with OpenAI
implementation
Co-authored-by: stephentoub <2642209+stephentoub@users.noreply.github.com>
---
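Notes (commentary only, not part of the commit): a minimal usage sketch of the surface added
here, assuming an existing IChatClient named chatClient, an async calling context, and a
hypothetical reasoning-capable model id. While the property carries [Experimental], callers
also need to suppress the MEAI001 diagnostic, as shown.

    #pragma warning disable MEAI001 // ChatOptions.Reasoning is experimental at this point in the series
    ChatOptions options = new()
    {
        ModelId = "o4-mini", // illustrative model id; any reasoning-capable model applies
        Reasoning = new ReasoningOptions
        {
            Effort = ReasoningEffort.High,    // ask the model to reason more deliberately
            Output = ReasoningOutput.Summary, // request a summarized reasoning trace
        },
    };
    ChatResponse response = await chatClient.GetResponseAsync("Outline a migration plan.", options);
    #pragma warning restore MEAI001
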
.../ChatCompletion/ChatOptions.cs | 13 ++++++
.../ChatCompletion/ReasoningEffort.cs | 44 +++++++++++++++++++
.../ChatCompletion/ReasoningOptions.cs | 42 ++++++++++++++++++
.../ChatCompletion/ReasoningOutput.cs | 33 ++++++++++++++
.../OpenAIChatClient.cs | 18 ++++++++
.../OpenAIResponsesChatClient.cs | 36 +++++++++++++++
src/Shared/DiagnosticIds/DiagnosticIds.cs | 1 +
.../ChatCompletion/ChatOptionsTests.cs | 14 ++++++
8 files changed, 201 insertions(+)
create mode 100644 src/Libraries/Microsoft.Extensions.AI.Abstractions/ChatCompletion/ReasoningEffort.cs
create mode 100644 src/Libraries/Microsoft.Extensions.AI.Abstractions/ChatCompletion/ReasoningOptions.cs
create mode 100644 src/Libraries/Microsoft.Extensions.AI.Abstractions/ChatCompletion/ReasoningOutput.cs
diff --git a/src/Libraries/Microsoft.Extensions.AI.Abstractions/ChatCompletion/ChatOptions.cs b/src/Libraries/Microsoft.Extensions.AI.Abstractions/ChatCompletion/ChatOptions.cs
index 944705898f1..a2767b87c92 100644
--- a/src/Libraries/Microsoft.Extensions.AI.Abstractions/ChatCompletion/ChatOptions.cs
+++ b/src/Libraries/Microsoft.Extensions.AI.Abstractions/ChatCompletion/ChatOptions.cs
@@ -37,6 +37,7 @@ protected ChatOptions(ChatOptions? other)
ModelId = other.ModelId;
PresencePenalty = other.PresencePenalty;
RawRepresentationFactory = other.RawRepresentationFactory;
+ Reasoning = other.Reasoning is { } reasoning ? new ReasoningOptions { Effort = reasoning.Effort, Output = reasoning.Output } : null;
ResponseFormat = other.ResponseFormat;
Seed = other.Seed;
Temperature = other.Temperature;
@@ -108,6 +109,18 @@ protected ChatOptions(ChatOptions? other)
/// Gets or sets a seed value used by a service to control the reproducibility of results.
public long? Seed { get; set; }
+ /// <summary>
+ /// Gets or sets the reasoning options for the chat request.
+ /// </summary>
+ /// <remarks>
+ /// If <see langword="null"/>, no reasoning options are specified and the client will use its default.
+ /// Not all providers support reasoning options. Implementations should make a best-effort attempt
+ /// to map the requested options to the provider's capabilities. If a provider doesn't support reasoning,
+ /// these options may be ignored.
+ /// </remarks>
+ [Experimental(DiagnosticIds.Experiments.AIReasoning, UrlFormat = DiagnosticIds.UrlFormat)]
+ public ReasoningOptions? Reasoning { get; set; }
+
/// <summary>
/// Gets or sets the response format for the chat request.
/// </summary>
diff --git a/src/Libraries/Microsoft.Extensions.AI.Abstractions/ChatCompletion/ReasoningEffort.cs b/src/Libraries/Microsoft.Extensions.AI.Abstractions/ChatCompletion/ReasoningEffort.cs
new file mode 100644
index 00000000000..5d91372e1d0
--- /dev/null
+++ b/src/Libraries/Microsoft.Extensions.AI.Abstractions/ChatCompletion/ReasoningEffort.cs
@@ -0,0 +1,44 @@
+// Licensed to the .NET Foundation under one or more agreements.
+// The .NET Foundation licenses this file to you under the MIT license.
+
+using System.Diagnostics.CodeAnalysis;
+using Microsoft.Shared.DiagnosticIds;
+
+namespace Microsoft.Extensions.AI;
+
+/// <summary>
+/// Specifies the level of reasoning effort that should be applied when generating chat responses.
+/// </summary>
+/// <remarks>
+/// This value controls how much computational effort the model should put into reasoning.
+/// Higher values may result in more thoughtful responses but with increased latency and token usage.
+/// The specific interpretation and support for each level may vary between providers.
+/// </remarks>
+[Experimental(DiagnosticIds.Experiments.AIReasoning, UrlFormat = DiagnosticIds.UrlFormat)]
+public enum ReasoningEffort
+{
+ /// <summary>
+ /// No reasoning effort. Disables reasoning for providers that support it.
+ /// </summary>
+ None = 0,
+
+ /// <summary>
+ /// Low reasoning effort. Minimal reasoning for faster responses.
+ /// </summary>
+ Low = 1,
+
+ /// <summary>
+ /// Medium reasoning effort. Balanced reasoning for most use cases.
+ /// </summary>
+ Medium = 2,
+
+ /// <summary>
+ /// High reasoning effort. Extensive reasoning for complex tasks.
+ /// </summary>
+ High = 3,
+
+ /// <summary>
+ /// Extra high reasoning effort. Maximum reasoning for the most demanding tasks.
+ /// </summary>
+ ExtraHigh = 4,
+}
diff --git a/src/Libraries/Microsoft.Extensions.AI.Abstractions/ChatCompletion/ReasoningOptions.cs b/src/Libraries/Microsoft.Extensions.AI.Abstractions/ChatCompletion/ReasoningOptions.cs
new file mode 100644
index 00000000000..df1f8416545
--- /dev/null
+++ b/src/Libraries/Microsoft.Extensions.AI.Abstractions/ChatCompletion/ReasoningOptions.cs
@@ -0,0 +1,42 @@
+// Licensed to the .NET Foundation under one or more agreements.
+// The .NET Foundation licenses this file to you under the MIT license.
+
+using System.Diagnostics.CodeAnalysis;
+using Microsoft.Shared.DiagnosticIds;
+
+namespace Microsoft.Extensions.AI;
+
+/// <summary>
+/// Represents options for configuring reasoning behavior in chat requests.
+/// </summary>
+/// <remarks>
+/// <para>
+/// Reasoning options allow control over how much computational effort the model
+/// should put into reasoning about the response, and how that reasoning should
+/// be exposed to the caller.
+/// </para>
+/// <para>
+/// Not all providers support all reasoning options. Implementations should
+/// make a best-effort attempt to map the requested options to the provider's
+/// capabilities. If a provider doesn't support reasoning, these options may be ignored.
+/// </para>
+/// </remarks>
+[Experimental(DiagnosticIds.Experiments.AIReasoning, UrlFormat = DiagnosticIds.UrlFormat)]
+public sealed class ReasoningOptions
+{
+ /// <summary>
+ /// Gets or sets the level of reasoning effort to apply.
+ /// </summary>
+ /// <value>
+ /// The reasoning effort level, or <see langword="null"/> to use the provider's default.
+ /// </value>
+ public ReasoningEffort? Effort { get; set; }
+
+ /// <summary>
+ /// Gets or sets how reasoning content should be included in the response.
+ /// </summary>
+ /// <value>
+ /// The reasoning output mode, or <see langword="null"/> to use the provider's default.
+ /// </value>
+ public ReasoningOutput? Output { get; set; }
+}
diff --git a/src/Libraries/Microsoft.Extensions.AI.Abstractions/ChatCompletion/ReasoningOutput.cs b/src/Libraries/Microsoft.Extensions.AI.Abstractions/ChatCompletion/ReasoningOutput.cs
new file mode 100644
index 00000000000..19472503c36
--- /dev/null
+++ b/src/Libraries/Microsoft.Extensions.AI.Abstractions/ChatCompletion/ReasoningOutput.cs
@@ -0,0 +1,33 @@
+// Licensed to the .NET Foundation under one or more agreements.
+// The .NET Foundation licenses this file to you under the MIT license.
+
+using System.Diagnostics.CodeAnalysis;
+using Microsoft.Shared.DiagnosticIds;
+
+namespace Microsoft.Extensions.AI;
+
+/// <summary>
+/// Specifies how reasoning content should be included in the response.
+/// </summary>
+/// <remarks>
+/// Some providers support including reasoning or thinking traces in the response.
+/// This setting controls whether and how that reasoning content is exposed.
+/// </remarks>
+[Experimental(DiagnosticIds.Experiments.AIReasoning, UrlFormat = DiagnosticIds.UrlFormat)]
+public enum ReasoningOutput
+{
+ /// <summary>
+ /// No reasoning output. Do not include reasoning content in the response.
+ /// </summary>
+ None = 0,
+
+ /// <summary>
+ /// Summary reasoning output. Include a summary of the reasoning process.
+ /// </summary>
+ Summary = 1,
+
+ /// <summary>
+ /// Detailed reasoning output. Include detailed reasoning content in the response.
+ /// </summary>
+ Detailed = 2,
+}
diff --git a/src/Libraries/Microsoft.Extensions.AI.OpenAI/OpenAIChatClient.cs b/src/Libraries/Microsoft.Extensions.AI.OpenAI/OpenAIChatClient.cs
index a7ca8c08d95..e08d7aa1918 100644
--- a/src/Libraries/Microsoft.Extensions.AI.OpenAI/OpenAIChatClient.cs
+++ b/src/Libraries/Microsoft.Extensions.AI.OpenAI/OpenAIChatClient.cs
@@ -568,6 +568,7 @@ private ChatCompletionOptions ToOpenAIOptions(ChatOptions? options)
result.PresencePenalty ??= options.PresencePenalty;
result.Temperature ??= options.Temperature;
result.Seed ??= options.Seed;
+ result.ReasoningEffortLevel ??= ToOpenAIChatReasoningEffortLevel(options.Reasoning?.Effort);
OpenAIClientExtensions.PatchModelIfNotSet(ref result.Patch, options.ModelId);
if (options.StopSequences is { Count: > 0 } stopSequences)
@@ -637,6 +638,23 @@ ChatResponseFormatJson jsonFormat when OpenAIClientExtensions.StrictSchemaTransf
_ => null
};
+ private static ChatReasoningEffortLevel? ToOpenAIChatReasoningEffortLevel(ReasoningEffort? effort)
+ {
+ if (effort is null or ReasoningEffort.None)
+ {
+ return null;
+ }
+
+ return effort switch
+ {
+ ReasoningEffort.Low => ChatReasoningEffortLevel.Low,
+ ReasoningEffort.Medium => ChatReasoningEffortLevel.Medium,
+ ReasoningEffort.High => ChatReasoningEffortLevel.High,
+ ReasoningEffort.ExtraHigh => ChatReasoningEffortLevel.High, // Map to highest available
+ _ => null,
+ };
+ }
+
private static UsageDetails FromOpenAIUsage(ChatTokenUsage tokenUsage)
{
var destination = new UsageDetails
diff --git a/src/Libraries/Microsoft.Extensions.AI.OpenAI/OpenAIResponsesChatClient.cs b/src/Libraries/Microsoft.Extensions.AI.OpenAI/OpenAIResponsesChatClient.cs
index 5fb6ac1935a..1d882b2f1f4 100644
--- a/src/Libraries/Microsoft.Extensions.AI.OpenAI/OpenAIResponsesChatClient.cs
+++ b/src/Libraries/Microsoft.Extensions.AI.OpenAI/OpenAIResponsesChatClient.cs
@@ -716,6 +716,7 @@ private CreateResponseOptions AsCreateResponseOptions(ChatOptions? options, out
result.Model ??= options.ModelId ?? _responseClient.Model;
result.Temperature ??= options.Temperature;
result.TopP ??= options.TopP;
+ result.ReasoningOptions ??= ToOpenAIResponseReasoningOptions(options.Reasoning);
// If the CreateResponseOptions.PreviousResponseId is already set (likely rare), then we don't need to do
// anything with regards to Conversation, because they're mutually exclusive and we would want to ignore
@@ -814,6 +815,41 @@ ChatResponseFormatJson jsonFormat when OpenAIClientExtensions.StrictSchemaTransf
_ => null,
};
+ private static ResponseReasoningOptions? ToOpenAIResponseReasoningOptions(ReasoningOptions? reasoning)
+ {
+ if (reasoning is null)
+ {
+ return null;
+ }
+
+ ResponseReasoningEffortLevel? effortLevel = reasoning.Effort switch
+ {
+ ReasoningEffort.Low => ResponseReasoningEffortLevel.Low,
+ ReasoningEffort.Medium => ResponseReasoningEffortLevel.Medium,
+ ReasoningEffort.High => ResponseReasoningEffortLevel.High,
+ ReasoningEffort.ExtraHigh => ResponseReasoningEffortLevel.High, // Map to highest available
+ _ => null, // None or null - let OpenAI use its default
+ };
+
+ ResponseReasoningSummaryVerbosity? summary = reasoning.Output switch
+ {
+ ReasoningOutput.Summary => ResponseReasoningSummaryVerbosity.Concise,
+ ReasoningOutput.Detailed => ResponseReasoningSummaryVerbosity.Detailed,
+ _ => null, // None or null - let OpenAI use its default
+ };
+
+ if (effortLevel is null && summary is null)
+ {
+ return null;
+ }
+
+ return new ResponseReasoningOptions
+ {
+ ReasoningEffortLevel = effortLevel,
+ ReasoningSummaryVerbosity = summary,
+ };
+ }
+
/// <summary>Convert a sequence of <see cref="ChatMessage"/>s to <see cref="ResponseItem"/>s.</summary>
internal static IEnumerable<ResponseItem> ToOpenAIResponseItems(IEnumerable<ChatMessage> inputs, ChatOptions? options)
{
diff --git a/src/Shared/DiagnosticIds/DiagnosticIds.cs b/src/Shared/DiagnosticIds/DiagnosticIds.cs
index 4cc736a7252..4e8bb59e470 100644
--- a/src/Shared/DiagnosticIds/DiagnosticIds.cs
+++ b/src/Shared/DiagnosticIds/DiagnosticIds.cs
@@ -57,6 +57,7 @@ internal static class Experiments
internal const string AIResponseContinuations = AIExperiments;
internal const string AICodeInterpreter = AIExperiments;
internal const string AIRealTime = AIExperiments;
+ internal const string AIReasoning = AIExperiments;
private const string AIExperiments = "MEAI001";
}
diff --git a/test/Libraries/Microsoft.Extensions.AI.Abstractions.Tests/ChatCompletion/ChatOptionsTests.cs b/test/Libraries/Microsoft.Extensions.AI.Abstractions.Tests/ChatCompletion/ChatOptionsTests.cs
index e6d863220e1..9c91c1c9ab3 100644
--- a/test/Libraries/Microsoft.Extensions.AI.Abstractions.Tests/ChatCompletion/ChatOptionsTests.cs
+++ b/test/Libraries/Microsoft.Extensions.AI.Abstractions.Tests/ChatCompletion/ChatOptionsTests.cs
@@ -23,6 +23,7 @@ public void Constructor_Parameterless_PropsDefaulted()
Assert.Null(options.FrequencyPenalty);
Assert.Null(options.PresencePenalty);
Assert.Null(options.Seed);
+ Assert.Null(options.Reasoning);
Assert.Null(options.ResponseFormat);
Assert.Null(options.ModelId);
Assert.Null(options.StopSequences);
@@ -42,6 +43,7 @@ public void Constructor_Parameterless_PropsDefaulted()
Assert.Null(clone.FrequencyPenalty);
Assert.Null(clone.PresencePenalty);
Assert.Null(clone.Seed);
+ Assert.Null(clone.Reasoning);
Assert.Null(clone.ResponseFormat);
Assert.Null(clone.ModelId);
Assert.Null(clone.StopSequences);
@@ -89,6 +91,7 @@ public void Properties_Roundtrip()
options.FrequencyPenalty = 0.4f;
options.PresencePenalty = 0.5f;
options.Seed = 12345;
+ options.Reasoning = new ReasoningOptions { Effort = ReasoningEffort.Medium, Output = ReasoningOutput.Summary };
options.ResponseFormat = ChatResponseFormat.Json;
options.ModelId = "modelId";
options.StopSequences = stopSequences;
@@ -109,6 +112,9 @@ public void Properties_Roundtrip()
Assert.Equal(0.4f, options.FrequencyPenalty);
Assert.Equal(0.5f, options.PresencePenalty);
Assert.Equal(12345, options.Seed);
+ Assert.NotNull(options.Reasoning);
+ Assert.Equal(ReasoningEffort.Medium, options.Reasoning.Effort);
+ Assert.Equal(ReasoningOutput.Summary, options.Reasoning.Output);
Assert.Same(ChatResponseFormat.Json, options.ResponseFormat);
Assert.Equal("modelId", options.ModelId);
Assert.Same(stopSequences, options.StopSequences);
@@ -129,6 +135,10 @@ public void Properties_Roundtrip()
Assert.Equal(0.4f, clone.FrequencyPenalty);
Assert.Equal(0.5f, clone.PresencePenalty);
Assert.Equal(12345, clone.Seed);
+ Assert.NotNull(clone.Reasoning);
+ Assert.NotSame(options.Reasoning, clone.Reasoning); // Should be a shallow copy
+ Assert.Equal(ReasoningEffort.Medium, clone.Reasoning.Effort);
+ Assert.Equal(ReasoningOutput.Summary, clone.Reasoning.Output);
Assert.Same(ChatResponseFormat.Json, clone.ResponseFormat);
Assert.Equal("modelId", clone.ModelId);
Assert.Equal(stopSequences, clone.StopSequences);
@@ -168,6 +178,7 @@ public void JsonSerialization_Roundtrips()
options.FrequencyPenalty = 0.4f;
options.PresencePenalty = 0.5f;
options.Seed = 12345;
+ options.Reasoning = new ReasoningOptions { Effort = ReasoningEffort.High, Output = ReasoningOutput.Detailed };
options.ResponseFormat = ChatResponseFormat.Json;
options.ModelId = "modelId";
options.StopSequences = stopSequences;
@@ -197,6 +208,9 @@ public void JsonSerialization_Roundtrips()
Assert.Equal(0.4f, deserialized.FrequencyPenalty);
Assert.Equal(0.5f, deserialized.PresencePenalty);
Assert.Equal(12345, deserialized.Seed);
+ Assert.NotNull(deserialized.Reasoning);
+ Assert.Equal(ReasoningEffort.High, deserialized.Reasoning.Effort);
+ Assert.Equal(ReasoningOutput.Detailed, deserialized.Reasoning.Output);
Assert.IsType<ChatResponseFormatJson>(deserialized.ResponseFormat);
Assert.Equal("modelId", deserialized.ModelId);
Assert.NotSame(stopSequences, deserialized.StopSequences);
From 676ac5de34d20091b5decf0579866c51a43092e4 Mon Sep 17 00:00:00 2001
From: "copilot-swe-agent[bot]" <198982749+Copilot@users.noreply.github.com>
Date: Mon, 2 Feb 2026 00:09:10 +0000
Subject: [PATCH 3/6] Address code review feedback: add Clone method and
document ExtraHigh limitation
Co-authored-by: stephentoub <2642209+stephentoub@users.noreply.github.com>
---
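Notes (commentary only): with this change, copying a ChatOptions yields an independent
ReasoningOptions instance rather than reusing the original one. A small sketch, assuming the
existing ChatOptions.Clone() helper (MEAI001 suppression omitted for brevity):

    ChatOptions original = new()
    {
        Reasoning = new ReasoningOptions { Effort = ReasoningEffort.Medium },
    };

    ChatOptions copy = original.Clone();          // goes through ChatOptions(ChatOptions? other)
    copy.Reasoning!.Effort = ReasoningEffort.Low; // mutates only the copy's instance
    // original.Reasoning.Effort is still ReasoningEffort.Medium.
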
.../ChatCompletion/ChatOptions.cs | 2 +-
.../ChatCompletion/ReasoningEffort.cs | 4 ++++
.../ChatCompletion/ReasoningOptions.cs | 8 ++++++++
3 files changed, 13 insertions(+), 1 deletion(-)
diff --git a/src/Libraries/Microsoft.Extensions.AI.Abstractions/ChatCompletion/ChatOptions.cs b/src/Libraries/Microsoft.Extensions.AI.Abstractions/ChatCompletion/ChatOptions.cs
index a2767b87c92..b9176416632 100644
--- a/src/Libraries/Microsoft.Extensions.AI.Abstractions/ChatCompletion/ChatOptions.cs
+++ b/src/Libraries/Microsoft.Extensions.AI.Abstractions/ChatCompletion/ChatOptions.cs
@@ -37,7 +37,7 @@ protected ChatOptions(ChatOptions? other)
ModelId = other.ModelId;
PresencePenalty = other.PresencePenalty;
RawRepresentationFactory = other.RawRepresentationFactory;
- Reasoning = other.Reasoning is { } reasoning ? new ReasoningOptions { Effort = reasoning.Effort, Output = reasoning.Output } : null;
+ Reasoning = other.Reasoning?.Clone();
ResponseFormat = other.ResponseFormat;
Seed = other.Seed;
Temperature = other.Temperature;
diff --git a/src/Libraries/Microsoft.Extensions.AI.Abstractions/ChatCompletion/ReasoningEffort.cs b/src/Libraries/Microsoft.Extensions.AI.Abstractions/ChatCompletion/ReasoningEffort.cs
index 5d91372e1d0..bbccbd97b4a 100644
--- a/src/Libraries/Microsoft.Extensions.AI.Abstractions/ChatCompletion/ReasoningEffort.cs
+++ b/src/Libraries/Microsoft.Extensions.AI.Abstractions/ChatCompletion/ReasoningEffort.cs
@@ -40,5 +40,9 @@ public enum ReasoningEffort
/// <summary>
/// Extra high reasoning effort. Maximum reasoning for the most demanding tasks.
/// </summary>
+ /// <remarks>
+ /// Not all providers support this level of reasoning effort. Some providers may map this to
+ /// their highest available level (such as <see cref="High"/>) if they don't have an equivalent.
+ /// </remarks>
ExtraHigh = 4,
}
diff --git a/src/Libraries/Microsoft.Extensions.AI.Abstractions/ChatCompletion/ReasoningOptions.cs b/src/Libraries/Microsoft.Extensions.AI.Abstractions/ChatCompletion/ReasoningOptions.cs
index df1f8416545..f38c7b78960 100644
--- a/src/Libraries/Microsoft.Extensions.AI.Abstractions/ChatCompletion/ReasoningOptions.cs
+++ b/src/Libraries/Microsoft.Extensions.AI.Abstractions/ChatCompletion/ReasoningOptions.cs
@@ -39,4 +39,12 @@ public sealed class ReasoningOptions
/// The reasoning output mode, or <see langword="null"/> to use the provider's default.
/// </value>
public ReasoningOutput? Output { get; set; }
+
+ /// <summary>Creates a shallow clone of this instance.</summary>
+ /// <returns>A shallow clone of this instance.</returns>
+ public ReasoningOptions Clone() => new()
+ {
+ Effort = Effort,
+ Output = Output,
+ };
}
From e7f7ce8dc60941827693399b38fc4693668392fd Mon Sep 17 00:00:00 2001
From: "copilot-swe-agent[bot]" <198982749+Copilot@users.noreply.github.com>
Date: Mon, 2 Feb 2026 02:27:12 +0000
Subject: [PATCH 4/6] Address PR feedback: remove experimental attributes, make
Clone internal, simplify ToOpenAIChatReasoningEffortLevel
Co-authored-by: stephentoub <2642209+stephentoub@users.noreply.github.com>
---
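Notes (commentary only): the simplified mapping keeps the same observable behavior for the
OpenAI chat-completions path. An illustrative sketch of what each setting produces on the
wire (payload field names are the OpenAI ones; this is a description, not patch code):

    // ExtraHigh collapses to the highest level the service exposes.
    ChatOptions extraHigh = new() { Reasoning = new ReasoningOptions { Effort = ReasoningEffort.ExtraHigh } };
    // -> request contains "reasoning_effort": "high"

    // None (and a null Effort) leave the field unset so the service default applies.
    ChatOptions none = new() { Reasoning = new ReasoningOptions { Effort = ReasoningEffort.None } };
    // -> request omits "reasoning_effort"
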
.../ChatCompletion/ChatOptions.cs | 1 -
.../ChatCompletion/ReasoningEffort.cs | 8 --
.../ChatCompletion/ReasoningOptions.cs | 6 +-
.../ChatCompletion/ReasoningOutput.cs | 4 -
.../Microsoft.Extensions.AI.Abstractions.json | 88 +++++++++++++++++++
.../OpenAIChatClient.cs | 15 +---
src/Shared/DiagnosticIds/DiagnosticIds.cs | 1 -
7 files changed, 93 insertions(+), 30 deletions(-)
diff --git a/src/Libraries/Microsoft.Extensions.AI.Abstractions/ChatCompletion/ChatOptions.cs b/src/Libraries/Microsoft.Extensions.AI.Abstractions/ChatCompletion/ChatOptions.cs
index b9176416632..718868b30db 100644
--- a/src/Libraries/Microsoft.Extensions.AI.Abstractions/ChatCompletion/ChatOptions.cs
+++ b/src/Libraries/Microsoft.Extensions.AI.Abstractions/ChatCompletion/ChatOptions.cs
@@ -118,7 +118,6 @@ protected ChatOptions(ChatOptions? other)
/// to map the requested options to the provider's capabilities. If a provider doesn't support reasoning,
/// these options may be ignored.
/// </remarks>
- [Experimental(DiagnosticIds.Experiments.AIReasoning, UrlFormat = DiagnosticIds.UrlFormat)]
public ReasoningOptions? Reasoning { get; set; }
///
diff --git a/src/Libraries/Microsoft.Extensions.AI.Abstractions/ChatCompletion/ReasoningEffort.cs b/src/Libraries/Microsoft.Extensions.AI.Abstractions/ChatCompletion/ReasoningEffort.cs
index bbccbd97b4a..4636cff3a00 100644
--- a/src/Libraries/Microsoft.Extensions.AI.Abstractions/ChatCompletion/ReasoningEffort.cs
+++ b/src/Libraries/Microsoft.Extensions.AI.Abstractions/ChatCompletion/ReasoningEffort.cs
@@ -1,9 +1,6 @@
// Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
-using System.Diagnostics.CodeAnalysis;
-using Microsoft.Shared.DiagnosticIds;
-
namespace Microsoft.Extensions.AI;
///
@@ -14,7 +11,6 @@ namespace Microsoft.Extensions.AI;
/// Higher values may result in more thoughtful responses but with increased latency and token usage.
/// The specific interpretation and support for each level may vary between providers.
/// </remarks>
-[Experimental(DiagnosticIds.Experiments.AIReasoning, UrlFormat = DiagnosticIds.UrlFormat)]
public enum ReasoningEffort
{
///
@@ -40,9 +36,5 @@ public enum ReasoningEffort
/// <summary>
/// Extra high reasoning effort. Maximum reasoning for the most demanding tasks.
/// </summary>
- /// <remarks>
- /// Not all providers support this level of reasoning effort. Some providers may map this to
- /// their highest available level (such as <see cref="High"/>) if they don't have an equivalent.
- /// </remarks>
ExtraHigh = 4,
}
diff --git a/src/Libraries/Microsoft.Extensions.AI.Abstractions/ChatCompletion/ReasoningOptions.cs b/src/Libraries/Microsoft.Extensions.AI.Abstractions/ChatCompletion/ReasoningOptions.cs
index f38c7b78960..b55bf8f09c8 100644
--- a/src/Libraries/Microsoft.Extensions.AI.Abstractions/ChatCompletion/ReasoningOptions.cs
+++ b/src/Libraries/Microsoft.Extensions.AI.Abstractions/ChatCompletion/ReasoningOptions.cs
@@ -1,9 +1,6 @@
// Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
-using System.Diagnostics.CodeAnalysis;
-using Microsoft.Shared.DiagnosticIds;
-
namespace Microsoft.Extensions.AI;
///
@@ -21,7 +18,6 @@ namespace Microsoft.Extensions.AI;
/// capabilities. If a provider doesn't support reasoning, these options may be ignored.
/// </para>
/// </remarks>
-[Experimental(DiagnosticIds.Experiments.AIReasoning, UrlFormat = DiagnosticIds.UrlFormat)]
public sealed class ReasoningOptions
{
///
@@ -42,7 +38,7 @@ public sealed class ReasoningOptions
/// <summary>Creates a shallow clone of this instance.</summary>
/// <returns>A shallow clone of this instance.</returns>
- public ReasoningOptions Clone() => new()
+ internal ReasoningOptions Clone() => new()
{
Effort = Effort,
Output = Output,
diff --git a/src/Libraries/Microsoft.Extensions.AI.Abstractions/ChatCompletion/ReasoningOutput.cs b/src/Libraries/Microsoft.Extensions.AI.Abstractions/ChatCompletion/ReasoningOutput.cs
index 19472503c36..90c53d1c9e2 100644
--- a/src/Libraries/Microsoft.Extensions.AI.Abstractions/ChatCompletion/ReasoningOutput.cs
+++ b/src/Libraries/Microsoft.Extensions.AI.Abstractions/ChatCompletion/ReasoningOutput.cs
@@ -1,9 +1,6 @@
// Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
-using System.Diagnostics.CodeAnalysis;
-using Microsoft.Shared.DiagnosticIds;
-
namespace Microsoft.Extensions.AI;
///
@@ -13,7 +10,6 @@ namespace Microsoft.Extensions.AI;
/// Some providers support including reasoning or thinking traces in the response.
/// This setting controls whether and how that reasoning content is exposed.
/// </remarks>
-[Experimental(DiagnosticIds.Experiments.AIReasoning, UrlFormat = DiagnosticIds.UrlFormat)]
public enum ReasoningOutput
{
///
diff --git a/src/Libraries/Microsoft.Extensions.AI.Abstractions/Microsoft.Extensions.AI.Abstractions.json b/src/Libraries/Microsoft.Extensions.AI.Abstractions/Microsoft.Extensions.AI.Abstractions.json
index e6ad6cc28b6..96005b76bab 100644
--- a/src/Libraries/Microsoft.Extensions.AI.Abstractions/Microsoft.Extensions.AI.Abstractions.json
+++ b/src/Libraries/Microsoft.Extensions.AI.Abstractions/Microsoft.Extensions.AI.Abstractions.json
@@ -1033,6 +1033,10 @@
"Member": "System.Func? Microsoft.Extensions.AI.ChatOptions.RawRepresentationFactory { get; set; }",
"Stage": "Stable"
},
+ {
+ "Member": "Microsoft.Extensions.AI.ReasoningOptions? Microsoft.Extensions.AI.ChatOptions.Reasoning { get; set; }",
+ "Stage": "Stable"
+ },
{
"Member": "Microsoft.Extensions.AI.ChatResponseFormat? Microsoft.Extensions.AI.ChatOptions.ResponseFormat { get; set; }",
"Stage": "Stable"
@@ -2111,6 +2115,90 @@
}
]
},
+ {
+ "Type": "enum Microsoft.Extensions.AI.ReasoningEffort",
+ "Stage": "Stable",
+ "Methods": [
+ {
+ "Member": "Microsoft.Extensions.AI.ReasoningEffort.ReasoningEffort();",
+ "Stage": "Stable"
+ }
+ ],
+ "Fields": [
+ {
+ "Member": "const Microsoft.Extensions.AI.ReasoningEffort Microsoft.Extensions.AI.ReasoningEffort.None",
+ "Stage": "Stable",
+ "Value": "0"
+ },
+ {
+ "Member": "const Microsoft.Extensions.AI.ReasoningEffort Microsoft.Extensions.AI.ReasoningEffort.Low",
+ "Stage": "Stable",
+ "Value": "1"
+ },
+ {
+ "Member": "const Microsoft.Extensions.AI.ReasoningEffort Microsoft.Extensions.AI.ReasoningEffort.Medium",
+ "Stage": "Stable",
+ "Value": "2"
+ },
+ {
+ "Member": "const Microsoft.Extensions.AI.ReasoningEffort Microsoft.Extensions.AI.ReasoningEffort.High",
+ "Stage": "Stable",
+ "Value": "3"
+ },
+ {
+ "Member": "const Microsoft.Extensions.AI.ReasoningEffort Microsoft.Extensions.AI.ReasoningEffort.ExtraHigh",
+ "Stage": "Stable",
+ "Value": "4"
+ }
+ ]
+ },
+ {
+ "Type": "sealed class Microsoft.Extensions.AI.ReasoningOptions",
+ "Stage": "Stable",
+ "Methods": [
+ {
+ "Member": "Microsoft.Extensions.AI.ReasoningOptions.ReasoningOptions();",
+ "Stage": "Stable"
+ }
+ ],
+ "Properties": [
+ {
+ "Member": "Microsoft.Extensions.AI.ReasoningEffort? Microsoft.Extensions.AI.ReasoningOptions.Effort { get; set; }",
+ "Stage": "Stable"
+ },
+ {
+ "Member": "Microsoft.Extensions.AI.ReasoningOutput? Microsoft.Extensions.AI.ReasoningOptions.Output { get; set; }",
+ "Stage": "Stable"
+ }
+ ]
+ },
+ {
+ "Type": "enum Microsoft.Extensions.AI.ReasoningOutput",
+ "Stage": "Stable",
+ "Methods": [
+ {
+ "Member": "Microsoft.Extensions.AI.ReasoningOutput.ReasoningOutput();",
+ "Stage": "Stable"
+ }
+ ],
+ "Fields": [
+ {
+ "Member": "const Microsoft.Extensions.AI.ReasoningOutput Microsoft.Extensions.AI.ReasoningOutput.None",
+ "Stage": "Stable",
+ "Value": "0"
+ },
+ {
+ "Member": "const Microsoft.Extensions.AI.ReasoningOutput Microsoft.Extensions.AI.ReasoningOutput.Summary",
+ "Stage": "Stable",
+ "Value": "1"
+ },
+ {
+ "Member": "const Microsoft.Extensions.AI.ReasoningOutput Microsoft.Extensions.AI.ReasoningOutput.Detailed",
+ "Stage": "Stable",
+ "Value": "2"
+ }
+ ]
+ },
{
"Type": "sealed class Microsoft.Extensions.AI.RequiredChatToolMode : Microsoft.Extensions.AI.ChatToolMode",
"Stage": "Stable",
diff --git a/src/Libraries/Microsoft.Extensions.AI.OpenAI/OpenAIChatClient.cs b/src/Libraries/Microsoft.Extensions.AI.OpenAI/OpenAIChatClient.cs
index e08d7aa1918..b1e899cd364 100644
--- a/src/Libraries/Microsoft.Extensions.AI.OpenAI/OpenAIChatClient.cs
+++ b/src/Libraries/Microsoft.Extensions.AI.OpenAI/OpenAIChatClient.cs
@@ -638,22 +638,15 @@ ChatResponseFormatJson jsonFormat when OpenAIClientExtensions.StrictSchemaTransf
_ => null
};
- private static ChatReasoningEffortLevel? ToOpenAIChatReasoningEffortLevel(ReasoningEffort? effort)
- {
- if (effort is null or ReasoningEffort.None)
- {
- return null;
- }
-
- return effort switch
+ private static ChatReasoningEffortLevel? ToOpenAIChatReasoningEffortLevel(ReasoningEffort? effort) =>
+ effort switch
{
ReasoningEffort.Low => ChatReasoningEffortLevel.Low,
ReasoningEffort.Medium => ChatReasoningEffortLevel.Medium,
ReasoningEffort.High => ChatReasoningEffortLevel.High,
- ReasoningEffort.ExtraHigh => ChatReasoningEffortLevel.High, // Map to highest available
- _ => null,
+ ReasoningEffort.ExtraHigh => ChatReasoningEffortLevel.High,
+ _ => (ChatReasoningEffortLevel?)null,
};
- }
private static UsageDetails FromOpenAIUsage(ChatTokenUsage tokenUsage)
{
diff --git a/src/Shared/DiagnosticIds/DiagnosticIds.cs b/src/Shared/DiagnosticIds/DiagnosticIds.cs
index 4e8bb59e470..4cc736a7252 100644
--- a/src/Shared/DiagnosticIds/DiagnosticIds.cs
+++ b/src/Shared/DiagnosticIds/DiagnosticIds.cs
@@ -57,7 +57,6 @@ internal static class Experiments
internal const string AIResponseContinuations = AIExperiments;
internal const string AICodeInterpreter = AIExperiments;
internal const string AIRealTime = AIExperiments;
- internal const string AIReasoning = AIExperiments;
private const string AIExperiments = "MEAI001";
}
From f459c9c06a0c06c79e61ad4dfc2ab05c7bd28e22 Mon Sep 17 00:00:00 2001
From: "copilot-swe-agent[bot]" <198982749+Copilot@users.noreply.github.com>
Date: Mon, 2 Feb 2026 13:57:22 +0000
Subject: [PATCH 5/6] Address PR feedback: update docs, remove enum values, add
tests
Co-authored-by: stephentoub <2642209+stephentoub@users.noreply.github.com>
---
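Notes (commentary only): the new ReasoningOptionsTests exercise serialization through the
test-only TestJsonSerializerContext. A rough equivalent for application code is sketched below;
using AIJsonUtilities.DefaultOptions as the serializer configuration is an assumption here, not
something this patch sets up.

    using System.Text.Json;
    using Microsoft.Extensions.AI;

    ReasoningOptions reasoning = new() { Effort = ReasoningEffort.Medium, Output = ReasoningOutput.Summary };
    string json = JsonSerializer.Serialize(reasoning, AIJsonUtilities.DefaultOptions);
    ReasoningOptions? roundTripped = JsonSerializer.Deserialize<ReasoningOptions>(json, AIJsonUtilities.DefaultOptions);
    // roundTripped.Effort == ReasoningEffort.Medium and roundTripped.Output == ReasoningOutput.Summary
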
.../ChatCompletion/ChatOptions.cs | 6 -
.../ChatCompletion/ReasoningEffort.cs | 16 +--
.../ChatCompletion/ReasoningOptions.cs | 2 +-
.../ChatCompletion/ReasoningOutput.cs | 6 +-
.../OpenAIResponsesChatClient.cs | 4 +-
.../ChatCompletion/ReasoningOptionsTests.cs | 121 ++++++++++++++++++
.../TestJsonSerializerContext.cs | 3 +
.../OpenAIChatClientTests.cs | 90 +++++++++++++
.../OpenAIResponseClientTests.cs | 109 ++++++++++++++++
9 files changed, 337 insertions(+), 20 deletions(-)
create mode 100644 test/Libraries/Microsoft.Extensions.AI.Abstractions.Tests/ChatCompletion/ReasoningOptionsTests.cs
diff --git a/src/Libraries/Microsoft.Extensions.AI.Abstractions/ChatCompletion/ChatOptions.cs b/src/Libraries/Microsoft.Extensions.AI.Abstractions/ChatCompletion/ChatOptions.cs
index 718868b30db..0f86ca30de2 100644
--- a/src/Libraries/Microsoft.Extensions.AI.Abstractions/ChatCompletion/ChatOptions.cs
+++ b/src/Libraries/Microsoft.Extensions.AI.Abstractions/ChatCompletion/ChatOptions.cs
@@ -112,12 +112,6 @@ protected ChatOptions(ChatOptions? other)
/// <summary>
/// Gets or sets the reasoning options for the chat request.
/// </summary>
- /// <remarks>
- /// If <see langword="null"/>, no reasoning options are specified and the client will use its default.
- /// Not all providers support reasoning options. Implementations should make a best-effort attempt
- /// to map the requested options to the provider's capabilities. If a provider doesn't support reasoning,
- /// these options may be ignored.
- /// </remarks>
public ReasoningOptions? Reasoning { get; set; }
///
diff --git a/src/Libraries/Microsoft.Extensions.AI.Abstractions/ChatCompletion/ReasoningEffort.cs b/src/Libraries/Microsoft.Extensions.AI.Abstractions/ChatCompletion/ReasoningEffort.cs
index 4636cff3a00..a1b40dbdae4 100644
--- a/src/Libraries/Microsoft.Extensions.AI.Abstractions/ChatCompletion/ReasoningEffort.cs
+++ b/src/Libraries/Microsoft.Extensions.AI.Abstractions/ChatCompletion/ReasoningEffort.cs
@@ -7,34 +7,34 @@ namespace Microsoft.Extensions.AI;
/// Specifies the level of reasoning effort that should be applied when generating chat responses.
/// </summary>
/// <remarks>
-/// This value controls how much computational effort the model should put into reasoning.
+/// This value suggests how much computational effort the model should put into reasoning.
/// Higher values may result in more thoughtful responses but with increased latency and token usage.
-/// The specific interpretation and support for each level may vary between providers.
+/// The specific interpretation and support for each level may vary between providers or even between models from the same provider.
/// </remarks>
public enum ReasoningEffort
{
/// <summary>
- /// No reasoning effort. Disables reasoning for providers that support it.
+ /// No reasoning effort.
/// </summary>
- None = 0,
+ None,
/// <summary>
/// Low reasoning effort. Minimal reasoning for faster responses.
/// </summary>
- Low = 1,
+ Low,
/// <summary>
/// Medium reasoning effort. Balanced reasoning for most use cases.
/// </summary>
- Medium = 2,
+ Medium,
/// <summary>
/// High reasoning effort. Extensive reasoning for complex tasks.
/// </summary>
- High = 3,
+ High,
/// <summary>
/// Extra high reasoning effort. Maximum reasoning for the most demanding tasks.
/// </summary>
- ExtraHigh = 4,
+ ExtraHigh,
}
diff --git a/src/Libraries/Microsoft.Extensions.AI.Abstractions/ChatCompletion/ReasoningOptions.cs b/src/Libraries/Microsoft.Extensions.AI.Abstractions/ChatCompletion/ReasoningOptions.cs
index b55bf8f09c8..7f302461490 100644
--- a/src/Libraries/Microsoft.Extensions.AI.Abstractions/ChatCompletion/ReasoningOptions.cs
+++ b/src/Libraries/Microsoft.Extensions.AI.Abstractions/ChatCompletion/ReasoningOptions.cs
@@ -15,7 +15,7 @@ namespace Microsoft.Extensions.AI;
/// <para>
/// Not all providers support all reasoning options. Implementations should
/// make a best-effort attempt to map the requested options to the provider's
-/// capabilities. If a provider doesn't support reasoning, these options may be ignored.
+/// capabilities. If a provider or model doesn't support reasoning or doesn't support the requested configuration of reasoning, these options may be ignored.
/// </para>
/// </remarks>
public sealed class ReasoningOptions
diff --git a/src/Libraries/Microsoft.Extensions.AI.Abstractions/ChatCompletion/ReasoningOutput.cs b/src/Libraries/Microsoft.Extensions.AI.Abstractions/ChatCompletion/ReasoningOutput.cs
index 90c53d1c9e2..c384eeec928 100644
--- a/src/Libraries/Microsoft.Extensions.AI.Abstractions/ChatCompletion/ReasoningOutput.cs
+++ b/src/Libraries/Microsoft.Extensions.AI.Abstractions/ChatCompletion/ReasoningOutput.cs
@@ -15,15 +15,15 @@ public enum ReasoningOutput
/// <summary>
/// No reasoning output. Do not include reasoning content in the response.
/// </summary>
- None = 0,
+ None,
/// <summary>
/// Summary reasoning output. Include a summary of the reasoning process.
/// </summary>
- Summary = 1,
+ Summary,
/// <summary>
/// Detailed reasoning output. Include detailed reasoning content in the response.
/// </summary>
- Detailed = 2,
+ Detailed,
}
diff --git a/src/Libraries/Microsoft.Extensions.AI.OpenAI/OpenAIResponsesChatClient.cs b/src/Libraries/Microsoft.Extensions.AI.OpenAI/OpenAIResponsesChatClient.cs
index 1d882b2f1f4..0d690006b97 100644
--- a/src/Libraries/Microsoft.Extensions.AI.OpenAI/OpenAIResponsesChatClient.cs
+++ b/src/Libraries/Microsoft.Extensions.AI.OpenAI/OpenAIResponsesChatClient.cs
@@ -828,14 +828,14 @@ ChatResponseFormatJson jsonFormat when OpenAIClientExtensions.StrictSchemaTransf
ReasoningEffort.Medium => ResponseReasoningEffortLevel.Medium,
ReasoningEffort.High => ResponseReasoningEffortLevel.High,
ReasoningEffort.ExtraHigh => ResponseReasoningEffortLevel.High, // Map to highest available
- _ => null, // None or null - let OpenAI use its default
+ _ => (ResponseReasoningEffortLevel?)null, // None or null - let OpenAI use its default
};
ResponseReasoningSummaryVerbosity? summary = reasoning.Output switch
{
ReasoningOutput.Summary => ResponseReasoningSummaryVerbosity.Concise,
ReasoningOutput.Detailed => ResponseReasoningSummaryVerbosity.Detailed,
- _ => null, // None or null - let OpenAI use its default
+ _ => (ResponseReasoningSummaryVerbosity?)null, // None or null - let OpenAI use its default
};
if (effortLevel is null && summary is null)
diff --git a/test/Libraries/Microsoft.Extensions.AI.Abstractions.Tests/ChatCompletion/ReasoningOptionsTests.cs b/test/Libraries/Microsoft.Extensions.AI.Abstractions.Tests/ChatCompletion/ReasoningOptionsTests.cs
new file mode 100644
index 00000000000..4241eef163b
--- /dev/null
+++ b/test/Libraries/Microsoft.Extensions.AI.Abstractions.Tests/ChatCompletion/ReasoningOptionsTests.cs
@@ -0,0 +1,121 @@
+// Licensed to the .NET Foundation under one or more agreements.
+// The .NET Foundation licenses this file to you under the MIT license.
+
+using System.Text.Json;
+using Xunit;
+
+namespace Microsoft.Extensions.AI;
+
+public class ReasoningOptionsTests
+{
+ [Fact]
+ public void Constructor_Default_PropertiesAreNull()
+ {
+ ReasoningOptions options = new();
+
+ Assert.Null(options.Effort);
+ Assert.Null(options.Output);
+ }
+
+ [Theory]
+ [InlineData(ReasoningEffort.None)]
+ [InlineData(ReasoningEffort.Low)]
+ [InlineData(ReasoningEffort.Medium)]
+ [InlineData(ReasoningEffort.High)]
+ [InlineData(ReasoningEffort.ExtraHigh)]
+ public void Effort_Roundtrips(ReasoningEffort effort)
+ {
+ ReasoningOptions options = new() { Effort = effort };
+ Assert.Equal(effort, options.Effort);
+ }
+
+ [Theory]
+ [InlineData(ReasoningOutput.None)]
+ [InlineData(ReasoningOutput.Summary)]
+ [InlineData(ReasoningOutput.Detailed)]
+ public void Output_Roundtrips(ReasoningOutput output)
+ {
+ ReasoningOptions options = new() { Output = output };
+ Assert.Equal(output, options.Output);
+ }
+
+ [Fact]
+ public void JsonSerialization_Roundtrips()
+ {
+ ReasoningOptions options = new()
+ {
+ Effort = ReasoningEffort.High,
+ Output = ReasoningOutput.Detailed,
+ };
+
+ string json = JsonSerializer.Serialize(options, TestJsonSerializerContext.Default.ReasoningOptions);
+ ReasoningOptions? deserialized = JsonSerializer.Deserialize(json, TestJsonSerializerContext.Default.ReasoningOptions);
+
+ Assert.NotNull(deserialized);
+ Assert.Equal(options.Effort, deserialized.Effort);
+ Assert.Equal(options.Output, deserialized.Output);
+ }
+
+ [Fact]
+ public void JsonSerialization_NullProperties_Roundtrips()
+ {
+ ReasoningOptions options = new();
+
+ string json = JsonSerializer.Serialize(options, TestJsonSerializerContext.Default.ReasoningOptions);
+ ReasoningOptions? deserialized = JsonSerializer.Deserialize(json, TestJsonSerializerContext.Default.ReasoningOptions);
+
+ Assert.NotNull(deserialized);
+ Assert.Null(deserialized.Effort);
+ Assert.Null(deserialized.Output);
+ }
+
+ [Fact]
+ public void JsonSerialization_EffortOnly_Roundtrips()
+ {
+ ReasoningOptions options = new() { Effort = ReasoningEffort.Medium };
+
+ string json = JsonSerializer.Serialize(options, TestJsonSerializerContext.Default.ReasoningOptions);
+ ReasoningOptions? deserialized = JsonSerializer.Deserialize(json, TestJsonSerializerContext.Default.ReasoningOptions);
+
+ Assert.NotNull(deserialized);
+ Assert.Equal(ReasoningEffort.Medium, deserialized.Effort);
+ Assert.Null(deserialized.Output);
+ }
+
+ [Fact]
+ public void JsonSerialization_OutputOnly_Roundtrips()
+ {
+ ReasoningOptions options = new() { Output = ReasoningOutput.Summary };
+
+ string json = JsonSerializer.Serialize(options, TestJsonSerializerContext.Default.ReasoningOptions);
+ ReasoningOptions? deserialized = JsonSerializer.Deserialize(json, TestJsonSerializerContext.Default.ReasoningOptions);
+
+ Assert.NotNull(deserialized);
+ Assert.Null(deserialized.Effort);
+ Assert.Equal(ReasoningOutput.Summary, deserialized.Output);
+ }
+
+ [Fact]
+ public void JsonSerialization_AllEffortValues_SerializeAsStrings()
+ {
+ // Test all ReasoningEffort values serialize correctly
+ foreach (ReasoningEffort effort in new[] { ReasoningEffort.None, ReasoningEffort.Low, ReasoningEffort.Medium, ReasoningEffort.High, ReasoningEffort.ExtraHigh })
+ {
+ string json = JsonSerializer.Serialize(effort, TestJsonSerializerContext.Default.ReasoningEffort);
+ ReasoningEffort? deserialized = JsonSerializer.Deserialize(json, TestJsonSerializerContext.Default.ReasoningEffort);
+ Assert.Equal(effort, deserialized);
+ }
+ }
+
+ [Fact]
+ public void JsonSerialization_AllOutputValues_SerializeAsStrings()
+ {
+ // Test all ReasoningOutput values serialize correctly
+ foreach (ReasoningOutput output in new[] { ReasoningOutput.None, ReasoningOutput.Summary, ReasoningOutput.Detailed })
+ {
+ string json = JsonSerializer.Serialize(output, TestJsonSerializerContext.Default.ReasoningOutput);
+ ReasoningOutput? deserialized = JsonSerializer.Deserialize(json, TestJsonSerializerContext.Default.ReasoningOutput);
+ Assert.Equal(output, deserialized);
+ }
+ }
+}
diff --git a/test/Libraries/Microsoft.Extensions.AI.Abstractions.Tests/TestJsonSerializerContext.cs b/test/Libraries/Microsoft.Extensions.AI.Abstractions.Tests/TestJsonSerializerContext.cs
index faaa799baf4..6c448d0efb1 100644
--- a/test/Libraries/Microsoft.Extensions.AI.Abstractions.Tests/TestJsonSerializerContext.cs
+++ b/test/Libraries/Microsoft.Extensions.AI.Abstractions.Tests/TestJsonSerializerContext.cs
@@ -41,4 +41,7 @@ namespace Microsoft.Extensions.AI;
[JsonSerializable(typeof(ResponseContinuationToken))]
[JsonSerializable(typeof(UserInputRequestContent[]))]
[JsonSerializable(typeof(UserInputResponseContent[]))]
+[JsonSerializable(typeof(ReasoningOptions))]
+[JsonSerializable(typeof(ReasoningEffort))]
+[JsonSerializable(typeof(ReasoningOutput))]
internal sealed partial class TestJsonSerializerContext : JsonSerializerContext;
diff --git a/test/Libraries/Microsoft.Extensions.AI.OpenAI.Tests/OpenAIChatClientTests.cs b/test/Libraries/Microsoft.Extensions.AI.OpenAI.Tests/OpenAIChatClientTests.cs
index 99aa44cdd0f..f6d6ad95eb8 100644
--- a/test/Libraries/Microsoft.Extensions.AI.OpenAI.Tests/OpenAIChatClientTests.cs
+++ b/test/Libraries/Microsoft.Extensions.AI.OpenAI.Tests/OpenAIChatClientTests.cs
@@ -1816,4 +1816,94 @@ public void AsChatMessages_PreservesRole_MultipleMessages()
Assert.Equal(ChatRole.Assistant, extMessages[2].Role);
Assert.Equal(ChatRole.System, extMessages[3].Role);
}
+
+ [Theory]
+ [InlineData(ReasoningEffort.Low, "low")]
+ [InlineData(ReasoningEffort.Medium, "medium")]
+ [InlineData(ReasoningEffort.High, "high")]
+ [InlineData(ReasoningEffort.ExtraHigh, "high")] // ExtraHigh maps to high in OpenAI
+ public async Task ReasoningOptions_Effort_ProducesExpectedJson(ReasoningEffort effort, string expectedEffortString)
+ {
+ string input = $$"""
+ {
+ "messages": [
+ {
+ "role": "user",
+ "content": "hello"
+ }
+ ],
+ "model": "o4-mini",
+ "reasoning_effort": "{{expectedEffortString}}"
+ }
+ """;
+
+ const string Output = """
+ {
+ "id": "chatcmpl-test",
+ "object": "chat.completion",
+ "model": "o4-mini",
+ "choices": [
+ {
+ "message": {
+ "role": "assistant",
+ "content": "Hello!"
+ },
+ "finish_reason": "stop"
+ }
+ ]
+ }
+ """;
+
+ using VerbatimHttpHandler handler = new(input, Output);
+ using HttpClient httpClient = new(handler);
+ using IChatClient client = CreateChatClient(httpClient, "o4-mini");
+
+ Assert.NotNull(await client.GetResponseAsync("hello", new()
+ {
+ Reasoning = new ReasoningOptions { Effort = effort }
+ }));
+ }
+
+ [Fact]
+ public async Task ReasoningOptions_None_ProducesNoReasoningEffortInJson()
+ {
+ const string Input = """
+ {
+ "messages": [
+ {
+ "role": "user",
+ "content": "hello"
+ }
+ ],
+ "model": "gpt-4o-mini"
+ }
+ """;
+
+ const string Output = """
+ {
+ "id": "chatcmpl-test",
+ "object": "chat.completion",
+ "model": "gpt-4o-mini",
+ "choices": [
+ {
+ "message": {
+ "role": "assistant",
+ "content": "Hello!"
+ },
+ "finish_reason": "stop"
+ }
+ ]
+ }
+ """;
+
+ using VerbatimHttpHandler handler = new(Input, Output);
+ using HttpClient httpClient = new(handler);
+ using IChatClient client = CreateChatClient(httpClient, "gpt-4o-mini");
+
+ // None effort should not include reasoning_effort in the request
+ Assert.NotNull(await client.GetResponseAsync("hello", new()
+ {
+ Reasoning = new ReasoningOptions { Effort = ReasoningEffort.None }
+ }));
+ }
}
diff --git a/test/Libraries/Microsoft.Extensions.AI.OpenAI.Tests/OpenAIResponseClientTests.cs b/test/Libraries/Microsoft.Extensions.AI.OpenAI.Tests/OpenAIResponseClientTests.cs
index d1a6749450b..3a8008ab53a 100644
--- a/test/Libraries/Microsoft.Extensions.AI.OpenAI.Tests/OpenAIResponseClientTests.cs
+++ b/test/Libraries/Microsoft.Extensions.AI.OpenAI.Tests/OpenAIResponseClientTests.cs
@@ -5671,6 +5671,115 @@ static bool HasCorrectImageData(AIContent o, int index)
Assert.Equal("img_call_ghi789", toolCall.ImageId);
}
+ [Theory]
+ [InlineData(ReasoningEffort.Low, ReasoningOutput.Summary, "low", "concise")]
+ [InlineData(ReasoningEffort.Medium, ReasoningOutput.Detailed, "medium", "detailed")]
+ [InlineData(ReasoningEffort.High, ReasoningOutput.Summary, "high", "concise")]
+ [InlineData(ReasoningEffort.ExtraHigh, ReasoningOutput.Detailed, "high", "detailed")] // ExtraHigh maps to high
+ public async Task ReasoningOptions_EffortAndOutput_ProducesExpectedJson(
+ ReasoningEffort effort,
+ ReasoningOutput output,
+ string expectedEffortString,
+ string expectedSummaryString)
+ {
+ string input = $$"""
+ {
+ "model": "o4-mini",
+ "input": [{
+ "type": "message",
+ "role": "user",
+ "content": [{"type": "input_text", "text": "hello"}]
+ }],
+ "reasoning": {
+ "effort": "{{expectedEffortString}}",
+ "summary": "{{expectedSummaryString}}"
+ }
+ }
+ """;
+
+ const string Output = """
+ {
+ "id": "resp_test",
+ "object": "response",
+ "created_at": 1741891428,
+ "status": "completed",
+ "model": "o4-mini",
+ "output": [
+ {
+ "id": "msg_test",
+ "type": "message",
+ "status": "completed",
+ "role": "assistant",
+ "content": [{"type": "output_text", "text": "Hello!"}]
+ }
+ ],
+ "usage": {
+ "input_tokens": 10,
+ "output_tokens": 5,
+ "total_tokens": 15
+ }
+ }
+ """;
+
+ using VerbatimHttpHandler handler = new(input, Output);
+ using HttpClient httpClient = new(handler);
+ using IChatClient client = CreateResponseClient(httpClient, "o4-mini");
+
+ Assert.NotNull(await client.GetResponseAsync("hello", new()
+ {
+ Reasoning = new ReasoningOptions { Effort = effort, Output = output }
+ }));
+ }
+
+ [Fact]
+ public async Task ReasoningOptions_NoneValues_ProducesNoReasoningInJson()
+ {
+ const string Input = """
+ {
+ "model": "o4-mini",
+ "input": [{
+ "type": "message",
+ "role": "user",
+ "content": [{"type": "input_text", "text": "hello"}]
+ }]
+ }
+ """;
+
+ const string Output = """
+ {
+ "id": "resp_test",
+ "object": "response",
+ "created_at": 1741891428,
+ "status": "completed",
+ "model": "o4-mini",
+ "output": [
+ {
+ "id": "msg_test",
+ "type": "message",
+ "status": "completed",
+ "role": "assistant",
+ "content": [{"type": "output_text", "text": "Hello!"}]
+ }
+ ],
+ "usage": {
+ "input_tokens": 10,
+ "output_tokens": 5,
+ "total_tokens": 15
+ }
+ }
+ """;
+
+ using VerbatimHttpHandler handler = new(Input, Output);
+ using HttpClient httpClient = new(handler);
+ using IChatClient client = CreateResponseClient(httpClient, "o4-mini");
+
+ // None values for both should result in no reasoning options in the request
+ Assert.NotNull(await client.GetResponseAsync("hello", new()
+ {
+ Reasoning = new ReasoningOptions { Effort = ReasoningEffort.None, Output = ReasoningOutput.None }
+ }));
+ }
+
private static IChatClient CreateResponseClient(HttpClient httpClient, string modelId) =>
new OpenAIClient(
new ApiKeyCredential("apikey"),
From 88c12a325ff35babcf7c0eb23502432be3b17853 Mon Sep 17 00:00:00 2001
From: "copilot-swe-agent[bot]" <198982749+Copilot@users.noreply.github.com>
Date: Thu, 5 Feb 2026 13:58:30 +0000
Subject: [PATCH 6/6] Rename ReasoningOutput.Detailed to ReasoningOutput.Full
Co-authored-by: stephentoub <2642209+stephentoub@users.noreply.github.com>
---
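Notes (commentary only): this is a source-breaking rename with no behavior change; the OpenAI
Responses client still maps the value to ResponseReasoningSummaryVerbosity.Detailed. A migration
sketch for call sites written against the earlier patches:

    ChatOptions options = new()
    {
        // was: Output = ReasoningOutput.Detailed
        Reasoning = new ReasoningOptions { Output = ReasoningOutput.Full },
    };
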
.../ChatCompletion/ReasoningOutput.cs | 4 ++--
.../Microsoft.Extensions.AI.Abstractions.json | 2 +-
.../OpenAIResponsesChatClient.cs | 2 +-
.../ChatCompletion/ChatOptionsTests.cs | 4 ++--
.../ChatCompletion/ReasoningOptionsTests.cs | 6 +++---
.../OpenAIResponseClientTests.cs | 4 ++--
6 files changed, 11 insertions(+), 11 deletions(-)
diff --git a/src/Libraries/Microsoft.Extensions.AI.Abstractions/ChatCompletion/ReasoningOutput.cs b/src/Libraries/Microsoft.Extensions.AI.Abstractions/ChatCompletion/ReasoningOutput.cs
index c384eeec928..9250c81a5c8 100644
--- a/src/Libraries/Microsoft.Extensions.AI.Abstractions/ChatCompletion/ReasoningOutput.cs
+++ b/src/Libraries/Microsoft.Extensions.AI.Abstractions/ChatCompletion/ReasoningOutput.cs
@@ -23,7 +23,7 @@ public enum ReasoningOutput
Summary,
/// <summary>
- /// Detailed reasoning output. Include detailed reasoning content in the response.
+ /// Full reasoning output. Include all reasoning content in the response.
/// </summary>
- Detailed,
+ Full,
}
diff --git a/src/Libraries/Microsoft.Extensions.AI.Abstractions/Microsoft.Extensions.AI.Abstractions.json b/src/Libraries/Microsoft.Extensions.AI.Abstractions/Microsoft.Extensions.AI.Abstractions.json
index 96005b76bab..4b7da685831 100644
--- a/src/Libraries/Microsoft.Extensions.AI.Abstractions/Microsoft.Extensions.AI.Abstractions.json
+++ b/src/Libraries/Microsoft.Extensions.AI.Abstractions/Microsoft.Extensions.AI.Abstractions.json
@@ -2193,7 +2193,7 @@
"Value": "1"
},
{
- "Member": "const Microsoft.Extensions.AI.ReasoningOutput Microsoft.Extensions.AI.ReasoningOutput.Detailed",
+ "Member": "const Microsoft.Extensions.AI.ReasoningOutput Microsoft.Extensions.AI.ReasoningOutput.Full",
"Stage": "Stable",
"Value": "2"
}
diff --git a/src/Libraries/Microsoft.Extensions.AI.OpenAI/OpenAIResponsesChatClient.cs b/src/Libraries/Microsoft.Extensions.AI.OpenAI/OpenAIResponsesChatClient.cs
index 0d690006b97..98217dfd791 100644
--- a/src/Libraries/Microsoft.Extensions.AI.OpenAI/OpenAIResponsesChatClient.cs
+++ b/src/Libraries/Microsoft.Extensions.AI.OpenAI/OpenAIResponsesChatClient.cs
@@ -834,7 +834,7 @@ ChatResponseFormatJson jsonFormat when OpenAIClientExtensions.StrictSchemaTransf
ResponseReasoningSummaryVerbosity? summary = reasoning.Output switch
{
ReasoningOutput.Summary => ResponseReasoningSummaryVerbosity.Concise,
- ReasoningOutput.Detailed => ResponseReasoningSummaryVerbosity.Detailed,
+ ReasoningOutput.Full => ResponseReasoningSummaryVerbosity.Detailed,
_ => (ResponseReasoningSummaryVerbosity?)null, // None or null - let OpenAI use its default
};
diff --git a/test/Libraries/Microsoft.Extensions.AI.Abstractions.Tests/ChatCompletion/ChatOptionsTests.cs b/test/Libraries/Microsoft.Extensions.AI.Abstractions.Tests/ChatCompletion/ChatOptionsTests.cs
index 9c91c1c9ab3..4b8b4aa2f94 100644
--- a/test/Libraries/Microsoft.Extensions.AI.Abstractions.Tests/ChatCompletion/ChatOptionsTests.cs
+++ b/test/Libraries/Microsoft.Extensions.AI.Abstractions.Tests/ChatCompletion/ChatOptionsTests.cs
@@ -178,7 +178,7 @@ public void JsonSerialization_Roundtrips()
options.FrequencyPenalty = 0.4f;
options.PresencePenalty = 0.5f;
options.Seed = 12345;
- options.Reasoning = new ReasoningOptions { Effort = ReasoningEffort.High, Output = ReasoningOutput.Detailed };
+ options.Reasoning = new ReasoningOptions { Effort = ReasoningEffort.High, Output = ReasoningOutput.Full };
options.ResponseFormat = ChatResponseFormat.Json;
options.ModelId = "modelId";
options.StopSequences = stopSequences;
@@ -210,7 +210,7 @@ public void JsonSerialization_Roundtrips()
Assert.Equal(12345, deserialized.Seed);
Assert.NotNull(deserialized.Reasoning);
Assert.Equal(ReasoningEffort.High, deserialized.Reasoning.Effort);
- Assert.Equal(ReasoningOutput.Detailed, deserialized.Reasoning.Output);
+ Assert.Equal(ReasoningOutput.Full, deserialized.Reasoning.Output);
Assert.IsType<ChatResponseFormatJson>(deserialized.ResponseFormat);
Assert.Equal("modelId", deserialized.ModelId);
Assert.NotSame(stopSequences, deserialized.StopSequences);
diff --git a/test/Libraries/Microsoft.Extensions.AI.Abstractions.Tests/ChatCompletion/ReasoningOptionsTests.cs b/test/Libraries/Microsoft.Extensions.AI.Abstractions.Tests/ChatCompletion/ReasoningOptionsTests.cs
index 4241eef163b..273455f36fe 100644
--- a/test/Libraries/Microsoft.Extensions.AI.Abstractions.Tests/ChatCompletion/ReasoningOptionsTests.cs
+++ b/test/Libraries/Microsoft.Extensions.AI.Abstractions.Tests/ChatCompletion/ReasoningOptionsTests.cs
@@ -32,7 +32,7 @@ public void Effort_Roundtrips(ReasoningEffort effort)
[Theory]
[InlineData(ReasoningOutput.None)]
[InlineData(ReasoningOutput.Summary)]
- [InlineData(ReasoningOutput.Detailed)]
+ [InlineData(ReasoningOutput.Full)]
public void Output_Roundtrips(ReasoningOutput output)
{
ReasoningOptions options = new() { Output = output };
@@ -45,7 +45,7 @@ public void JsonSerialization_Roundtrips()
ReasoningOptions options = new()
{
Effort = ReasoningEffort.High,
- Output = ReasoningOutput.Detailed,
+ Output = ReasoningOutput.Full,
};
string json = JsonSerializer.Serialize(options, TestJsonSerializerContext.Default.ReasoningOptions);
@@ -111,7 +111,7 @@ public void JsonSerialization_AllEffortValues_SerializeAsStrings()
public void JsonSerialization_AllOutputValues_SerializeAsStrings()
{
// Test all ReasoningOutput values serialize correctly
- foreach (ReasoningOutput output in new[] { ReasoningOutput.None, ReasoningOutput.Summary, ReasoningOutput.Detailed })
+ foreach (ReasoningOutput output in new[] { ReasoningOutput.None, ReasoningOutput.Summary, ReasoningOutput.Full })
{
string json = JsonSerializer.Serialize(output, TestJsonSerializerContext.Default.ReasoningOutput);
ReasoningOutput? deserialized = JsonSerializer.Deserialize(json, TestJsonSerializerContext.Default.ReasoningOutput);
diff --git a/test/Libraries/Microsoft.Extensions.AI.OpenAI.Tests/OpenAIResponseClientTests.cs b/test/Libraries/Microsoft.Extensions.AI.OpenAI.Tests/OpenAIResponseClientTests.cs
index 3a8008ab53a..eea003bf9d1 100644
--- a/test/Libraries/Microsoft.Extensions.AI.OpenAI.Tests/OpenAIResponseClientTests.cs
+++ b/test/Libraries/Microsoft.Extensions.AI.OpenAI.Tests/OpenAIResponseClientTests.cs
@@ -5673,9 +5673,9 @@ static bool HasCorrectImageData(AIContent o, int index)
[Theory]
[InlineData(ReasoningEffort.Low, ReasoningOutput.Summary, "low", "concise")]
- [InlineData(ReasoningEffort.Medium, ReasoningOutput.Detailed, "medium", "detailed")]
+ [InlineData(ReasoningEffort.Medium, ReasoningOutput.Full, "medium", "detailed")]
[InlineData(ReasoningEffort.High, ReasoningOutput.Summary, "high", "concise")]
- [InlineData(ReasoningEffort.ExtraHigh, ReasoningOutput.Detailed, "high", "detailed")] // ExtraHigh maps to high
+ [InlineData(ReasoningEffort.ExtraHigh, ReasoningOutput.Full, "high", "detailed")] // ExtraHigh maps to high
public async Task ReasoningOptions_EffortAndOutput_ProducesExpectedJson(
ReasoningEffort effort,
ReasoningOutput output,